Merge pull request #304 from v-dareck/bufferedresult_leak

Fix memory leaks in buffered resultsets.
Authored by Hadis Kakanejadi Fard on 2017-02-27 14:46:07 -08:00, committed by GitHub
commit f0a5da9823
3 changed files with 87 additions and 76 deletions
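The changes below all apply the same ownership discipline: a heap allocation is held by a scope guard until the structure that ultimately owns it (the row cache, the meta array, or current_results) has actually taken it over, and only then is the guard released with transferred(). As a rough illustration of that pattern — using a simplified, hypothetical scoped_malloc_ptr guard and a plain std::vector cache rather than the driver's sqlsrv_malloc_auto_ptr and Zend hash table — the row-buffering loop has this shape:

#include <cstdlib>
#include <cstring>
#include <new>
#include <vector>

// Hypothetical, simplified stand-in for the driver's sqlsrv_malloc_auto_ptr:
// frees the buffer in its destructor unless ownership was handed off.
template <typename T>
class scoped_malloc_ptr {
public:
    scoped_malloc_ptr() : ptr_( NULL ) {}
    ~scoped_malloc_ptr() { std::free( ptr_ ); }      // no-op once transferred
    void reset( T* p ) { std::free( ptr_ ); ptr_ = p; }
    T* get() const { return ptr_; }
    void transferred() { ptr_ = NULL; }              // the new owner will free it
private:
    scoped_malloc_ptr( const scoped_malloc_ptr& );   // non-copyable
    scoped_malloc_ptr& operator=( const scoped_malloc_ptr& );
    T* ptr_;
};

// Shape of the buffering loop: each row buffer stays guarded until it has been
// safely inserted into the cache, so an exception cannot leak it.
void buffer_rows( std::vector<unsigned char*>& cache, size_t row_size, size_t row_count )
{
    try {
        for( size_t r = 0; r < row_count; ++r ) {
            scoped_malloc_ptr<unsigned char> row_guard;
            row_guard.reset( static_cast<unsigned char*>( std::malloc( row_size ) ) );
            if( row_guard.get() == NULL ) throw std::bad_alloc();
            std::memset( row_guard.get(), 0, row_size );

            // ... read the fields into the row buffer; this may throw ...

            cache.push_back( row_guard.get() );   // the cache now owns the row
            row_guard.transferred();              // so the guard must not free it
        }
    }
    catch( ... ) {
        // free everything buffered so far, then rethrow
        for( size_t r = 0; r < cache.size(); ++r ) std::free( cache[r] );
        cache.clear();
        throw;
    }
}

If any per-field read throws, the guard frees the in-flight row and the catch handler releases every row already parked in the cache, which mirrors what the constructor's new try/catch does with zend_hash_destroy below.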


@@ -440,15 +440,11 @@ sqlsrv_buffered_result_set::sqlsrv_buffered_result_set( sqlsrv_stmt* stmt TSRMLS
     sqlsrv_result_set( stmt ),
     cache(NULL),
     col_count(0),
-    meta(NULL),
     current(0),
     last_field_index(-1),
     read_so_far(0),
     temp_length(0)
 {
-    // 10 is an arbitrary number for now for the initial size of the cache
-    ALLOC_HASHTABLE( cache );
-    core::sqlsrv_zend_hash_init( *stmt, cache, 10 /* # of buckets */, cache_row_dtor /*dtor*/, 0 /*persistent*/ TSRMLS_CC );
     col_count = core::SQLNumResultCols( stmt TSRMLS_CC );
     // there is no result set to buffer
     if( col_count == 0 ) {
@@ -662,87 +658,104 @@ sqlsrv_buffered_result_set::sqlsrv_buffered_result_set( sqlsrv_stmt* stmt TSRMLS
     // (offset from the above loop has the size of the row buffer necessary)
     zend_long mem_used = 0;
     size_t row_count = 0;
 
-    while( core::SQLFetchScroll( stmt, SQL_FETCH_NEXT, 0 TSRMLS_CC ) != SQL_NO_DATA ) {
-
-        // allocate the row buffer
-        unsigned char* row = static_cast<unsigned char*>( sqlsrv_malloc( offset ));
-        memset( row, 0, offset );
-
-        // read the fields into the row buffer
-        for( SQLSMALLINT i = 0; i < col_count; ++i ) {
-
-            SQLLEN out_buffer_temp = SQL_NULL_DATA;
-            SQLPOINTER buffer;
-            SQLLEN* out_buffer_length = &out_buffer_temp;
-
-            switch( meta[i].c_type ) {
-
-                case SQL_C_CHAR:
-                case SQL_C_WCHAR:
-                case SQL_C_BINARY:
-                    if( meta[i].length == sqlsrv_buffered_result_set::meta_data::SIZE_UNKNOWN ) {
-                        out_buffer_length = &out_buffer_temp;
-                        SQLPOINTER* lob_addr = reinterpret_cast<SQLPOINTER*>( &row[ meta[i].offset ] );
-                        *lob_addr = read_lob_field( stmt, i, meta[i], mem_used TSRMLS_CC );
-                        // a NULL pointer means NULL field
-                        if( *lob_addr == NULL ) {
-                            *out_buffer_length = SQL_NULL_DATA;
-                        }
-                        else {
-                            *out_buffer_length = **reinterpret_cast<SQLLEN**>( lob_addr );
-                            mem_used += *out_buffer_length;
-                        }
-                    }
-                    else {
-                        mem_used += meta[i].length;
-                        CHECK_CUSTOM_ERROR( mem_used > stmt->buffered_query_limit * 1024, stmt,
-                                            SQLSRV_ERROR_BUFFER_LIMIT_EXCEEDED, stmt->buffered_query_limit ) {
-                            throw core::CoreException();
-                        }
-                        buffer = row + meta[i].offset + sizeof( SQLULEN );
-                        out_buffer_length = reinterpret_cast<SQLLEN*>( row + meta[i].offset );
-                        core::SQLGetData( stmt, i + 1, meta[i].c_type, buffer, meta[i].length, out_buffer_length,
-                                          false TSRMLS_CC );
-                    }
-                    break;
-                case SQL_C_LONG:
-                case SQL_C_DOUBLE:
-                {
-                    mem_used += meta[i].length;
-                    CHECK_CUSTOM_ERROR( mem_used > stmt->buffered_query_limit * 1024, stmt,
-                                        SQLSRV_ERROR_BUFFER_LIMIT_EXCEEDED, stmt->buffered_query_limit ) {
-                        throw core::CoreException();
-                    }
-                    buffer = row + meta[i].offset;
-                    out_buffer_length = &out_buffer_temp;
-                    core::SQLGetData( stmt, i + 1, meta[i].c_type, buffer, meta[i].length, out_buffer_length,
-                                      false TSRMLS_CC );
-                }
-                break;
-                default:
-                    SQLSRV_ASSERT( false, "Unknown C type" );
-                    break;
-            }
-
-            row_count++;
-            if( *out_buffer_length == SQL_NULL_DATA ) {
-                set_bit( row, i );
-            }
-        }
-
-        SQLSRV_ASSERT( row_count < INT_MAX, "Hard maximum of 2 billion rows exceeded in a buffered query" );
-
-        // add it to the cache
-        row_dtor_closure cl( this, row );
-        sqlsrv_zend_hash_next_index_insert_mem( *stmt, cache, &cl, sizeof(row_dtor_closure) TSRMLS_CC );
-    }
+    // 10 is an arbitrary number for now for the initial size of the cache
+    ALLOC_HASHTABLE( cache );
+    core::sqlsrv_zend_hash_init( *stmt, cache, 10 /* # of buckets */, cache_row_dtor /*dtor*/, 0 /*persistent*/ TSRMLS_CC );
+
+    try {
+        while( core::SQLFetchScroll( stmt, SQL_FETCH_NEXT, 0 TSRMLS_CC ) != SQL_NO_DATA ) {
+
+            // allocate the row buffer
+            sqlsrv_malloc_auto_ptr<unsigned char> rowAuto;
+            rowAuto = static_cast<unsigned char*>( sqlsrv_malloc( offset ));
+            unsigned char* row = rowAuto.get();
+            memset( row, 0, offset );
+
+            // read the fields into the row buffer
+            for( SQLSMALLINT i = 0; i < col_count; ++i ) {
+
+                SQLLEN out_buffer_temp = SQL_NULL_DATA;
+                SQLPOINTER buffer;
+                SQLLEN* out_buffer_length = &out_buffer_temp;
+
+                switch( meta[i].c_type ) {
+
+                    case SQL_C_CHAR:
+                    case SQL_C_WCHAR:
+                    case SQL_C_BINARY:
+                        if( meta[i].length == sqlsrv_buffered_result_set::meta_data::SIZE_UNKNOWN ) {
+                            out_buffer_length = &out_buffer_temp;
+                            SQLPOINTER* lob_addr = reinterpret_cast<SQLPOINTER*>( &row[ meta[i].offset ] );
+                            *lob_addr = read_lob_field( stmt, i, meta[i], mem_used TSRMLS_CC );
+                            // a NULL pointer means NULL field
+                            if( *lob_addr == NULL ) {
+                                *out_buffer_length = SQL_NULL_DATA;
+                            }
+                            else {
+                                *out_buffer_length = **reinterpret_cast<SQLLEN**>( lob_addr );
+                                mem_used += *out_buffer_length;
+                            }
+                        }
+                        else {
+                            mem_used += meta[i].length;
+                            CHECK_CUSTOM_ERROR( mem_used > stmt->buffered_query_limit * 1024, stmt,
+                                                SQLSRV_ERROR_BUFFER_LIMIT_EXCEEDED, stmt->buffered_query_limit ) {
+                                throw core::CoreException();
+                            }
+                            buffer = row + meta[i].offset + sizeof( SQLULEN );
+                            out_buffer_length = reinterpret_cast<SQLLEN*>( row + meta[i].offset );
+                            core::SQLGetData( stmt, i + 1, meta[i].c_type, buffer, meta[i].length, out_buffer_length,
+                                              false TSRMLS_CC );
+                        }
+                        break;
+                    case SQL_C_LONG:
+                    case SQL_C_DOUBLE:
+                    {
+                        mem_used += meta[i].length;
+                        CHECK_CUSTOM_ERROR( mem_used > stmt->buffered_query_limit * 1024, stmt,
+                                            SQLSRV_ERROR_BUFFER_LIMIT_EXCEEDED, stmt->buffered_query_limit ) {
+                            throw core::CoreException();
+                        }
+                        buffer = row + meta[i].offset;
+                        out_buffer_length = &out_buffer_temp;
+                        core::SQLGetData( stmt, i + 1, meta[i].c_type, buffer, meta[i].length, out_buffer_length,
+                                          false TSRMLS_CC );
+                    }
+                    break;
+                    default:
+                        SQLSRV_ASSERT( false, "Unknown C type" );
+                        break;
+                }
+
+                row_count++;
+                if( *out_buffer_length == SQL_NULL_DATA ) {
+                    set_bit( row, i );
+                }
+            }
+
+            SQLSRV_ASSERT( row_count < INT_MAX, "Hard maximum of 2 billion rows exceeded in a buffered query" );
+
+            // add it to the cache
+            row_dtor_closure cl( this, row );
+            sqlsrv_zend_hash_next_index_insert_mem( *stmt, cache, &cl, sizeof(row_dtor_closure) TSRMLS_CC );
+            rowAuto.transferred();
+        }
+    }
+    catch( core::CoreException& ) {
+        // free the rows
+        if( cache ) {
+            zend_hash_destroy( cache );
+            FREE_HASHTABLE( cache );
+            cache = NULL;
+        }
+        throw;
+    }
@@ -755,12 +768,6 @@ sqlsrv_buffered_result_set::~sqlsrv_buffered_result_set( void )
         FREE_HASHTABLE( cache );
         cache = NULL;
     }
-
-    // free the meta data
-    if( meta ) {
-        efree( meta );
-        meta = NULL;
-    }
 }
 
 SQLRETURN sqlsrv_buffered_result_set::fetch( SQLSMALLINT orientation, SQLLEN offset TSRMLS_DC )
SQLRETURN sqlsrv_buffered_result_set::fetch( SQLSMALLINT orientation, SQLLEN offset TSRMLS_DC )


@@ -1484,7 +1484,7 @@ struct sqlsrv_buffered_result_set : public sqlsrv_result_set {
     HashTable* cache;                          // rows of data kept in index based hash table
     SQLSMALLINT col_count;                     // number of columns in the current result set
-    meta_data* meta;                           // metadata for fields in the cache
+    sqlsrv_malloc_auto_ptr<meta_data> meta;    // metadata for fields in the cache
     SQLLEN current;                            // 1 based, 0 means before first row
     sqlsrv_error_auto_ptr last_error;          // if an error occurred, it is kept here
     SQLUSMALLINT last_field_index;             // the last field data retrieved from


@@ -208,7 +208,11 @@ void sqlsrv_stmt::new_result_set( TSRMLS_D )
     // create a new result set
     if( cursor_type == SQLSRV_CURSOR_BUFFERED ) {
-        current_results = new (sqlsrv_malloc( sizeof( sqlsrv_buffered_result_set ))) sqlsrv_buffered_result_set( this TSRMLS_CC );
+        sqlsrv_malloc_auto_ptr<sqlsrv_buffered_result_set> result;
+        result = reinterpret_cast<sqlsrv_buffered_result_set*> ( sqlsrv_malloc( sizeof( sqlsrv_buffered_result_set ) ) );
+        new ( result.get() ) sqlsrv_buffered_result_set( this TSRMLS_CC );
+        current_results = result.get();
+        result.transferred();
     }
     else {
         current_results = new (sqlsrv_malloc( sizeof( sqlsrv_odbc_result_set ))) sqlsrv_odbc_result_set( this );
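For context on the last hunk: the old code ran placement new on a raw sqlsrv_malloc block and assigned the result straight to current_results, so an exception thrown from the sqlsrv_buffered_result_set constructor (for example when the buffered-query limit is exceeded while rows are being cached) leaked that block. A minimal standalone sketch of the same guard-then-transfer construction, written against std::unique_ptr and a hypothetical result-set type rather than the driver's helpers:

#include <cstdlib>
#include <memory>
#include <new>
#include <stdexcept>

// Hypothetical stand-in for sqlsrv_buffered_result_set: its constructor can
// throw while it buffers rows (e.g. when the buffer limit is hit).
struct buffered_result_set {
    explicit buffered_result_set( long limit_kb )
    {
        if( limit_kb <= 0 ) throw std::runtime_error( "buffered query limit exceeded" );
    }
};

buffered_result_set* make_buffered( long limit_kb )
{
    // Guard the raw allocation so it is released if the constructor throws.
    std::unique_ptr<void, void (*)( void* )> mem( std::malloc( sizeof( buffered_result_set ) ), std::free );
    if( !mem ) throw std::bad_alloc();

    buffered_result_set* result = new( mem.get() ) buffered_result_set( limit_kb );  // placement new

    mem.release();    // construction succeeded; ownership passes to the caller
    return result;    // caller must destroy it and free the memory when done
}

The diff achieves the same effect with sqlsrv_malloc_auto_ptr plus transferred(), keeping the driver's own allocator and error-handling conventions.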