MySQL optimization of insert sequences - C++

I have a realtime application that processes information and logs it to a MySQL database (actually MariaDB, a fork of MySQL). It does around 1.5 million inserts a day plus 150,000 deletes.
I am having serious performance problems and don't know how to make it run any better.
The basic structure of the application is that I have a producer class that pushes a struct onto a thread-safe deque, and the following consumer code writes it to the database:
#include "dbUserQueue.h"

dbUserQueue::~dbUserQueue() {
}

void dbUserQueue::createConnection()
{
    sql::Driver * driver = sql::mysql::get_driver_instance();
    std::auto_ptr< sql::Connection > newCon(driver->connect(dbURL, dbUser, dbPass));
    con = newCon;

    std::auto_ptr< sql::Statement > stmt(con->createStatement());
    stmt->execute("USE twitter");
}
inline void dbUserQueue::updateStatement(const std::string & value,
                                         std::auto_ptr< sql::PreparedStatement> & stmt, const int index)
{
    if(value != "\0") stmt->setString(index, value);
    else stmt->setNull(index, sql::DataType::VARCHAR);
}

inline void dbUserQueue::updateStatement(const boost::int64_t & value,
                                         std::auto_ptr< sql::PreparedStatement> & stmt, const int index)
{
    if(value != -1) stmt->setInt64(index, value);
    else stmt->setNull(index, sql::DataType::BIGINT);
}

inline void dbUserQueue::updateStatement(const bool value,
                                         std::auto_ptr< sql::PreparedStatement> & stmt, const int index)
{
    stmt->setBoolean(index, value);
}

inline void dbUserQueue::updateStatement(const int value,
                                         std::auto_ptr< sql::PreparedStatement> & stmt, const int index)
{
    if(value != -1) stmt->setInt(index, value);
    else stmt->setNull(index, sql::DataType::INTEGER);
}

inline void dbUserQueue::updateStatementDateTime(const std::string & value,
                                                 std::auto_ptr< sql::PreparedStatement> & stmt, const int & index)
{
    if(value != "\0") stmt->setDateTime(index, value);
    else stmt->setNull(index, sql::DataType::DATE);
}
/*
 * This method creates a database connection
 * and then creates a new thread to process the incoming queue
 */
void dbUserQueue::start() {
    createConnection();
    if(con->isClosed() == false)
    {
        insertStmt = std::auto_ptr< sql::PreparedStatement>(con->prepareStatement("\
            insert ignore into users(contributors_enabled, created_at, \
            description, favourites_count, followers_count, \
            following, friends_count, geo_enabled, id, lang, listed_count, location, \
            name, notifications, screen_name, show_all_inline_media, statuses_count, \
            url, utc_offset, verified) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"));
    }
    thread = boost::thread(&dbUserQueue::processLoop, this);
}
/*
 * Stops the thread once it is finished processing the information
 */
void dbUserQueue::join(){
    thread.interrupt();
    thread.join();
}
/*
 * The worker function of the thread.
 * Pops items from the queue and updates the database accordingly.
 */
void dbUserQueue::processLoop() {
    user input;
    int recordCount = 0;
    con->setAutoCommit(false);
    while (true) {
        try {
            if(recordCount >= 1000)
            {
                recordCount = 0;
                con->commit();
            }
            // Insert all the data into the prepared statement
            if (userQ.wait_and_pop(input)) {
                updateStatement(input.contributors_enabled, insertStmt, 1);
                updateStatementDateTime(input.created_at, insertStmt, 2);
                updateStatement(input.description, insertStmt, 3);
                updateStatement(input.favourites_count, insertStmt, 4);
                updateStatement(input.followers_count, insertStmt, 5);
                updateStatement(input.following, insertStmt, 6);
                updateStatement(input.friends_count, insertStmt, 7);
                updateStatement(input.geo_enabled, insertStmt, 8);
                updateStatement(input.id, insertStmt, 9);
                updateStatement(input.lang, insertStmt, 10);
                updateStatement(input.listed_count, insertStmt, 11);
                updateStatement(input.location, insertStmt, 12);
                updateStatement(input.name, insertStmt, 13);
                updateStatement(input.notifications, insertStmt, 14);
                updateStatement(input.screenName, insertStmt, 15);
                updateStatement(input.show_all_inline_media, insertStmt, 16);
                updateStatement(input.statuses_count, insertStmt, 17);
                updateStatement(input.url, insertStmt, 18);
                updateStatement(input.utc_offset, insertStmt, 19);
                updateStatement(input.verified, insertStmt, 20);

                insertStmt->executeUpdate();
                insertStmt->clearParameters();
                recordCount++;
                continue;
            }
        } catch (std::exception & e) {
        }
    } // end of while

    // Close the statements and the connection before exiting
    insertStmt->close();
    con->commit();
    if(con->isClosed() == false)
        con->close();
}
My question is: how do I improve the performance? Things I have tried:
Having multiple consumers connecting to one MySQL/MariaDB instance
Committing after a large number of records

Single producer, single consumer, commit after 1000 records = ~275 seconds
Dual producer, triple consumers, commit after 1000 records = ~100 seconds
Dual producer, triple consumers, commit after 2000 records = ~100 seconds
Dual producer, triple consumers, commit every 1 record = ~100 seconds
Dual producer, 6 consumers, commit every 1 record = ~95 seconds
Dual producer, 6 consumers, commit every 2000 records = ~100 seconds
Triple producer, 6 consumers, commit every 2000 records = ~100 seconds
A couple of notes on the problem domain: the messages to insert and/or delete arrive randomly throughout the day, averaging ~20 inserts/deletes per second with much higher bursts, but there is no reason the updates can't be queued for a short period, as long as the queue doesn't grow too large.
The table that the data is currently being inserted into has approximately 52 million records in it. Here is the MySQL table definition:
CREATE TABLE `users` (
  `id` bigint(20) unsigned NOT NULL,
  `contributors_enabled` tinyint(4) DEFAULT '0',
  `created_at` datetime NOT NULL,
  `description` varchar(255) DEFAULT NULL,
  `favourites_count` int(11) NOT NULL,
  `followers_count` int(11) DEFAULT NULL,
  `following` varchar(255) DEFAULT NULL,
  `friends_count` int(11) NOT NULL,
  `geo_enabled` tinyint(4) DEFAULT '0',
  `lang` varchar(255) DEFAULT NULL,
  `listed_count` int(11) DEFAULT NULL,
  `location` varchar(255) DEFAULT NULL,
  `name` varchar(255) DEFAULT NULL,
  `notifications` varchar(45) DEFAULT NULL,
  `screen_name` varchar(45) NOT NULL,
  `show_all_inline_media` tinyint(4) DEFAULT NULL,
  `statuses_count` int(11) NOT NULL,
  `url` varchar(255) DEFAULT NULL,
  `utc_offset` int(11) DEFAULT NULL,
  `verified` tinyint(4) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `id_UNIQUE` (`id`)
) ENGINE=MARIA DEFAULT CHARSET=latin1 CHECKSUM=1 PAGE_CHECKSUM=1 TRANSACTIONAL=1

You could change the code to do bulk inserts, rather than insert one row at a time.
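For example, you could pull a batch of rows off the queue and send them as a single multi-row INSERT IGNORE, so one round trip (and one index update pass) covers many rows. A minimal sketch reusing the Connector/C++ objects from your code, reduced to three of the twenty columns for brevity (kBatchSize is just a number to tune, and a real version would also flush a partial batch on shutdown):

// Sketch only: one prepared statement holding kBatchSize placeholder tuples.
static const int kBatchSize = 500;   // illustrative; tune against acceptable queue latency
static const int kCols      = 3;     // id, screen_name, created_at in this reduced sketch

std::string sql = "insert ignore into users(id, screen_name, created_at) values ";
for (int i = 0; i < kBatchSize; ++i)
    sql += (i == 0 ? "(?,?,?)" : ",(?,?,?)");

std::auto_ptr< sql::PreparedStatement > batchStmt(con->prepareStatement(sql));

int row = 0;
user input;
while (userQ.wait_and_pop(input)) {
    const int base = row * kCols;                 // first parameter slot for this row
    batchStmt->setInt64(base + 1, input.id);
    batchStmt->setString(base + 2, input.screenName);
    batchStmt->setDateTime(base + 3, input.created_at);
    if (++row == kBatchSize) {                    // batch full: one round trip inserts the whole batch
        batchStmt->executeUpdate();
        batchStmt->clearParameters();
        con->commit();
        row = 0;
    }
}

Combined with committing once per batch, this usually removes most of the per-statement overhead; how large the batch can be is bounded only by how long you are willing to let the queue grow.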

Related

SQLite3 slow execution (ESP32 & C++)?

I have set up the SQLite3 C interface on an ESP32 (compiler: Arduino IDE) with a CPU frequency of 240 MHz.
All the functions work properly, except the sqlite3_step() function.
This command is "incredibly" slow.
My function for inserting 3 data pairs per element of a vector of size 1k (vector<struct DATA>):
struct DATA {
    pair<int, byte> id;
    pair<int, byte> value;
    pair<string, byte> s_timestamp;
};

bool ESPDB::insert_vector(const vector<struct DATA> &data_buffer) {
    char *errmsg;
    if(sqlite3_open(("/sdcard/" + database_).c_str(), &db) != SQLITE_OK){
        return 0;
    }
    sqlite3_stmt *stmt;
    sqlite3_prepare_v2(db, ("INSERT INTO " + table_ + " " + columns_ + " VALUES " +
                            placeholder_).c_str(), -1, &stmt, 0);
    sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, &errmsg);
    for (const struct DATA &data_pair : data_buffer){
        if(data_pair.id.first != 0 && data_pair.id.second != 0){
            sqlite3_bind_int(stmt, data_pair.id.second, data_pair.id.first);
        }
        if(data_pair.value.first != 0 && data_pair.value.second != 0){
            sqlite3_bind_int(stmt, data_pair.value.second, data_pair.value.first);
        }
        if(data_pair.s_timestamp.first.length() > 0 && data_pair.s_timestamp.second != 0){
            sqlite3_bind_text(stmt, data_pair.s_timestamp.second, data_pair.s_timestamp.first.c_str(),
                              data_pair.s_timestamp.first.length(), SQLITE_STATIC);
        }
        sqlite3_step(stmt);
        sqlite3_reset(stmt);
    }
    sqlite3_exec(db, "END TRANSACTION", NULL, NULL, &errmsg);
    sqlite3_finalize(stmt);
    sqlite3_close(db);
    return 1;
}
The average result of the performance test was around 3000 ms for the 1k vector (a few times it executed in less than 300 ms).
The step command itself takes around 16-25 ms for each row insert.
The strange thing is that the sqlite3_step() command speeds up after around 100 inserts, ending at 0 ms (<1 ms) per row.
The sequence of the code is as follows:
-open database
-drop table
-create table if not exists
-single insert function (single DATA struct)
-select query
-insert the vector (size = 1000)
-select query in JSON
In ALL functions that handle the database connection, I open and close the database with code like the following.
Drop-Table Example:
bool ESPDB::drop_table(const string &table){
    if(sqlite3_open(("/sdcard/" + database_).c_str(), &db) != SQLITE_OK){
        return 0;
    }
    sql_stmt_(("DROP TABLE IF EXISTS " + table).c_str());
    sqlite3_close(db);
    return 1;
}
Update:
opened database only once
ensured 240MHz CPU speed
Here is a chart for visualisation:
(The issue is that later on I have a vector size of 250 due to the small available heap in my code, which makes it really slow. As you can see, the first ~130 inserts take around 15 ms each; after that it operates really fast.)
[Chart 3: time to insert a vector of size 100 in a while loop, ~85 total executions]
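For reference, the "opened database only once" change from the update could look something like this sketch (open_once()/close_once() and the cached_stmt_ member are hypothetical names, not part of the original ESPDB class). Keeping both the connection and the prepared statement alive across batches means each insert_vector() call pays neither the open/close nor the re-prepare cost:

// Sketch only: open the database and prepare the INSERT a single time,
// then reuse both for every batch. Assumes 'db' starts out as nullptr.
bool ESPDB::open_once() {
    if (db != nullptr) return true;                      // already open
    if (sqlite3_open(("/sdcard/" + database_).c_str(), &db) != SQLITE_OK)
        return false;
    string sql = "INSERT INTO " + table_ + " " + columns_ + " VALUES " + placeholder_;
    return sqlite3_prepare_v2(db, sql.c_str(), -1, &cached_stmt_, 0) == SQLITE_OK;
}

void ESPDB::close_once() {
    sqlite3_finalize(cached_stmt_);                      // release the statement once, at the end
    cached_stmt_ = nullptr;
    sqlite3_close(db);
    db = nullptr;
}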

How do I bind a variable to a prepared statement with the SOCI library?

Currently my application only supports SQLite databases, but I would like to support both SQLite and MySQL databases, so I'm testing out the SOCI library to see if it does what I need. However, despite the examples and documentation, I can't figure out how SOCI handles prepared statements.
When using the SQLite C API, you prepare a statement:
sqlite3_stmt* statement;
sqlite3_prepare_v2( database_handle_pointer,
                    "SELECT * FROM table WHERE user_id=:id;",
                    -1,
                    &statement,
                    NULL );
And later you bind a value to the :id placeholder, execute the statement, and step through the results:
const sqlite3_int64 user_id = some_function_that_returns_a_user_id();
const int index = sqlite3_bind_parameter_index( statement, ":id" );
sqlite3_bind_int64( statement, index, user_id );

while ( sqlite3_step( statement ) == SQLITE_ROW )
{
    // Do something with the row
}
How do I do this with SOCI? It looks like the prepare and bind concepts are not separated like with the native SQLite API. Does the bind have to happen during the prepare using soci::use()?
Update 1: In case I'm not explaining the question well enough: Here's a small, working, C++ example using the SQLite C API. If I could see this re-implemented using SOCI, it would answer the question.
#include <sqlite3.h>
#include <iostream>

// Tables and data
const char* table = "CREATE TABLE test ( user_id INTEGER, name CHAR );";
const char* hank  = "INSERT INTO test (user_id,name) VALUES(1,'Hank');";
const char* bill  = "INSERT INTO test (user_id,name) VALUES(2,'Bill');";
const char* fred  = "INSERT INTO test (user_id,name) VALUES(3,'Fred');";

// Create a SQLite prepared statement to select a user from the test table.
sqlite3_stmt* make_statement( sqlite3* database )
{
    sqlite3_stmt* statement;
    sqlite3_prepare_v2( database,
                        "SELECT name FROM test WHERE user_id=:id;",
                        -1, &statement, NULL );
    return statement;
}

// Bind the requested user_id to the prepared statement.
void bind_statement( sqlite3_stmt* statement, const sqlite3_int64 user_id )
{
    const int index = sqlite3_bind_parameter_index( statement, ":id" );
    sqlite3_bind_int64( statement, index, user_id );
}

// Execute the statement and print the name of the selected user.
void execute_statement( sqlite3_stmt* statement )
{
    while ( sqlite3_step( statement ) == SQLITE_ROW )
    {
        std::cout << sqlite3_column_text( statement, 0 ) << "\n";
    }
}

int main()
{
    // Create an in-memory database.
    sqlite3* database;
    if ( sqlite3_open( ":memory:", &database ) != SQLITE_OK )
    {
        std::cerr << "Error creating database" << std::endl;
        return -1;
    }

    // Create a table and some rows.
    sqlite3_exec( database, table, NULL, NULL, NULL );
    sqlite3_exec( database, hank, NULL, NULL, NULL );
    sqlite3_exec( database, bill, NULL, NULL, NULL );
    sqlite3_exec( database, fred, NULL, NULL, NULL );

    sqlite3_stmt* statement = make_statement( database );
    bind_statement( statement, 2 );
    execute_statement( statement );

    // Cleanup
    sqlite3_finalize( statement );
    sqlite3_close( database );
    return 1;
}
The same program partially implemented using SOCI (Note the two stub functions marked as HELPME)
#include <soci/soci.h>
#include <iostream>

const char* table = "CREATE TABLE test ( user_id INTEGER, name CHAR );";
const char* hank  = "INSERT INTO test (user_id,name) VALUES(1,'Hank');";
const char* bill  = "INSERT INTO test (user_id,name) VALUES(2,'Bill');";
const char* fred  = "INSERT INTO test (user_id,name) VALUES(3,'Fred');";

soci::statement make_statement( soci::session& database )
{
    soci::statement statement =
        database.prepare << "SELECT name FROM test WHERE user_id=:id";
    return statement;
}

void bind_statement( soci::statement& statement, const int user_id )
{
    // HELPME: What goes here?
}

void execute_statement( soci::statement& statement )
{
    // HELPME: What goes here?
}

int main()
{
    soci::session database( "sqlite3", ":memory:" );
    database << table;
    database << hank;
    database << bill;
    database << fred;

    soci::statement statement = make_statement( database );
    bind_statement( statement, 2 );
    execute_statement( statement );
}
Update 2: I ended up ditching SOCI when I found the cppdb library. Unlike SOCI, it is just a very thin wrapper around the native C APIs, which suits my needs at this time.
The documentation explains how to use prepared statements with parameters:
int user_id;
string name;
statement st = (database.prepare << "SELECT name FROM test WHERE user_id = :id",
                use(user_id),
                into(name));
user_id = 1;
st.execute(true);
Please note that the lifetime of the user_id and name variables must be at least as long as that of st.
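Applied to the stub-function example above, the whole thing might collapse into something like this sketch (the bind targets are fixed at prepare time, so there is no separate bind step; execute() starts the query and fetch() plays the role of sqlite3_step()):

#include <soci/soci.h>
#include <iostream>

int main()
{
    soci::session database( "sqlite3", ":memory:" );
    database << "CREATE TABLE test ( user_id INTEGER, name CHAR )";
    database << "INSERT INTO test (user_id,name) VALUES(2,'Bill')";

    int user_id = 0;
    std::string name;

    // Prepare once; :id and the selected column are tied to the local variables.
    soci::statement st = ( database.prepare
                           << "SELECT name FROM test WHERE user_id = :id",
                           soci::use( user_id ),
                           soci::into( name ) );

    user_id = 2;            // "binding" is just assigning the variable before executing
    st.execute();           // run the query; no row has been fetched yet
    while ( st.fetch() )    // step through the result rows
    {
        std::cout << name << "\n";
    }
}

Because the variables have to be declared before the statement that uses them, the three separate make/bind/execute functions from the SQLite version do not map one-to-one onto SOCI.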

sqlite query not prepared, row inserted but empty values

I am writing a function to prepare a SQL query and execute it against a SQLite db. I am using a query to insert values into a table; the query looks like this:
"insert into files values (?, ?, ?, ?, ?, 0)";
A row is inserted, but all the values for the text fields are empty except the last field, which is 0. The first 5 fields are of type TEXT.
// If I hard code a value for value.data() then the row is inserted correctly with my hardcoded data, the exact line is below
status = sqlite3_bind_text(ppStmt, index, /* if I hardcode it works*/ value.data(), -1, SQLITE_STATIC);
The full function is shown below. The list is created on the stack by the caller. I am not sure why it is not replacing the question marks with my string args; hardcoded ones work though, and no error codes are returned.
// db is already open before I call this
void MediaCache::prepareAndExecuteQuery(string query, list<string> args)
{
    sqlite3_stmt *ppStmt = 0;
    const char **pzTail = 0;
    int status = 0;
    if( sqlite3_prepare_v2(db, query.data(), query.length(), &ppStmt, pzTail) != SQLITE_OK )
    {
        string error = sqlite3_errmsg(db);
        // throw an exception
    }
    if(ppStmt)
    {
        list<string>::iterator current = args.begin();
        int index = 1;
        for(current = args.begin(); current != args.end(); current++)
        {
            string value = *current;
            status = sqlite3_bind_text(ppStmt, index, value.data(), -1, SQLITE_STATIC);
            if(status != SQLITE_OK)
            {
                // log error;
            }
            index++;
        }
        status = sqlite3_step(ppStmt);
        status = sqlite3_finalize(ppStmt);
        //sqlite3_exec(db, "COMMIT", NULL, NULL, NULL);
    }
    else
    {
        // ppStmt is null
        // throw an exception
    }
}
I suspect the problem is here:
string value = *current;
status = sqlite3_bind_text(ppStmt, index, value.data(), -1, SQLITE_STATIC);
In this case value goes out of scope before the execution of the statement takes place. So the pointers given to sqlite3_bind_text are no longer valid. To "fix" that, you could use SQLITE_TRANSIENT, which would force the library to make its own copy of the data before returning.
Also, if you are not using C++11, I don't believe string::data() is guaranteed to be null-terminated, in which case the -1 parameter could be incorrect; it should perhaps be value.length() instead.
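As a minimal sketch of that fix, only the bind call in the loop needs to change (an explicit length plus SQLITE_TRANSIENT, so SQLite copies the bytes before value goes out of scope):

for(current = args.begin(); current != args.end(); current++)
{
    const string &value = *current;
    // SQLITE_TRANSIENT makes SQLite take its own copy immediately, so it no
    // longer matters that the string is destroyed before sqlite3_step() runs.
    status = sqlite3_bind_text(ppStmt, index, value.data(),
                               static_cast<int>(value.length()), SQLITE_TRANSIENT);
    if(status != SQLITE_OK)
    {
        // log error
    }
    index++;
}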

Have successfully inserted blob data into a SQLite database but can't get the inserted data back

I use a BLOB to insert an object into a SQLite database. After the insertion, I can get the data with a SELECT statement and the data is correct, although the "TASK_HEAD" column shows as "Empty" when browsing the database with "SQLite Database Browser".
However, if I destroy the object which has just been inserted, I can't get the correct data anymore: the pointer "pHead" points to an address where the content of its "id" member is garbage ("铪铪铪…UG?") when read in VS2008 in debug mode.
Here is an example:
// user-defined data type
typedef std::string TASK_ID;

struct TASK_HEAD
{
    TASK_ID id;
    std::string userData;

    int Size()
    {
        return (id.size() + userData.size()) * sizeof(TCHAR);
    }
};

// when TEST_INSIDE is defined, pHead is invalid; but if undef it, I can get the "head" I just inserted
// and if the blob data is a string (when USING_STRING is defined), I can get the string inserted into the db even though the "test" string has been destroyed
void CDBWriter::WriteTestData()
{
    // open db
    sqlite3* db = NULL;
    int nRet = sqlite3_open(DATABASE_NAME.c_str(), &db);
    if (nRet != SQLITE_OK)
    {
        return;
    }
    if (db != NULL)
    {
        // create a table
        std::string cmdCreate("CREATE TABLE IF NOT EXISTS testTable (id TEXT NOT NULL, TASK_HEAD BLOB, PRIMARY KEY(id));");
        char* errMsg = NULL;
        nRet = sqlite3_exec( db , cmdCreate.c_str() , 0 , 0 , &errMsg );
        if( errMsg != NULL )
        {
            sqlite3_free( errMsg );
            errMsg = NULL;
            return;
        }

//#define USING_STRING
#define TEST_INSIDE

#ifndef TEST_INSIDE
        TASK_HEAD head;
#endif // TEST_INSIDE

        // insert blob data
        const TASK_ID newID(NewGUID()); // NewGUID returns string like this: "5811307F-7AA7-4C44-831F-774FC5832627"
        string query = "INSERT OR REPLACE INTO testTable (id, TASK_HEAD) VALUES ('";
        query += newID;
        query += "', ?1);";
        sqlite3_stmt* res = NULL;
        nRet = sqlite3_prepare_v2(db, query.c_str(), query.length(), &res, 0);
        {
#ifdef TEST_INSIDE
            TASK_HEAD head;
#endif // TEST_INSIDE
            head.id = newID;
#ifdef USING_STRING
            std::string test("ewsjoafijdoaijeofsafsd");
            nRet = sqlite3_bind_blob (res, 1, test.c_str(), test.size(), SQLITE_TRANSIENT);
#else
            int nsizeHead = sizeof(head);
            int nSizeHeadSt = sizeof(TASK_HEAD);
            int sizeString = sizeof(std::string);
            size_t nLen = newID.size();
            //nRet = sqlite3_bind_blob (res, 1, &head, sizeof(head), SQLITE_TRANSIENT);
            nRet = sqlite3_bind_blob (res, 1, &head, head.Size(), SQLITE_TRANSIENT);
#endif // USING_STRING
            if (SQLITE_OK == nRet)
            {
                nRet = sqlite3_step(res);
            }
            if (nRet != SQLITE_OK && nRet != SQLITE_DONE)
            {
                return;
            }
        }

        // get all columns in the database
        query = "SELECT * FROM testTable;";
        nRet = sqlite3_prepare_v2 (db, query.c_str(), query.length(), &res, 0);
        if (SQLITE_OK == nRet)
        {
            while (SQLITE_ROW == sqlite3_step(res))
            {
#ifdef USING_STRING
                const char* pHead = (const char*)sqlite3_column_blob(res, 1);
#else
                const TASK_HEAD *pHead = (const TASK_HEAD*)sqlite3_column_blob(res, 1);
#endif // USING_STRING
                continue;
            }
        }
        sqlite3_finalize(res);
        sqlite3_close(db);
    }
}
At first, I thought it might be a problem with the number of bytes passed to sqlite3_bind_blob, so I computed the size of the object with a clumsy method, as you can see above (the Size() function of TASK_HEAD), but that didn't help.
Then I tried using SQLITE_STATIC instead of SQLITE_TRANSIENT; still not working.
What's wrong?
PS: I know it's a bad solution to insert an object into the db like this; I just want to know why I can't read back the data I inserted.
The content of userData is likely to be stored on the heap. Even if it's stored inside the std::string (for SSO) it still may use a pointer to itself internally, and so it won't work when you bitwise copy it to another place in memory (what you're doing is equivalent to a memcpy).
However, it doesn't matter why exactly it doesn't work, since it's just undefined behavior. Just don't "insert an object to the db" like this. Either serialize it using some serialization library and then insert it, or use two columns in the table, one for id and one for userData.
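For illustration, the two-column variant might look like the following sketch (the table and column names here are just examples; both fields are bound as TEXT, so nothing depends on the in-memory layout of TASK_HEAD):

// Sketch only: store the two string members in their own columns
// instead of dumping the struct's bytes as a blob.
sqlite3_exec(db,
             "CREATE TABLE IF NOT EXISTS taskTable "
             "(id TEXT NOT NULL PRIMARY KEY, userData TEXT);",
             0, 0, 0);

sqlite3_stmt* ins = NULL;
sqlite3_prepare_v2(db,
                   "INSERT OR REPLACE INTO taskTable (id, userData) VALUES (?1, ?2);",
                   -1, &ins, 0);

// SQLite copies the characters itself (SQLITE_TRANSIENT), so the strings may
// be destroyed afterwards without affecting the stored row.
sqlite3_bind_text(ins, 1, head.id.c_str(),       (int)head.id.size(),       SQLITE_TRANSIENT);
sqlite3_bind_text(ins, 2, head.userData.c_str(), (int)head.userData.size(), SQLITE_TRANSIENT);
sqlite3_step(ins);
sqlite3_finalize(ins);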
I think the problem is at:
nRet = sqlite3_bind_blob (res, 1, &head, head.Size(), SQLITE_TRANSIENT);
You cannot just take the address of the TASK_HEAD structure and pass it to SQLite like this. To build a blob you need flat data: nothing with pointers or dynamic buffers like std::string objects.
You need to serialize the TASK_HEAD structure in a buffer before the binding operation.
For instance:
struct TASK_HEAD
{
    TASK_ID id;
    std::string userData;

    std::string Data()
    {
        return id + userData;
    }

    int Size()
    {
        return (id.size() + userData.size()) * sizeof(TCHAR);
    }
};
and:
const std::string flat = head.Data();
nRet = sqlite3_bind_blob (res, 1, flat.data(), flat.size(), SQLITE_TRANSIENT);
Please note that the serialization shown above is very poor (this format cannot be deserialized, since there is no way to tell where id ends and userData begins). To deal with blobs, you need to find a good serialization library or format (Protocol Buffers, MessagePack, JSON, etc.) or roll your own.
There is a second issue in your code at:
const TASK_HEAD *pHead = (const TASK_HEAD*)sqlite3_column_blob(res, 1);
This will not work, for a similar reason: the bytes returned by sqlite3_column_blob() are not a valid TASK_HEAD object; they have to be deserialized back into one.
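If you do want to keep a single blob column, a format that can actually be round-tripped can be as simple as a length prefix in front of each field. A rough sketch (the helper names are made up, the lengths are stored in host byte order, and bounds checking is omitted for brevity):

#include <cstdint>
#include <cstring>
#include <string>

// Append a 4-byte length followed by the raw characters of the field.
static void putField(std::string& out, const std::string& s)
{
    uint32_t len = (uint32_t)s.size();
    out.append(reinterpret_cast<const char*>(&len), sizeof(len));
    out.append(s);
}

// Read one length-prefixed field back out of the buffer, advancing 'pos'.
static std::string getField(const char* buf, size_t& pos)
{
    uint32_t len = 0;
    std::memcpy(&len, buf + pos, sizeof(len));
    pos += sizeof(len);
    std::string field(buf + pos, len);
    pos += len;
    return field;
}

// Writing:  std::string blob;
//           putField(blob, head.id);
//           putField(blob, head.userData);
//           sqlite3_bind_blob(res, 1, blob.data(), (int)blob.size(), SQLITE_TRANSIENT);
//
// Reading:  const char* p = (const char*)sqlite3_column_blob(res, 1);
//           size_t pos = 0;
//           head.id       = getField(p, pos);
//           head.userData = getField(p, pos);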

SQLite multi insert from C++ just adds the first one

I have the following code for SQLite:
std::vector<std::vector<std::string> > InternalDatabaseManager::query(std::string query)
{
    sqlite3_stmt *statement;
    std::vector<std::vector<std::string> > results;
    if(sqlite3_prepare_v2(internalDbManager, query.c_str(), -1, &statement, 0) == SQLITE_OK)
    {
        int cols = sqlite3_column_count(statement);
        int result = 0;
        while(true)
        {
            result = sqlite3_step(statement);
            std::vector<std::string> values;
            if(result == SQLITE_ROW)
            {
                for(int col = 0; col < cols; col++)
                {
                    std::string s;
                    char *ptr = (char*)sqlite3_column_text(statement, col);
                    if(ptr) s = ptr;
                    values.push_back(s);
                }
                results.push_back(values);
            }
            else
            {
                break;
            }
        }
        sqlite3_finalize(statement);
    }
    std::string error = sqlite3_errmsg(internalDbManager);
    if(error != "not an error") std::cout << query << " " << error << std::endl;
    return results;
}
When I try to pass a query string like:
INSERT INTO CpuUsage (NODE_ID, TIME_ID, CORE_ID, USER, NICE, SYSMODE, IDLE, IOWAIT, IRQ, SOFTIRQ, STEAL, GUEST) VALUES (1, 1, -1, 1014711, 117915, 175551, 5908257, 112996, 2613, 4359, 0, 0);
INSERT INTO CpuUsage (NODE_ID, TIME_ID, CORE_ID, USER, NICE, SYSMODE, IDLE, IOWAIT, IRQ, SOFTIRQ, STEAL, GUEST) VALUES (1, 1, 0, 1014711, 117915, 175551, 5908257, 112996, 2613, 4359, 0, 0);
INSERT INTO CpuUsage (NODE_ID, TIME_ID, CORE_ID, USER, NICE, SYSMODE, IDLE, IOWAIT, IRQ, SOFTIRQ, STEAL, GUEST) VALUES (1, 1, 1, 1014711, 117915, 175551, 5908257, 112996, 2613, 4359, 0, 0);
It ends up performing just the first insert. Using some other tool like SQLiteStudio, it works fine.
Any ideas to help me, please?
Thanks,
Pedro
EDIT
My query is a std::string.
const char** pzTail;
const char* q = query.c_str();
int result = -1;
do {
    result = sqlite3_prepare_v2(internalDbManager, q, -1, &statement, pzTail);
    q = *pzTail;
} while(result == SQLITE_OK);
This gives me: cannot convert ‘const char*’ to ‘const char**’ for argument ‘5’ to ‘int sqlite3_prepare_v2(sqlite3*, const char*, int, sqlite3_stmt*, const char*)’
SQLite's prepare_v2 will only create a statement from the first insert in your string. You can think of it as a "pop front" mechanism.
int sqlite3_prepare_v2(
    sqlite3 *db,            /* Database handle */
    const char *zSql,       /* SQL statement, UTF-8 encoded */
    int nByte,              /* Maximum length of zSql in bytes. */
    sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    const char **pzTail     /* OUT: Pointer to unused portion of zSql */
);
From http://www.sqlite.org/c3ref/prepare.html
If pzTail is not NULL then *pzTail is made to point to the first byte
past the end of the first SQL statement in zSql. These routines only
compile the first statement in zSql, so *pzTail is left pointing to
what remains uncompiled.
The pzTail parameter will point to the rest of the inserts, so you can loop through them all until they have all been prepared.
The other option is to only do one insert at a time, which makes the rest of your handling code a little bit simpler usually.
Typically I have seen people do this sort of thing under the impression that they will be evaluated under the same transaction. This is not the case, though. The second one may fail and the first and third will still succeed.
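A minimal sketch of that pzTail loop, with an optional explicit transaction wrapped around it so the inserts really do succeed or fail together (error handling kept to a bare minimum):

// Sketch: prepare and run each statement in the string in turn; SQLite moves
// 'tail' past the statement it just compiled.
const char* tail = query.c_str();   // 'query' is the std::string from the question

sqlite3_exec(internalDbManager, "BEGIN", NULL, NULL, NULL);     // optional: one transaction
while (tail != NULL && *tail != '\0')
{
    sqlite3_stmt* stmt = NULL;
    if (sqlite3_prepare_v2(internalDbManager, tail, -1, &stmt, &tail) != SQLITE_OK)
        break;                      // stop on a compile error
    if (stmt == NULL)
        continue;                   // trailing whitespace or comments only
    sqlite3_step(stmt);             // run this INSERT
    sqlite3_finalize(stmt);
}
sqlite3_exec(internalDbManager, "COMMIT", NULL, NULL, NULL);

This also addresses the attempt in the edit: the fifth argument is a const char **, so the usual pattern is to declare a const char * and pass its address (&tail here), rather than dereferencing an uninitialized const char ** variable.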