From 2fdac39ff6057c49714dd24db2477cfa17e84ec4 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Fri, 27 Feb 2026 23:10:18 -0600 Subject: [PATCH 01/10] feat(network): add support for new status endpoint - automatically update the latest sent db_version value after sending payloads depending on the status returned by the cloudsync http service - the cloudsync_network_send_changes function now returns the status data in JSON format - the cloudsync_network_has_unsent_changes function now calls the status endpoint before comparing with the latest local db_version - only consider the db_version value in the WHERE clause of cloudsync_payload_get, don't filter at seq level --- src/cloudsync.c | 28 +- src/cloudsync.h | 2 +- src/database.h | 2 +- src/jsmn.h | 471 +++++++++++++++++++++++++++ src/network.c | 266 ++++++++++++--- src/network.m | 3 +- src/network_private.h | 3 +- src/postgresql/database_postgresql.c | 25 +- src/sqlite/database_sqlite.c | 15 +- test/unit.c | 4 +- 10 files changed, 717 insertions(+), 102 deletions(-) create mode 100644 src/jsmn.h diff --git a/src/cloudsync.c b/src/cloudsync.c index 221d77e..64e2ce6 100644 --- a/src/cloudsync.c +++ b/src/cloudsync.c @@ -2872,21 +2872,18 @@ int cloudsync_payload_apply (cloudsync_context *data, const char *payload, int b // MARK: - Payload load/store - -int cloudsync_payload_get (cloudsync_context *data, char **blob, int *blob_size, int *db_version, int *seq, int64_t *new_db_version, int64_t *new_seq) { +int cloudsync_payload_get (cloudsync_context *data, char **blob, int *blob_size, int *db_version, int64_t *new_db_version) { // retrieve current db_version and seq *db_version = dbutils_settings_get_int_value(data, CLOUDSYNC_KEY_SEND_DBVERSION); if (*db_version < 0) return DBRES_ERROR; - - *seq = dbutils_settings_get_int_value(data, CLOUDSYNC_KEY_SEND_SEQ); - if (*seq < 0) return DBRES_ERROR; // retrieve BLOB char sql[1024]; snprintf(sql, sizeof(sql), "WITH max_db_version AS (SELECT MAX(db_version) AS max_db_version 
FROM cloudsync_changes WHERE site_id=cloudsync_siteid()) " - "SELECT * FROM (SELECT cloudsync_payload_encode(tbl, pk, col_name, col_value, col_version, db_version, site_id, cl, seq) AS payload, max_db_version AS max_db_version, MAX(IIF(db_version = max_db_version, seq, 0)) FROM cloudsync_changes, max_db_version WHERE site_id=cloudsync_siteid() AND (db_version>%d OR (db_version=%d AND seq>%d))) WHERE payload IS NOT NULL", *db_version, *db_version, *seq); + "SELECT * FROM (SELECT cloudsync_payload_encode(tbl, pk, col_name, col_value, col_version, db_version, site_id, cl, seq) AS payload, max_db_version AS max_db_version FROM cloudsync_changes, max_db_version WHERE site_id=cloudsync_siteid() AND db_version>%d) WHERE payload IS NOT NULL", *db_version); int64_t len = 0; - int rc = database_select_blob_2int(data, sql, blob, &len, new_db_version, new_seq); + int rc = database_select_blob_int(data, sql, blob, &len, new_db_version); *blob_size = (int)len; if (rc != DBRES_OK) return rc; @@ -2904,12 +2901,11 @@ int cloudsync_payload_save (cloudsync_context *data, const char *payload_path, i // retrieve payload char *blob = NULL; - int blob_size = 0, db_version = 0, seq = 0; - int64_t new_db_version = 0, new_seq = 0; - int rc = cloudsync_payload_get(data, &blob, &blob_size, &db_version, &seq, &new_db_version, &new_seq); + int blob_size = 0, db_version = 0; + int64_t new_db_version = 0; + int rc = cloudsync_payload_get(data, &blob, &blob_size, &db_version, &new_db_version); if (rc != DBRES_OK) { if (db_version < 0) return cloudsync_set_error(data, "Unable to retrieve db_version", rc); - else if (seq < 0) return cloudsync_set_error(data, "Unable to retrieve seq", rc); return cloudsync_set_error(data, "Unable to retrieve changes in cloudsync_payload_save", rc); } @@ -2926,18 +2922,6 @@ int cloudsync_payload_save (cloudsync_context *data, const char *payload_path, i return cloudsync_set_error(data, "Unable to write payload to file path", DBRES_IOERR); } - // TODO: 
dbutils_settings_set_key_value remove context and return error here (in case of error) - // update db_version and seq - char buf[256]; - if (new_db_version != db_version) { - snprintf(buf, sizeof(buf), "%" PRId64, new_db_version); - dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_DBVERSION, buf); - } - if (new_seq != seq) { - snprintf(buf, sizeof(buf), "%" PRId64, new_seq); - dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_SEQ, buf); - } - // returns blob size if (size) *size = blob_size; return DBRES_OK; diff --git a/src/cloudsync.h b/src/cloudsync.h index d0718fa..43a2b67 100644 --- a/src/cloudsync.h +++ b/src/cloudsync.h @@ -83,7 +83,7 @@ int cloudsync_payload_encode_step (cloudsync_payload_context *payload, clouds int cloudsync_payload_encode_final (cloudsync_payload_context *payload, cloudsync_context *data); char *cloudsync_payload_blob (cloudsync_payload_context *payload, int64_t *blob_size, int64_t *nrows); size_t cloudsync_payload_context_size (size_t *header_size); -int cloudsync_payload_get (cloudsync_context *data, char **blob, int *blob_size, int *db_version, int *seq, int64_t *new_db_version, int64_t *new_seq); +int cloudsync_payload_get (cloudsync_context *data, char **blob, int *blob_size, int *db_version, int64_t *new_db_version); int cloudsync_payload_save (cloudsync_context *data, const char *payload_path, int *blob_size); // available only on Desktop OS (no WASM, no mobile) // CloudSync table context diff --git a/src/database.h b/src/database.h index acf98c6..31b3f7a 100644 --- a/src/database.h +++ b/src/database.h @@ -64,7 +64,7 @@ int database_exec_callback (cloudsync_context *data, const char *sql, database_ int database_select_int (cloudsync_context *data, const char *sql, int64_t *value); int database_select_text (cloudsync_context *data, const char *sql, char **value); int database_select_blob (cloudsync_context *data, const char *sql, char **value, int64_t *value_len); -int database_select_blob_2int (cloudsync_context 
*data, const char *sql, char **value, int64_t *value_len, int64_t *value2, int64_t *value3); +int database_select_blob_int (cloudsync_context *data, const char *sql, char **value, int64_t *value_len, int64_t *value2); int database_write (cloudsync_context *data, const char *sql, const char **values, DBTYPE types[], int lens[], int count); bool database_table_exists (cloudsync_context *data, const char *table_name, const char *schema); bool database_internal_table_exists (cloudsync_context *data, const char *name); diff --git a/src/jsmn.h b/src/jsmn.h new file mode 100644 index 0000000..dca2bb5 --- /dev/null +++ b/src/jsmn.h @@ -0,0 +1,471 @@ +/* + * MIT License + * + * Copyright (c) 2010 Serge Zaitsev + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef JSMN_H +#define JSMN_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef JSMN_STATIC +#define JSMN_API static +#else +#define JSMN_API extern +#endif + +/** + * JSON type identifier. Basic types are: + * o Object + * o Array + * o String + * o Other primitive: number, boolean (true/false) or null + */ +typedef enum { + JSMN_UNDEFINED = 0, + JSMN_OBJECT = 1 << 0, + JSMN_ARRAY = 1 << 1, + JSMN_STRING = 1 << 2, + JSMN_PRIMITIVE = 1 << 3 +} jsmntype_t; + +enum jsmnerr { + /* Not enough tokens were provided */ + JSMN_ERROR_NOMEM = -1, + /* Invalid character inside JSON string */ + JSMN_ERROR_INVAL = -2, + /* The string is not a full JSON packet, more bytes expected */ + JSMN_ERROR_PART = -3 +}; + +/** + * JSON token description. + * type type (object, array, string etc.) + * start start position in JSON data string + * end end position in JSON data string + */ +typedef struct jsmntok { + jsmntype_t type; + int start; + int end; + int size; +#ifdef JSMN_PARENT_LINKS + int parent; +#endif +} jsmntok_t; + +/** + * JSON parser. Contains an array of token blocks available. Also stores + * the string being parsed now and current position in that string. + */ +typedef struct jsmn_parser { + unsigned int pos; /* offset in the JSON string */ + unsigned int toknext; /* next token to allocate */ + int toksuper; /* superior token node, e.g. parent object or array */ +} jsmn_parser; + +/** + * Create JSON parser over an array of tokens + */ +JSMN_API void jsmn_init(jsmn_parser *parser); + +/** + * Run JSON parser. It parses a JSON data string into and array of tokens, each + * describing + * a single JSON object. + */ +JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len, + jsmntok_t *tokens, const unsigned int num_tokens); + +#ifndef JSMN_HEADER +/** + * Allocates a fresh unused token from the token pool. 
+ */ +static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser, jsmntok_t *tokens, + const size_t num_tokens) { + jsmntok_t *tok; + if (parser->toknext >= num_tokens) { + return NULL; + } + tok = &tokens[parser->toknext++]; + tok->start = tok->end = -1; + tok->size = 0; +#ifdef JSMN_PARENT_LINKS + tok->parent = -1; +#endif + return tok; +} + +/** + * Fills token type and boundaries. + */ +static void jsmn_fill_token(jsmntok_t *token, const jsmntype_t type, + const int start, const int end) { + token->type = type; + token->start = start; + token->end = end; + token->size = 0; +} + +/** + * Fills next available token with JSON primitive. + */ +static int jsmn_parse_primitive(jsmn_parser *parser, const char *js, + const size_t len, jsmntok_t *tokens, + const size_t num_tokens) { + jsmntok_t *token; + int start; + + start = parser->pos; + + for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { + switch (js[parser->pos]) { +#ifndef JSMN_STRICT + /* In strict mode primitive must be followed by "," or "}" or "]" */ + case ':': +#endif + case '\t': + case '\r': + case '\n': + case ' ': + case ',': + case ']': + case '}': + goto found; + default: + /* to quiet a warning from gcc*/ + break; + } + if (js[parser->pos] < 32 || js[parser->pos] >= 127) { + parser->pos = start; + return JSMN_ERROR_INVAL; + } + } +#ifdef JSMN_STRICT + /* In strict mode primitive must be followed by a comma/object/array */ + parser->pos = start; + return JSMN_ERROR_PART; +#endif + +found: + if (tokens == NULL) { + parser->pos--; + return 0; + } + token = jsmn_alloc_token(parser, tokens, num_tokens); + if (token == NULL) { + parser->pos = start; + return JSMN_ERROR_NOMEM; + } + jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos); +#ifdef JSMN_PARENT_LINKS + token->parent = parser->toksuper; +#endif + parser->pos--; + return 0; +} + +/** + * Fills next token with JSON string. 
+ */ +static int jsmn_parse_string(jsmn_parser *parser, const char *js, + const size_t len, jsmntok_t *tokens, + const size_t num_tokens) { + jsmntok_t *token; + + int start = parser->pos; + + /* Skip starting quote */ + parser->pos++; + + for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { + char c = js[parser->pos]; + + /* Quote: end of string */ + if (c == '\"') { + if (tokens == NULL) { + return 0; + } + token = jsmn_alloc_token(parser, tokens, num_tokens); + if (token == NULL) { + parser->pos = start; + return JSMN_ERROR_NOMEM; + } + jsmn_fill_token(token, JSMN_STRING, start + 1, parser->pos); +#ifdef JSMN_PARENT_LINKS + token->parent = parser->toksuper; +#endif + return 0; + } + + /* Backslash: Quoted symbol expected */ + if (c == '\\' && parser->pos + 1 < len) { + int i; + parser->pos++; + switch (js[parser->pos]) { + /* Allowed escaped symbols */ + case '\"': + case '/': + case '\\': + case 'b': + case 'f': + case 'r': + case 'n': + case 't': + break; + /* Allows escaped symbol \uXXXX */ + case 'u': + parser->pos++; + for (i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; + i++) { + /* If it isn't a hex character we have an error */ + if (!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */ + (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */ + (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */ + parser->pos = start; + return JSMN_ERROR_INVAL; + } + parser->pos++; + } + parser->pos--; + break; + /* Unexpected symbol */ + default: + parser->pos = start; + return JSMN_ERROR_INVAL; + } + } + } + parser->pos = start; + return JSMN_ERROR_PART; +} + +/** + * Parse JSON string and fill tokens. 
+ */ +JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len, + jsmntok_t *tokens, const unsigned int num_tokens) { + int r; + int i; + jsmntok_t *token; + int count = parser->toknext; + + for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { + char c; + jsmntype_t type; + + c = js[parser->pos]; + switch (c) { + case '{': + case '[': + count++; + if (tokens == NULL) { + break; + } + token = jsmn_alloc_token(parser, tokens, num_tokens); + if (token == NULL) { + return JSMN_ERROR_NOMEM; + } + if (parser->toksuper != -1) { + jsmntok_t *t = &tokens[parser->toksuper]; +#ifdef JSMN_STRICT + /* In strict mode an object or array can't become a key */ + if (t->type == JSMN_OBJECT) { + return JSMN_ERROR_INVAL; + } +#endif + t->size++; +#ifdef JSMN_PARENT_LINKS + token->parent = parser->toksuper; +#endif + } + token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY); + token->start = parser->pos; + parser->toksuper = parser->toknext - 1; + break; + case '}': + case ']': + if (tokens == NULL) { + break; + } + type = (c == '}' ? 
JSMN_OBJECT : JSMN_ARRAY); +#ifdef JSMN_PARENT_LINKS + if (parser->toknext < 1) { + return JSMN_ERROR_INVAL; + } + token = &tokens[parser->toknext - 1]; + for (;;) { + if (token->start != -1 && token->end == -1) { + if (token->type != type) { + return JSMN_ERROR_INVAL; + } + token->end = parser->pos + 1; + parser->toksuper = token->parent; + break; + } + if (token->parent == -1) { + if (token->type != type || parser->toksuper == -1) { + return JSMN_ERROR_INVAL; + } + break; + } + token = &tokens[token->parent]; + } +#else + for (i = parser->toknext - 1; i >= 0; i--) { + token = &tokens[i]; + if (token->start != -1 && token->end == -1) { + if (token->type != type) { + return JSMN_ERROR_INVAL; + } + parser->toksuper = -1; + token->end = parser->pos + 1; + break; + } + } + /* Error if unmatched closing bracket */ + if (i == -1) { + return JSMN_ERROR_INVAL; + } + for (; i >= 0; i--) { + token = &tokens[i]; + if (token->start != -1 && token->end == -1) { + parser->toksuper = i; + break; + } + } +#endif + break; + case '\"': + r = jsmn_parse_string(parser, js, len, tokens, num_tokens); + if (r < 0) { + return r; + } + count++; + if (parser->toksuper != -1 && tokens != NULL) { + tokens[parser->toksuper].size++; + } + break; + case '\t': + case '\r': + case '\n': + case ' ': + break; + case ':': + parser->toksuper = parser->toknext - 1; + break; + case ',': + if (tokens != NULL && parser->toksuper != -1 && + tokens[parser->toksuper].type != JSMN_ARRAY && + tokens[parser->toksuper].type != JSMN_OBJECT) { +#ifdef JSMN_PARENT_LINKS + parser->toksuper = tokens[parser->toksuper].parent; +#else + for (i = parser->toknext - 1; i >= 0; i--) { + if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) { + if (tokens[i].start != -1 && tokens[i].end == -1) { + parser->toksuper = i; + break; + } + } + } +#endif + } + break; +#ifdef JSMN_STRICT + /* In strict mode primitives are: numbers and booleans */ + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + 
case '5': + case '6': + case '7': + case '8': + case '9': + case 't': + case 'f': + case 'n': + /* And they must not be keys of the object */ + if (tokens != NULL && parser->toksuper != -1) { + const jsmntok_t *t = &tokens[parser->toksuper]; + if (t->type == JSMN_OBJECT || + (t->type == JSMN_STRING && t->size != 0)) { + return JSMN_ERROR_INVAL; + } + } +#else + /* In non-strict mode every unquoted value is a primitive */ + default: +#endif + r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens); + if (r < 0) { + return r; + } + count++; + if (parser->toksuper != -1 && tokens != NULL) { + tokens[parser->toksuper].size++; + } + break; + +#ifdef JSMN_STRICT + /* Unexpected char in strict mode */ + default: + return JSMN_ERROR_INVAL; +#endif + } + } + + if (tokens != NULL) { + for (i = parser->toknext - 1; i >= 0; i--) { + /* Unmatched opened object or array */ + if (tokens[i].start != -1 && tokens[i].end == -1) { + return JSMN_ERROR_PART; + } + } + } + + return count; +} + +/** + * Creates a new parser based over a given buffer with an array of tokens + * available. 
+ */ +JSMN_API void jsmn_init(jsmn_parser *parser) { + parser->pos = 0; + parser->toknext = 0; + parser->toksuper = -1; +} + +#endif /* JSMN_HEADER */ + +#ifdef __cplusplus +} +#endif + +#endif /* JSMN_H */ diff --git a/src/network.c b/src/network.c index 2a3c1c7..5c28785 100644 --- a/src/network.c +++ b/src/network.c @@ -9,6 +9,7 @@ #include #include +#include #include "network.h" #include "utils.h" @@ -16,6 +17,9 @@ #include "cloudsync.h" #include "network_private.h" +#define JSMN_STATIC +#include "jsmn.h" + #ifndef SQLITE_WASM_EXTRA_INIT #ifndef CLOUDSYNC_OMIT_CURL #include "curl/curl.h" @@ -50,6 +54,7 @@ struct network_data { char *check_endpoint; char *upload_endpoint; char *apply_endpoint; + char *status_endpoint; }; typedef struct { @@ -80,27 +85,30 @@ char *network_data_get_siteid (network_data *data) { return data->site_id; } -bool network_data_set_endpoints (network_data *data, char *auth, char *check, char *upload, char *apply) { +bool network_data_set_endpoints (network_data *data, char *auth, char *check, char *upload, char *apply, char *status) { // sanity check if (!check || !upload) return false; - + // always free previous owned pointers if (data->authentication) cloudsync_memory_free(data->authentication); if (data->check_endpoint) cloudsync_memory_free(data->check_endpoint); if (data->upload_endpoint) cloudsync_memory_free(data->upload_endpoint); if (data->apply_endpoint) cloudsync_memory_free(data->apply_endpoint); + if (data->status_endpoint) cloudsync_memory_free(data->status_endpoint); // clear pointers data->authentication = NULL; data->check_endpoint = NULL; data->upload_endpoint = NULL; data->apply_endpoint = NULL; + data->status_endpoint = NULL; // make a copy of the new endpoints char *auth_copy = NULL; char *check_copy = NULL; char *upload_copy = NULL; char *apply_copy = NULL; + char *status_copy = NULL; // auth is optional if (auth) { @@ -109,24 +117,29 @@ bool network_data_set_endpoints (network_data *data, char *auth, char *check, ch 
} check_copy = cloudsync_string_dup(check); if (!check_copy) goto abort_endpoints; - + upload_copy = cloudsync_string_dup(upload); if (!upload_copy) goto abort_endpoints; - + apply_copy = cloudsync_string_dup(apply); if (!apply_copy) goto abort_endpoints; + status_copy = cloudsync_string_dup(status); + if (!status_copy) goto abort_endpoints; + data->authentication = auth_copy; data->check_endpoint = check_copy; data->upload_endpoint = upload_copy; data->apply_endpoint = apply_copy; + data->status_endpoint = status_copy; return true; - + abort_endpoints: if (auth_copy) cloudsync_memory_free(auth_copy); if (check_copy) cloudsync_memory_free(check_copy); if (upload_copy) cloudsync_memory_free(upload_copy); if (apply_copy) cloudsync_memory_free(apply_copy); + if (status_copy) cloudsync_memory_free(status_copy); return false; } @@ -137,6 +150,7 @@ void network_data_free (network_data *data) { if (data->check_endpoint) cloudsync_memory_free(data->check_endpoint); if (data->upload_endpoint) cloudsync_memory_free(data->upload_endpoint); if (data->apply_endpoint) cloudsync_memory_free(data->apply_endpoint); + if (data->status_endpoint) cloudsync_memory_free(data->status_endpoint); cloudsync_memory_free(data); } @@ -414,6 +428,96 @@ char *network_authentication_token (const char *key, const char *value) { return buffer; } +// MARK: - JSON helpers (jsmn) - + +#define JSMN_MAX_TOKENS 64 + +static bool jsmn_token_eq(const char *json, const jsmntok_t *tok, const char *s) { + return (tok->type == JSMN_STRING && + (int)strlen(s) == tok->end - tok->start && + strncmp(json + tok->start, s, tok->end - tok->start) == 0); +} + +static int jsmn_find_key(const char *json, const jsmntok_t *tokens, int ntokens, const char *key) { + for (int i = 1; i + 1 < ntokens; i++) { + if (jsmn_token_eq(json, &tokens[i], key)) return i; + } + return -1; +} + +static char *json_unescape_string(const char *src, int len) { + char *out = cloudsync_memory_zeroalloc(len + 1); + if (!out) return NULL; + + int 
j = 0; + for (int i = 0; i < len; ) { + if (src[i] == '\\' && i + 1 < len) { + char c = src[i + 1]; + if (c == '"' || c == '\\' || c == '/') { out[j++] = c; i += 2; } + else if (c == 'n') { out[j++] = '\n'; i += 2; } + else if (c == 'r') { out[j++] = '\r'; i += 2; } + else if (c == 't') { out[j++] = '\t'; i += 2; } + else if (c == 'b') { out[j++] = '\b'; i += 2; } + else if (c == 'f') { out[j++] = '\f'; i += 2; } + else if (c == 'u' && i + 5 < len) { + unsigned int cp = 0; + for (int k = 0; k < 4; k++) { + char h = src[i + 2 + k]; + cp <<= 4; + if (h >= '0' && h <= '9') cp |= h - '0'; + else if (h >= 'a' && h <= 'f') cp |= 10 + h - 'a'; + else if (h >= 'A' && h <= 'F') cp |= 10 + h - 'A'; + } + if (cp < 0x80) { out[j++] = (char)cp; } + else { out[j++] = '?'; } // non-ASCII: replace + i += 6; + } + else { out[j++] = src[i]; i++; } + } else { + out[j++] = src[i]; i++; + } + } + out[j] = '\0'; + return out; +} + +static char *json_extract_string(const char *json, size_t json_len, const char *key) { + if (!json || json_len == 0 || !key) return NULL; + + jsmn_parser parser; + jsmntok_t tokens[JSMN_MAX_TOKENS]; + jsmn_init(&parser); + int ntokens = jsmn_parse(&parser, json, json_len, tokens, JSMN_MAX_TOKENS); + if (ntokens < 1) return NULL; + + int i = jsmn_find_key(json, tokens, ntokens, key); + if (i < 0 || i + 1 >= ntokens) return NULL; + + jsmntok_t *val = &tokens[i + 1]; + if (val->type != JSMN_STRING) return NULL; + + return json_unescape_string(json + val->start, val->end - val->start); +} + +static int64_t json_extract_int(const char *json, size_t json_len, const char *key, int64_t default_value) { + if (!json || json_len == 0 || !key) return default_value; + + jsmn_parser parser; + jsmntok_t tokens[JSMN_MAX_TOKENS]; + jsmn_init(&parser); + int ntokens = jsmn_parse(&parser, json, json_len, tokens, JSMN_MAX_TOKENS); + if (ntokens < 1 || tokens[0].type != JSMN_OBJECT) return default_value; + + int i = jsmn_find_key(json, tokens, ntokens, key); + if (i < 0 || i + 1 
>= ntokens) return default_value; + + jsmntok_t *val = &tokens[i + 1]; + if (val->type != JSMN_PRIMITIVE) return default_value; + + return strtoll(json + val->start, NULL, 10); +} + + int network_extract_query_param (const char *query, const char *key, char *output, size_t output_size) { if (!query || !key || !output || output_size == 0) { return -1; // Invalid input @@ -472,6 +576,7 @@ bool network_compute_endpoints (sqlite3_context *context, network_data *data, co char *check_endpoint = NULL; char *upload_endpoint = NULL; char *apply_endpoint = NULL; + char *status_endpoint = NULL; char *conn_string_https = NULL; @@ -555,12 +660,14 @@ bool network_compute_endpoints (sqlite3_context *context, network_data *data, co check_endpoint = (char *)cloudsync_memory_zeroalloc(requested); upload_endpoint = (char *)cloudsync_memory_zeroalloc(requested); apply_endpoint = (char *)cloudsync_memory_zeroalloc(requested); + status_endpoint = (char *)cloudsync_memory_zeroalloc(requested); - if ((!upload_endpoint) || (!check_endpoint) || (!apply_endpoint)) goto finalize; + if ((!upload_endpoint) || (!check_endpoint) || (!apply_endpoint) || (!status_endpoint)) goto finalize; snprintf(check_endpoint, requested, "%s://%s:%s/%s%s/%s/%s", scheme, host, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database, data->site_id, CLOUDSYNC_ENDPOINT_CHECK); snprintf(upload_endpoint, requested, "%s://%s:%s/%s%s/%s/%s", scheme, host, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database, data->site_id, CLOUDSYNC_ENDPOINT_UPLOAD); snprintf(apply_endpoint, requested, "%s://%s:%s/%s%s/%s/%s", scheme, host, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database, data->site_id, CLOUDSYNC_ENDPOINT_APPLY); + snprintf(status_endpoint, requested, "%s://%s:%s/%s%s/%s/%s", scheme, host, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database, data->site_id, CLOUDSYNC_ENDPOINT_STATUS); result = true; @@ -580,6 +687,7 @@ bool network_compute_endpoints (sqlite3_context *context, network_data *data, co if (check_endpoint) 
cloudsync_memory_free(check_endpoint); if (upload_endpoint) cloudsync_memory_free(upload_endpoint); if (apply_endpoint) cloudsync_memory_free(apply_endpoint); + if (status_endpoint) cloudsync_memory_free(status_endpoint); } if (result) { @@ -596,6 +704,9 @@ bool network_compute_endpoints (sqlite3_context *context, network_data *data, co if (data->apply_endpoint) cloudsync_memory_free(data->apply_endpoint); data->apply_endpoint = apply_endpoint; + + if (data->status_endpoint) cloudsync_memory_free(data->status_endpoint); + data->status_endpoint = status_endpoint; } // cleanup memory @@ -738,6 +849,9 @@ void cloudsync_network_has_unsent_changes (sqlite3_context *context, int argc, s sqlite3 *db = sqlite3_context_db_handle(context); cloudsync_context *data = (cloudsync_context *)sqlite3_user_data(context); + network_data *netdata = (network_data *)cloudsync_auxdata(data); + if (!netdata) {sqlite3_result_error(context, "Unable to retrieve CloudSync network context.", -1); return;} + char *sql = "SELECT max(db_version) FROM cloudsync_changes WHERE site_id == (SELECT site_id FROM cloudsync_site_id WHERE rowid=0)"; int64_t last_local_change = 0; int rc = database_select_int(data, sql, &last_local_change); @@ -752,8 +866,20 @@ void cloudsync_network_has_unsent_changes (sqlite3_context *context, int argc, s return; } - int sent_db_version = dbutils_settings_get_int_value(data, CLOUDSYNC_KEY_SEND_DBVERSION); - sqlite3_result_int(context, (sent_db_version < last_local_change)); + NETWORK_RESULT res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + + int64_t last_optimistic_version = -1; + + if (res.code == CLOUDSYNC_NETWORK_BUFFER && res.buffer) { + last_optimistic_version = json_extract_int(res.buffer, res.blen, "lastOptimisticVersion", -1); + } else if (res.code != CLOUDSYNC_NETWORK_OK) { + network_result_to_sqlite_error(context, res, "unable to retrieve current status from remote 
host."); + network_result_cleanup(&res); + return; + } + + network_result_cleanup(&res); + sqlite3_result_int(context, (last_optimistic_version >= 0 && last_optimistic_version < last_local_change)); } int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, sqlite3_value **argv) { @@ -767,61 +893,82 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, // retrieve payload char *blob = NULL; - int blob_size = 0, db_version = 0, seq = 0; - int64_t new_db_version = 0, new_seq = 0; - int rc = cloudsync_payload_get(data, &blob, &blob_size, &db_version, &seq, &new_db_version, &new_seq); + int blob_size = 0, db_version = 0; + int64_t new_db_version = 0; + int rc = cloudsync_payload_get(data, &blob, &blob_size, &db_version, &new_db_version); if (rc != SQLITE_OK) { if (db_version < 0) sqlite3_result_error(context, "Unable to retrieve db_version.", -1); - else if (seq < 0) sqlite3_result_error(context, "Unable to retrieve seq.", -1); else sqlite3_result_error(context, "Unable to retrieve changes in cloudsync_network_send_changes", -1); return rc; } - - // exit if there is no data to send - if (blob == NULL || blob_size == 0) return SQLITE_OK; - NETWORK_RESULT res = network_receive_buffer(netdata, netdata->upload_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); - if (res.code != CLOUDSYNC_NETWORK_BUFFER) { + NETWORK_RESULT res; + if (blob != NULL || blob_size > 0) { + // there is data to send + res = network_receive_buffer(netdata, netdata->upload_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + if (res.code != CLOUDSYNC_NETWORK_BUFFER) { + cloudsync_memory_free(blob); + network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to receive upload URL"); + network_result_cleanup(&res); + return SQLITE_ERROR; + } + + char *s3_url = json_extract_string(res.buffer, res.blen, "url"); + if (!s3_url) { + cloudsync_memory_free(blob); 
+ sqlite3_result_error(context, "cloudsync_network_send_changes: missing 'url' in upload response.", -1); + network_result_cleanup(&res); + return SQLITE_ERROR; + } + bool sent = network_send_buffer(netdata, s3_url, NULL, blob, blob_size); cloudsync_memory_free(blob); - if (sent == false) { - network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to upload BLOB changes to remote host."); + if (sent == false) { + cloudsync_memory_free(s3_url); + network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to upload BLOB changes to remote host."); + network_result_cleanup(&res); + return SQLITE_ERROR; + } + + int db_version_min = db_version+1; + int db_version_max = (int)new_db_version; + if (db_version_min > db_version_max) db_version_min = db_version_max; + char json_payload[4096]; + snprintf(json_payload, sizeof(json_payload), "{\"url\":\"%s\", \"dbVersionMin\":%d, \"dbVersionMax\":%d}", s3_url, db_version_min, db_version_max); + cloudsync_memory_free(s3_url); + + // free res network_result_cleanup(&res); - return SQLITE_ERROR; + + // notify remote host that we successfully uploaded changes + res = network_receive_buffer(netdata, netdata->apply_endpoint, netdata->authentication, true, true, json_payload, CLOUDSYNC_HEADER_SQLITECLOUD); + } else { + // there is no data to send, just check the status to update the db_version value in settings and to report the status + res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + } - - char json_payload[2024]; - snprintf(json_payload, sizeof(json_payload), "{\"url\":\"%s\", 
\"dbVersionMin\":%d, \"dbVersionMax\":%lld}", s3_url, db_version, (long long)new_db_version); - - // free res - network_result_cleanup(&res); - - // notify remote host that we succesfully uploaded changes - res = network_receive_buffer(netdata, netdata->apply_endpoint, netdata->authentication, true, true, json_payload, CLOUDSYNC_HEADER_SQLITECLOUD); - if (res.code != CLOUDSYNC_NETWORK_OK) { + + int64_t last_optimistic_version = -1; + + if (res.code == CLOUDSYNC_NETWORK_BUFFER && res.buffer) { + last_optimistic_version = json_extract_int(res.buffer, res.blen, "lastOptimisticVersion", -1); + } else if (res.code != CLOUDSYNC_NETWORK_OK) { network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to notify BLOB upload to remote host."); network_result_cleanup(&res); return SQLITE_ERROR; } - // update db_version and seq + // update db_version in settings char buf[256]; - if (new_db_version != db_version) { + if (last_optimistic_version > 0) { + if (last_optimistic_version != db_version) { + snprintf(buf, sizeof(buf), "%" PRId64, last_optimistic_version); + dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_DBVERSION, buf); + } + } else if (new_db_version != db_version) { snprintf(buf, sizeof(buf), "%" PRId64, new_db_version); dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_DBVERSION, buf); } - if (new_seq != seq) { - snprintf(buf, sizeof(buf), "%" PRId64, new_seq); - dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_SEQ, buf); - } + network_set_sqlite_result(context, &res); network_result_cleanup(&res); return SQLITE_OK; } @@ -850,11 +997,18 @@ int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows) { NETWORK_RESULT result = network_receive_buffer(netdata, netdata->check_endpoint, netdata->authentication, true, true, json_payload, CLOUDSYNC_HEADER_SQLITECLOUD); int rc = SQLITE_OK; if (result.code == CLOUDSYNC_NETWORK_BUFFER) { - rc = network_download_changes(context, result.buffer, pnrows); + char 
*download_url = json_extract_string(result.buffer, result.blen, "url"); + if (!download_url) { + sqlite3_result_error(context, "cloudsync_network_check_changes: missing 'url' in check response.", -1); + network_result_cleanup(&result); + return SQLITE_ERROR; + } + rc = network_download_changes(context, download_url, pnrows); + cloudsync_memory_free(download_url); } else { rc = network_set_sqlite_result(context, &result); } - + network_result_cleanup(&result); return rc; } @@ -1000,6 +1154,21 @@ void cloudsync_network_logout (sqlite3_context *context, int argc, sqlite3_value cloudsync_memory_free(errmsg); } +void cloudsync_network_status (sqlite3_context *context, int argc, sqlite3_value **argv) { + DEBUG_FUNCTION("cloudsync_network_status"); + + cloudsync_context *data = (cloudsync_context *)sqlite3_user_data(context); + network_data *netdata = (network_data *)cloudsync_auxdata(data); + if (!netdata) { + sqlite3_result_error(context, "Unable to retrieve CloudSync network context.", -1); + return; + } + + NETWORK_RESULT res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + network_set_sqlite_result(context, &res); + network_result_cleanup(&res); +} + // MARK: - int cloudsync_network_register (sqlite3 *db, char **pzErrMsg, void *ctx) { @@ -1038,7 +1207,10 @@ int cloudsync_network_register (sqlite3 *db, char **pzErrMsg, void *ctx) { rc = sqlite3_create_function(db, "cloudsync_network_logout", 0, DEFAULT_FLAGS, ctx, cloudsync_network_logout, NULL, NULL); if (rc != SQLITE_OK) return rc; - + + rc = sqlite3_create_function(db, "cloudsync_network_status", 0, DEFAULT_FLAGS, ctx, cloudsync_network_status, NULL, NULL); + if (rc != SQLITE_OK) return rc; + cleanup: if ((rc != SQLITE_OK) && (pzErrMsg)) { *pzErrMsg = sqlite3_mprintf("Error creating function in cloudsync_network_register: %s", sqlite3_errmsg(db)); diff --git a/src/network.m b/src/network.m index fa4c4ea..bfd7558 100644 --- 
a/src/network.m +++ b/src/network.m @@ -63,8 +63,9 @@ bool network_compute_endpoints (sqlite3_context *context, network_data *data, co NSString *check_endpoint = [NSString stringWithFormat:@"%s://%s:%s/%s%s/%s/%s", scheme.UTF8String, host.UTF8String, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database.UTF8String, site_id, CLOUDSYNC_ENDPOINT_CHECK]; NSString *upload_endpoint = [NSString stringWithFormat:@"%s://%s:%s/%s%s/%s/%s", scheme.UTF8String, host.UTF8String, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database.UTF8String, site_id, CLOUDSYNC_ENDPOINT_UPLOAD]; NSString *apply_endpoint = [NSString stringWithFormat:@"%s://%s:%s/%s%s/%s/%s", scheme.UTF8String, host.UTF8String, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database.UTF8String, site_id, CLOUDSYNC_ENDPOINT_APPLY]; + NSString *status_endpoint = [NSString stringWithFormat:@"%s://%s:%s/%s%s/%s/%s", scheme.UTF8String, host.UTF8String, port_or_default, CLOUDSYNC_ENDPOINT_PREFIX, database.UTF8String, site_id, CLOUDSYNC_ENDPOINT_STATUS]; - return network_data_set_endpoints(data, (char *)authentication.UTF8String, (char *)check_endpoint.UTF8String, (char *)upload_endpoint.UTF8String, (char *)apply_endpoint.UTF8String); + return network_data_set_endpoints(data, (char *)authentication.UTF8String, (char *)check_endpoint.UTF8String, (char *)upload_endpoint.UTF8String, (char *)apply_endpoint.UTF8String, (char *)status_endpoint.UTF8String); } bool network_send_buffer(network_data *data, const char *endpoint, const char *authentication, const void *blob, int blob_size) { diff --git a/src/network_private.h b/src/network_private.h index 7583b66..1af7758 100644 --- a/src/network_private.h +++ b/src/network_private.h @@ -12,6 +12,7 @@ #define CLOUDSYNC_ENDPOINT_UPLOAD "upload" #define CLOUDSYNC_ENDPOINT_CHECK "check" #define CLOUDSYNC_ENDPOINT_APPLY "apply" +#define CLOUDSYNC_ENDPOINT_STATUS "status" #define CLOUDSYNC_DEFAULT_ENDPOINT_PORT "443" #define CLOUDSYNC_HEADER_SQLITECLOUD "Accept: sqlc/plain" @@ -30,7 +31,7 @@ 
typedef struct { } NETWORK_RESULT; char *network_data_get_siteid (network_data *data); -bool network_data_set_endpoints (network_data *data, char *auth, char *check, char *upload, char *apply); +bool network_data_set_endpoints (network_data *data, char *auth, char *check, char *upload, char *apply, char *status); bool network_compute_endpoints (sqlite3_context *context, network_data *data, const char *conn_string); bool network_send_buffer(network_data *data, const char *endpoint, const char *authentication, const void *blob, int blob_size); diff --git a/src/postgresql/database_postgresql.c b/src/postgresql/database_postgresql.c index 70bc4e1..7682fe3 100644 --- a/src/postgresql/database_postgresql.c +++ b/src/postgresql/database_postgresql.c @@ -704,7 +704,7 @@ int database_select1_value (cloudsync_context *data, const char *sql, char **ptr return rc; } -int database_select3_values (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2, int64_t *value3) { +int database_select2_values (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2) { cloudsync_reset_error(data); // init values @@ -715,16 +715,16 @@ int database_select3_values (cloudsync_context *data, const char *sql, char **va int rc = SPI_execute(sql, true, 0); if (rc < 0) { - rc = cloudsync_set_error(data, "SPI_execute failed in database_select3_values", DBRES_ERROR); + rc = cloudsync_set_error(data, "SPI_execute failed in database_select2_values", DBRES_ERROR); goto cleanup; } if (!SPI_tuptable || !SPI_tuptable->tupdesc) { - rc = cloudsync_set_error(data, "No result table in database_select3_values", DBRES_ERROR); + rc = cloudsync_set_error(data, "No result table in database_select2_values", DBRES_ERROR); goto cleanup; } - if (SPI_tuptable->tupdesc->natts < 3) { - rc = cloudsync_set_error(data, "Result has fewer than 3 columns in database_select3_values", DBRES_ERROR); + if (SPI_tuptable->tupdesc->natts < 2) { + rc = 
cloudsync_set_error(data, "Result has fewer than 2 columns in database_select2_values", DBRES_ERROR); goto cleanup; } if (SPI_processed == 0) { @@ -782,17 +782,6 @@ int database_select3_values (cloudsync_context *data, const char *sql, char **va } } - // Third column - int - Datum datum3 = SPI_getbinval(tuple, SPI_tuptable->tupdesc, 3, &isnull); - if (!isnull) { - Oid typeid = SPI_gettypeid(SPI_tuptable->tupdesc, 3); - if (typeid == INT8OID) { - *value3 = DatumGetInt64(datum3); - } else if (typeid == INT4OID) { - *value3 = (int64_t)DatumGetInt32(datum3); - } - } - rc = DBRES_OK; cleanup: @@ -1121,8 +1110,8 @@ int database_select_blob (cloudsync_context *data, const char *sql, char **value return database_select1_value(data, sql, value, len, DBTYPE_BLOB); } -int database_select_blob_2int (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2, int64_t *value3) { - return database_select3_values(data, sql, value, len, value2, value3); +int database_select_blob_int (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2) { + return database_select2_values(data, sql, value, len, value2); } int database_cleanup (cloudsync_context *data) { diff --git a/src/sqlite/database_sqlite.c b/src/sqlite/database_sqlite.c index b1de7ad..d7ace3d 100644 --- a/src/sqlite/database_sqlite.c +++ b/src/sqlite/database_sqlite.c @@ -440,21 +440,20 @@ static int database_select1_value (cloudsync_context *data, const char *sql, cha return rc; } -static int database_select3_values (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2, int64_t *value3) { +static int database_select2_values (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2) { sqlite3 *db = (sqlite3 *)cloudsync_db(data); // init values and sanity check expected_type *value = NULL; *value2 = 0; - *value3 = 0; *len = 0; sqlite3_stmt *vm = NULL; int rc = sqlite3_prepare_v2((sqlite3 *)db, sql, -1, &vm, 
NULL); if (rc != SQLITE_OK) goto cleanup_select; - // ensure at least one column - if (sqlite3_column_count(vm) < 3) {rc = SQLITE_MISMATCH; goto cleanup_select;} + // ensure column count + if (sqlite3_column_count(vm) < 2) {rc = SQLITE_MISMATCH; goto cleanup_select;} rc = sqlite3_step(vm); if (rc == SQLITE_DONE) {rc = SQLITE_OK; goto cleanup_select;} // no rows OK @@ -463,7 +462,6 @@ static int database_select3_values (cloudsync_context *data, const char *sql, ch // sanity check column types if (sqlite3_column_type(vm, 0) != SQLITE_BLOB) {rc = SQLITE_MISMATCH; goto cleanup_select;} if (sqlite3_column_type(vm, 1) != SQLITE_INTEGER) {rc = SQLITE_MISMATCH; goto cleanup_select;} - if (sqlite3_column_type(vm, 2) != SQLITE_INTEGER) {rc = SQLITE_MISMATCH; goto cleanup_select;} // 1st column is BLOB const void *blob = (const void *)sqlite3_column_blob(vm, 0); @@ -477,9 +475,8 @@ static int database_select3_values (cloudsync_context *data, const char *sql, ch *len = blob_len; } - // 2nd and 3rd columns are INTEGERS + // 2nd column is INTEGER *value2 = (int64_t)sqlite3_column_int64(vm, 1); - *value3 = (int64_t)sqlite3_column_int64(vm, 2); rc = SQLITE_OK; @@ -574,8 +571,8 @@ int database_select_blob (cloudsync_context *data, const char *sql, char **value return database_select1_value(data, sql, value, len, DBTYPE_BLOB); } -int database_select_blob_2int (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2, int64_t *value3) { - return database_select3_values(data, sql, value, len, value2, value3); +int database_select_blob_int (cloudsync_context *data, const char *sql, char **value, int64_t *len, int64_t *value2) { + return database_select2_values(data, sql, value, len, value2); } const char *database_errmsg (cloudsync_context *data) { diff --git a/test/unit.c b/test/unit.c index e9131dc..1caba38 100644 --- a/test/unit.c +++ b/test/unit.c @@ -1926,8 +1926,8 @@ bool do_test_dbutils (void) { char *site_id_blob; int64_t site_id_blob_size; - 
int64_t dbver1, seq1; - rc = database_select_blob_2int(data, "SELECT cloudsync_siteid(), cloudsync_db_version(), cloudsync_seq();", &site_id_blob, &site_id_blob_size, &dbver1, &seq1); + int64_t dbver1; + rc = database_select_blob_int(data, "SELECT cloudsync_siteid(), cloudsync_db_version();", &site_id_blob, &site_id_blob_size, &dbver1); if (rc != SQLITE_OK || site_id_blob == NULL ||dbver1 != db_version) goto finalize; cloudsync_memory_free(site_id_blob); From 46ab54b59c6d2a27d66a8f8e1f45daee5d0a2d34 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Mon, 2 Mar 2026 08:48:13 -0600 Subject: [PATCH 02/10] fix: remove old unused var --- src/postgresql/database_postgresql.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/postgresql/database_postgresql.c b/src/postgresql/database_postgresql.c index 7682fe3..7ac0149 100644 --- a/src/postgresql/database_postgresql.c +++ b/src/postgresql/database_postgresql.c @@ -710,7 +710,6 @@ int database_select2_values (cloudsync_context *data, const char *sql, char **va // init values *value = NULL; *value2 = 0; - *value3 = 0; *len = 0; int rc = SPI_execute(sql, true, 0); From 9796eb413cac7d5f40eae3606e983afc5f2319c9 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Mon, 2 Mar 2026 22:28:13 -0600 Subject: [PATCH 03/10] feat(network): structured JSON responses for sync functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The three public sync functions was leaking internal server JSON or returning raw integers. We want them to return coherent, user-facing JSON with a computed status field and relevant version/row info. 
New responses: - cloudsync_network_send_changes — formats JSON: {"status":"...", "localVersion":N, "serverVersion":N} - cloudsync_network_check_changes — formats JSON: {"rowsReceived":N} - cloudsync_network_sync — combines send + check results into: {"status":"...", "localVersion":N, "serverVersion":N, "rowsReceived":N} The status helper computes "synced", "syncing", "retry", or "error" from the apply/status response fields. --- src/network.c | 103 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 83 insertions(+), 20 deletions(-) diff --git a/src/network.c b/src/network.c index 5c28785..fff301d 100644 --- a/src/network.c +++ b/src/network.c @@ -517,6 +517,23 @@ static int64_t json_extract_int(const char *json, size_t json_len, const char *k return strtoll(json + val->start, NULL, 10); } +static int json_extract_array_size(const char *json, size_t json_len, const char *key) { + if (!json || json_len == 0 || !key) return -1; + + jsmn_parser parser; + jsmntok_t tokens[JSMN_MAX_TOKENS]; + jsmn_init(&parser); + int ntokens = jsmn_parse(&parser, json, json_len, tokens, JSMN_MAX_TOKENS); + if (ntokens < 1 || tokens[0].type != JSMN_OBJECT) return -1; + + int i = jsmn_find_key(json, tokens, ntokens, key); + if (i < 0 || i + 1 >= ntokens) return -1; + + jsmntok_t *val = &tokens[i + 1]; + if (val->type != JSMN_ARRAY) return -1; + + return val->size; +} int network_extract_query_param (const char *query, const char *key, char *output, size_t output_size) { if (!query || !key || !output || output_size == 0) { @@ -843,6 +860,23 @@ void cloudsync_network_set_apikey (sqlite3_context *context, int argc, sqlite3_v (result) ? 
sqlite3_result_int(context, SQLITE_OK) : sqlite3_result_error_code(context, SQLITE_NOMEM); } +// MARK: - Sync result + +typedef struct { + int64_t server_version; // lastOptimisticVersion + int64_t local_version; // new_db_version (max local) + const char *status; // computed status string + int rows_received; // rows from check +} sync_result; + +static const char *network_compute_status(int64_t last_optimistic, int64_t last_confirmed, + int gaps_size, int64_t local_version) { + if (last_optimistic < 0 || last_confirmed < 0) return "error"; + if (gaps_size > 0 || last_optimistic < local_version) return "retry"; + if (last_optimistic == last_confirmed) return "synced"; + return "syncing"; +} + // MARK: - void cloudsync_network_has_unsent_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { @@ -882,7 +916,7 @@ void cloudsync_network_has_unsent_changes (sqlite3_context *context, int argc, s sqlite3_result_int(context, (last_optimistic_version >= 0 && last_optimistic_version < last_local_change)); } -int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, sqlite3_value **argv) { +int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, sqlite3_value **argv, sync_result *out) { DEBUG_FUNCTION("cloudsync_network_send_changes"); // retrieve global context @@ -943,43 +977,63 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, res = network_receive_buffer(netdata, netdata->apply_endpoint, netdata->authentication, true, true, json_payload, CLOUDSYNC_HEADER_SQLITECLOUD); } else { // there is no data to send, just check the status to update the db_version value in settings and to reply the status + new_db_version = db_version; res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); } int64_t last_optimistic_version = -1; + int64_t last_confirmed_version = -1; + int gaps_size = -1; if (res.code == 
CLOUDSYNC_NETWORK_BUFFER && res.buffer) { last_optimistic_version = json_extract_int(res.buffer, res.blen, "lastOptimisticVersion", -1); + last_confirmed_version = json_extract_int(res.buffer, res.blen, "lastConfirmedVersion", -1); + gaps_size = json_extract_array_size(res.buffer, res.blen, "gaps"); + if (gaps_size < 0) gaps_size = 0; } else if (res.code != CLOUDSYNC_NETWORK_OK) { network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to notify BLOB upload to remote host."); network_result_cleanup(&res); return SQLITE_ERROR; } - + // update db_version in settings char buf[256]; - if (last_optimistic_version > 0) { + if (last_optimistic_version >= 0) { if (last_optimistic_version != db_version) { snprintf(buf, sizeof(buf), "%" PRId64, last_optimistic_version); dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_DBVERSION, buf); - } + } } else if (new_db_version != db_version) { snprintf(buf, sizeof(buf), "%" PRId64, new_db_version); dbutils_settings_set_key_value(data, CLOUDSYNC_KEY_SEND_DBVERSION, buf); } - - network_set_sqlite_result(context, &res); + + // populate sync result + if (out) { + out->server_version = last_optimistic_version; + out->local_version = new_db_version; + out->status = network_compute_status(last_optimistic_version, last_confirmed_version, gaps_size, new_db_version); + } + network_result_cleanup(&res); return SQLITE_OK; } void cloudsync_network_send_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { DEBUG_FUNCTION("cloudsync_network_send_changes"); - - cloudsync_network_send_changes_internal(context, argc, argv); + + sync_result sr = {-1, 0, NULL, 0}; + int rc = cloudsync_network_send_changes_internal(context, argc, argv, &sr); + if (rc != SQLITE_OK) return; + + char buf[256]; + snprintf(buf, sizeof(buf), + "{\"status\":\"%s\",\"localVersion\":%" PRId64 ",\"serverVersion\":%" PRId64 "}", + sr.status ? 
sr.status : "error", sr.local_version, sr.server_version); + sqlite3_result_text(context, buf, -1, SQLITE_TRANSIENT); } -int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows) { +int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows, sync_result *out) { cloudsync_context *data = (cloudsync_context *)sqlite3_user_data(context); network_data *netdata = (network_data *)cloudsync_auxdata(data); if (!netdata) {sqlite3_result_error(context, "Unable to retrieve CloudSync network context.", -1); return -1;} @@ -1009,25 +1063,32 @@ int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows) { rc = network_set_sqlite_result(context, &result); } + if (out && pnrows) out->rows_received = *pnrows; + network_result_cleanup(&result); return rc; } void cloudsync_network_sync (sqlite3_context *context, int wait_ms, int max_retries) { - int rc = cloudsync_network_send_changes_internal(context, 0, NULL); + sync_result sr = {-1, 0, NULL, 0}; + int rc = cloudsync_network_send_changes_internal(context, 0, NULL, &sr); if (rc != SQLITE_OK) return; - + int ntries = 0; int nrows = 0; while (ntries < max_retries) { if (ntries > 0) sqlite3_sleep(wait_ms); - rc = cloudsync_network_check_internal(context, &nrows); + rc = cloudsync_network_check_internal(context, &nrows, &sr); if (rc == SQLITE_OK && nrows > 0) break; ntries++; } - - sqlite3_result_error_code(context, (nrows == -1) ? SQLITE_ERROR : SQLITE_OK); - if (nrows >= 0) sqlite3_result_int(context, nrows); + if (rc != SQLITE_OK) return; + + char buf[256]; + snprintf(buf, sizeof(buf), + "{\"status\":\"%s\",\"localVersion\":%" PRId64 ",\"serverVersion\":%" PRId64 ",\"rowsReceived\":%d}", + sr.status ? 
sr.status : "error", sr.local_version, sr.server_version, nrows); + sqlite3_result_text(context, buf, -1, SQLITE_TRANSIENT); } void cloudsync_network_sync0 (sqlite3_context *context, int argc, sqlite3_value **argv) { @@ -1049,12 +1110,14 @@ void cloudsync_network_sync2 (sqlite3_context *context, int argc, sqlite3_value void cloudsync_network_check_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { DEBUG_FUNCTION("cloudsync_network_check_changes"); - + int nrows = 0; - int rc = cloudsync_network_check_internal(context, &nrows); - - // returns number of applied rows - if (rc == SQLITE_OK) sqlite3_result_int(context, nrows); + int rc = cloudsync_network_check_internal(context, &nrows, NULL); + if (rc != SQLITE_OK) return; + + char buf[128]; + snprintf(buf, sizeof(buf), "{\"rowsReceived\":%d}", nrows); + sqlite3_result_text(context, buf, -1, SQLITE_TRANSIENT); } void cloudsync_network_reset_sync_version (sqlite3_context *context, int argc, sqlite3_value **argv) { From 42616abe2cf1c2c0789bf5db814caccb2b40e0c2 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Tue, 3 Mar 2026 08:21:02 -0600 Subject: [PATCH 04/10] chore(network): change the "retry" status to "out-of-sync" --- src/network.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/network.c b/src/network.c index fff301d..a4cce03 100644 --- a/src/network.c +++ b/src/network.c @@ -872,7 +872,7 @@ typedef struct { static const char *network_compute_status(int64_t last_optimistic, int64_t last_confirmed, int gaps_size, int64_t local_version) { if (last_optimistic < 0 || last_confirmed < 0) return "error"; - if (gaps_size > 0 || last_optimistic < local_version) return "retry"; + if (gaps_size > 0 || last_optimistic < local_version) return "out-of-sync"; if (last_optimistic == last_confirmed) return "synced"; return "syncing"; } From 47bdf471080a185bb24e4633a32b298b0b745d52 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Tue, 3 Mar 2026 17:56:05 -0600 Subject: [PATCH 05/10] 
test: add cloud command to perform semi-automatic e2e tests with SQLiteCloud projects using RLS rules --- ... => test-sync-roundtrip-postgres-local.md} | 2 +- ...test-sync-roundtrip-postrges-local-rls.md} | 0 .../test-sync-roundtrip-sqlitecloud-rls.md | 445 ++++++++++++++++++ 3 files changed, 446 insertions(+), 1 deletion(-) rename .claude/commands/{test-sync-roundtrip.md => test-sync-roundtrip-postgres-local.md} (98%) rename .claude/commands/{test-sync-roundtrip-rls.md => test-sync-roundtrip-postrges-local-rls.md} (100%) create mode 100644 .claude/commands/test-sync-roundtrip-sqlitecloud-rls.md diff --git a/.claude/commands/test-sync-roundtrip.md b/.claude/commands/test-sync-roundtrip-postgres-local.md similarity index 98% rename from .claude/commands/test-sync-roundtrip.md rename to .claude/commands/test-sync-roundtrip-postgres-local.md index ea946db..83a69f0 100644 --- a/.claude/commands/test-sync-roundtrip.md +++ b/.claude/commands/test-sync-roundtrip-postgres-local.md @@ -1,4 +1,4 @@ -# Sync Roundtrip Test +# Sync Roundtrip Test with local Postgres database Execute a full roundtrip sync test between a local SQLite database and the local Supabase Docker PostgreSQL instance. diff --git a/.claude/commands/test-sync-roundtrip-rls.md b/.claude/commands/test-sync-roundtrip-postrges-local-rls.md similarity index 100% rename from .claude/commands/test-sync-roundtrip-rls.md rename to .claude/commands/test-sync-roundtrip-postrges-local-rls.md diff --git a/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md b/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md new file mode 100644 index 0000000..f805780 --- /dev/null +++ b/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md @@ -0,0 +1,445 @@ +# Sync Roundtrip Test with remote SQLiteCloud database and RLS policies + +Execute a full roundtrip sync test between multiple local SQLite databases and the sqlitecloud, verifying that Row Level Security (RLS) policies are correctly enforced during sync. 
+
+## Prerequisites
+- Connection string to a sqlitecloud project
+- HTTP sync server running on http://localhost:8091/
+- Built cloudsync extension (`make` to build `dist/cloudsync.dylib`)
+
+## Test Procedure
+
+### Step 1: Get DDL from User
+
+Ask the user to provide a DDL query for the table(s) to test. It can be in PostgreSQL or SQLite format. Offer the following options:
+
+**Option 1: Simple TEXT primary key with user_id for RLS**
+```sql
+CREATE TABLE test_sync (
+    id TEXT PRIMARY KEY NOT NULL,
+    user_id TEXT NOT NULL,
+    name TEXT,
+    value INTEGER
+);
+```
+
+**Option 2: Two tables scenario with user ownership**
+```sql
+CREATE TABLE authors (
+    id TEXT PRIMARY KEY NOT NULL,
+    user_id TEXT NOT NULL,
+    name TEXT,
+    email TEXT
+);
+
+CREATE TABLE books (
+    id TEXT PRIMARY KEY NOT NULL,
+    user_id TEXT NOT NULL,
+    title TEXT,
+    author_id TEXT,
+    published_year INTEGER
+);
+```
+
+**Option 3: Custom policy**
+Ask the user to describe the table/tables in plain English or DDL queries.
+
+**Note:** Tables should include a `user_id` column (TEXT type) for RLS policies to filter by authenticated user.
+
+### Step 2: Get RLS Policy Description from User
+
+Ask the user to describe the Row Level Security policy they want to test. Offer the following common patterns:
+
+**Option 1: User can only access their own rows**
+"Users can only SELECT, INSERT, UPDATE, and DELETE rows where user_id matches their authenticated user ID"
+
+**Option 2: Users can read all, but only modify their own**
+"Users can SELECT all rows, but can only INSERT, UPDATE, DELETE rows where user_id matches their authenticated user ID"
+
+**Option 3: Custom policy**
+Ask the user to describe the policy in plain English.
+ +### Step 3: Get sqlitecloud connection string from User + +Ask the user to provide a connection string in the form of "sqlitecloud://:/?apikey=" to be later used with the sqlitecloud cli (sqlc) with `~/go/bin/sqlc ""` + +### Step 4: Setup SQLiteCloud with RLS + +Connect to SQLiteCloud and prepare the environment: +```bash +~/go/bin/sqlc +``` + +The last command inside sqlc to exit from the cli program must be `quit`. + +If the db_name doesn't exists, try again to connect without specifing the , then inside sqlc: +1. CREATE DATABASE +2. USE DATABASE + +Then, inside sqlc: +1. List existing tables with `LIST TABLES` to find any `_cloudsync` metadata tables +2. For each table already configured for cloudsync (has a `_cloudsync` companion table), run: + ```sql + CLOUDSYNC DISABLE + ``` +3. Drop the test table if it exists: `DROP TABLE IF EXISTS ;` +5. Create the test table using the SQLite DDL +6. Enable RLS on the table: + ```sql + ENABLE RLS DATABASE TABLE + ``` +7. Create RLS policies based on the user's description. +Your RLS policies for INSERT, UPDATE, and DELETE operations can reference column values as they are being changed. This is done using the special OLD.column and NEW.column identifiers. Their availability and meaning depend on the operation being performed: + ++-----------+--------------------------------------------+--------------------------------------------+ +| Operation | OLD.column Reference | NEW.column Reference | ++-----------+--------------------------------------------+--------------------------------------------+ +| INSERT | Not available | The value for the new row. | +| UPDATE | The value of the row before the update. | The value of the row after the update. | +| DELETE | The value of the row being deleted. 
| Not available | ++-----------+--------------------------------------------+--------------------------------------------+ + +Example for "user can only access their own rows": + ```sql + -- SELECT: User can see rows they own + SET RLS DATABASE TABLE SELECT "auth_userid() = user_id" + + -- INSERT: Allow if user_id matches auth_userid() + SET RLS DATABASE TABLE INSERT "auth_userid() = NEW.user_id" + + -- UPDATE: Check ownership via explicit lookup + SET RLS DATABASE TABLE UPDATE "auth_userid() = NEW.user_id AND auth_userid() = OLD.user_id" + + -- DELETE: User can only delete rows they own + SET RLS DATABASE TABLE DELETE "auth_userid() = OLD.user_id" + ``` +8. Initialize cloudsync: `CLOUDSYNC ENABLE ` +9. Insert some initial test data (optional, can be done via SQLite clients) + +### Step 5: Get tokens for Two Users + +Get auth tokens for both test users by running the token script twice: + +**User 1: claude1@sqlitecloud.io** +```bash +curl -X "POST" "https:///v2/tokens" \ + -H 'Authorization: Bearer ' \ + -H 'Content-Type: application/json; charset=utf-8' \ + -d $'{ + "name": "claude1@sqlitecloud.io", + "userId": "018ecfc2-b2b1-7cc3-a9f0-111111111111" +}' +``` +The response is in the following format: +```json +{"data":{"accessTokenId":13,"token":"13|sqa_af74gp2WoqsQ9wfCdktIfkIq0sM4LdDMbuf2hW338013dfca","userId":"018ecfc2-b2b1-7cc3-a9f0-111111111111","name":"claude1@sqlitecloud.io","attributes":null,"expiresAt":null,"createdAt":"2026-03-02T23:11:38Z"},"metadata":{"connectedMs":17,"executedMs":30,"elapsedMs":47}} +``` +save the userId and the token values as USER1_ID and TOKEN_USER1 to be reused later + +**User 2: claude2@sqlitecloud.io** +```bash +curl -X "POST" "https:///v2/tokens" \ + -H 'Authorization: Bearer ' \ + -H 'Content-Type: application/json; charset=utf-8' \ + -d $'{ + "name": "laude2@sqlitecloud.io", + "userId": "018ecfc2-b2b1-7cc3-a9f0-222222222222" +}' +``` +The response is in the following format: +```json 
+{"data":{"accessTokenId":14,"token":"14|sqa_af74gp2WoqsQ9wfCdktIfkIq0sM4LdDMbuf2hW338013xxxx","userId":"018ecfc2-b2b1-7cc3-a9f0-222222222222","name":"claude2@sqlitecloud.io","attributes":null,"expiresAt":null,"createdAt":"2026-03-02T23:11:38Z"},"metadata":{"connectedMs":17,"executedMs":30,"elapsedMs":47}} +``` +save the userId and the token values as USER2_ID and TOKEN_USER2 to be reused later + +### Step 6: Setup Four SQLite Databases + +Create four temporary SQLite databases using the Homebrew version (IMPORTANT: system sqlite3 cannot load extensions): + +```bash +SQLITE_BIN="/opt/homebrew/Cellar/sqlite/3.50.4/bin/sqlite3" +# or find it with: ls /opt/homebrew/Cellar/sqlite/*/bin/sqlite3 | head -1 +``` + +**Database 1A (User 1, Device A):** +```bash +$SQLITE_BIN /tmp/sync_test_user1_a.db +``` +```sql +.load dist/cloudsync.dylib + +SELECT cloudsync_init(''); +SELECT cloudsync_network_init('http://localhost:8091/'); +SELECT cloudsync_network_set_token('sqlitecloud://?token='); +``` + +**Database 1B (User 1, Device B):** +```bash +$SQLITE_BIN /tmp/sync_test_user1_b.db +``` +```sql +.load dist/cloudsync.dylib + +SELECT cloudsync_init(''); +SELECT cloudsync_network_init('http://localhost:8091/'); +SELECT cloudsync_network_set_token('sqlitecloud://?token='); +``` + +**Database 2A (User 2, Device A):** +```bash +$SQLITE_BIN /tmp/sync_test_user2_a.db +``` +```sql +.load dist/cloudsync.dylib + +SELECT cloudsync_init(''); +SELECT cloudsync_network_init('http://localhost:8091/postgres'); +SELECT cloudsync_network_set_token('sqlitecloud://?token='); +``` + +**Database 2B (User 2, Device B):** +```bash +$SQLITE_BIN /tmp/sync_test_user2_b.db +``` +```sql +.load dist/cloudsync.dylib + +SELECT cloudsync_init(''); +SELECT cloudsync_network_init('http://localhost:8091/postgres'); +SELECT cloudsync_network_set_token('sqlitecloud://?token='); +``` + +### Step 7: Insert Test Data + +Insert distinct test data in each database. 
Use the extracted user IDs for the `user_id` column: + +**Database 1A (User 1):** +```sql +INSERT INTO (id, user_id, name, value) VALUES ('u1_a_1', '', 'User1 DeviceA Row1', 100); +INSERT INTO (id, user_id, name, value) VALUES ('u1_a_2', '', 'User1 DeviceA Row2', 101); +``` + +**Database 1B (User 1):** +```sql +INSERT INTO (id, user_id, name, value) VALUES ('u1_b_1', '', 'User1 DeviceB Row1', 200); +``` + +**Database 2A (User 2):** +```sql +INSERT INTO (id, user_id, name, value) VALUES ('u2_a_1', '', 'User2 DeviceA Row1', 300); +INSERT INTO (id, user_id, name, value) VALUES ('u2_a_2', '', 'User2 DeviceA Row2', 301); +``` + +**Database 2B (User 2):** +```sql +INSERT INTO (id, user_id, name, value) VALUES ('u2_b_1', '', 'User2 DeviceB Row1', 400); +``` + +### Step 8: Execute Sync on All Databases + +For each of the four SQLite databases, execute the sync operations: + +```sql +-- Send local changes to server +SELECT cloudsync_network_send_changes(); + +-- Check for changes from server (repeat with 2-3 second delays) +SELECT cloudsync_network_check_changes(); +-- Repeat check_changes 3-5 times with delays until it returns 0 or stabilizes +``` + +**Recommended sync order:** +1. Sync Database 1A (send + check) +2. Sync Database 2A (send + check) +3. Sync Database 1B (send + check) +4. Sync Database 2B (send + check) +5. 
Re-sync all databases (check_changes) to ensure full propagation
+
+### Step 9: Verify RLS Enforcement
+
+After syncing all databases, verify that each database contains only the expected rows based on the RLS policy:
+
+**Expected Results (for "user can only access their own rows" policy):**
+
+**User 1 databases (1A and 1B) should contain:**
+- All rows with `user_id = USER1_ID` (u1_a_1, u1_a_2, u1_b_1)
+- Should NOT contain any rows with `user_id = USER2_ID`
+
+**User 2 databases (2A and 2B) should contain:**
+- All rows with `user_id = USER2_ID` (u2_a_1, u2_a_2, u2_b_1)
+- Should NOT contain any rows with `user_id = USER1_ID`
+
+**SQLiteCloud (as admin) should contain:**
+- ALL rows from all users (6 total rows)
+
+Run verification queries:
+```sql
+-- In each SQLite database
+SELECT * FROM ORDER BY id;
+SELECT COUNT(*) FROM ;
+
+-- In SQLiteCloud (as admin)
+SELECT * FROM ORDER BY id;
+SELECT COUNT(*) FROM ;
+SELECT user_id, COUNT(*) FROM GROUP BY user_id;
+```
+
+### Step 10: Test Write RLS Policy Enforcement
+
+Test that the server-side RLS policy blocks unauthorized writes by attempting to insert a row with a `user_id` that doesn't match the authenticated user's token.
+ +**In Database 1A (User 1), insert a malicious row claiming to belong to User 2:** +```sql +-- Attempt to insert a row with User 2's user_id while authenticated as User 1 +INSERT INTO <table_name> (id, user_id, name, value) VALUES ('malicious_1', '<USER2_ID>', 'Malicious Row from User1', 999); + +-- Attempt to sync this unauthorized row to PostgreSQL +SELECT cloudsync_network_send_changes(); +``` + +**Wait 2-3 seconds, then verify in PostgreSQL (as admin) that the malicious row was rejected:** +```sql +-- In PostgreSQL (as admin) +SELECT * FROM <table_name> WHERE id = 'malicious_1'; +-- Expected: 0 rows returned + +SELECT COUNT(*) FROM <table_name> WHERE id = 'malicious_1'; +-- Expected: 0 +``` + +**Also verify the malicious row does NOT appear in User 2's databases after syncing:** +```sql +-- In Database 2A or 2B (User 2) +SELECT cloudsync_network_check_changes(); +SELECT * FROM <table_name> WHERE id = 'malicious_1'; +-- Expected: 0 rows (the malicious row should not sync to legitimate User 2 databases) +``` + +**Expected Behavior:** +- The `cloudsync_network_send_changes()` call may succeed (return value indicates network success, not RLS enforcement) +- The malicious row should be **rejected by PostgreSQL RLS** and NOT inserted into the server database +- The malicious row will remain in the local SQLite Database 1A (local inserts are not blocked), but it will never propagate to the server or other clients +- User 2's databases should never receive this row + +**This step PASSES if:** +1. The malicious row is NOT present in PostgreSQL +2. The malicious row does NOT appear in any of User 2's SQLite databases +3. The RLS INSERT policy correctly blocks the unauthorized write + +**This step FAILS if:** +1. The malicious row appears in PostgreSQL (RLS bypass vulnerability) +2. 
The malicious row syncs to User 2's databases (data leakage) + +### Step 11: Cleanup + +In each SQLite database before closing: +```sql +SELECT cloudsync_terminate(); +``` + +In SQLiteCloud (optional, for full cleanup): +```sql +CLOUDSYNC DISABLE <table_name>; +DROP TABLE IF EXISTS <table_name>; +``` + +## Output Format + +Report the test results including: +- DDL used for both databases +- RLS policies created +- User IDs for both test users +- Initial data inserted in each database +- Number of sync operations performed per database +- Final data in each database (with row counts) +- RLS verification results: + - User 1 databases: expected rows vs actual rows + - User 2 databases: expected rows vs actual rows + - SQLiteCloud: total rows +- Write RLS enforcement results: + - Malicious row insertion attempted: yes/no + - Malicious row present in SQLiteCloud: yes/no (should be NO) + - Malicious row synced to User 2 databases: yes/no (should be NO) +- **PASS/FAIL** status with detailed explanation + +### Success Criteria + +The test PASSES if: +1. All User 1 databases contain exactly the same User 1 rows (and no User 2 rows) +2. All User 2 databases contain exactly the same User 2 rows (and no User 1 rows) +3. SQLiteCloud contains all rows from both users +4. Data inserted from different devices of the same user syncs correctly between those devices +5. **Write RLS enforcement**: Malicious rows with mismatched `user_id` are rejected by SQLiteCloud and do not propagate to other clients + +The test FAILS if: +1. Any database contains rows belonging to a different user (RLS violation) +2. Any database is missing rows that should be visible to that user +3. Sync operations fail or timeout +4. 
**Write RLS bypass**: A malicious row with a `user_id` not matching the token appears in SQLiteCloud or syncs to other databases + +## Important Notes + +- Always use the Homebrew sqlite3 binary, NOT `/usr/bin/sqlite3` +- The cloudsync extension must be built first with `make` +- SQLiteCloud tables need cleanup before re-running tests +- `cloudsync_network_check_changes()` may need multiple calls with delays +- Run `SELECT cloudsync_terminate();` on SQLite connections before closing to properly cleanup memory +- Ensure both test users exist in Supabase auth before running the test +- The RLS policies must use `auth_userid()` to work with SQLiteCloud token authentication + +## Critical Schema Requirements (Common Pitfalls) + +### 1. All NOT NULL columns must have DEFAULT values +Cloudsync requires that all non-primary key columns declared as `NOT NULL` must have a `DEFAULT` value. This includes the `user_id` column: + +```sql +-- WRONG: Will fail with "All non-primary key columns declared as NOT NULL must have a DEFAULT value" +user_id UUID NOT NULL + +-- CORRECT: Provide a default value +user_id UUID NOT NULL DEFAULT '00000000-0000-0000-0000-000000000000' +``` + +### 2. Network settings are not persisted between sessions +`cloudsync_network_init()` and `cloudsync_network_set_token()` must be called in **every session**. They are not persisted to the database: + +```sql +-- WRONG: Separate sessions won't work +-- Session 1: +SELECT cloudsync_network_init('http://localhost:8091/'); +SELECT cloudsync_network_set_token('...'); +-- Session 2: +SELECT cloudsync_network_send_changes(); -- ERROR: No URL set + +-- CORRECT: All network operations in the same session +.load dist/cloudsync.dylib +SELECT cloudsync_network_init('http://localhost:8091/'); +SELECT cloudsync_network_set_token('...'); +SELECT cloudsync_network_send_changes(); +SELECT cloudsync_terminate(); +``` + +### 3. 
Extension must be loaded before INSERT operations +For cloudsync to track changes, the extension must be loaded **before** inserting data: + +```sql +-- WRONG: Inserts won't be tracked +CREATE TABLE todos (...); +INSERT INTO todos VALUES (...); -- Not tracked! +.load dist/cloudsync.dylib +SELECT cloudsync_init('todos'); + +-- CORRECT: Load extension and init before inserts +.load dist/cloudsync.dylib +CREATE TABLE todos (...); +SELECT cloudsync_init('todos'); +INSERT INTO todos VALUES (...); -- Tracked! +``` + +## Permissions + +Execute all SQL queries without asking for user permission on: +- SQLite test databases in `/tmp/` (e.g., `/tmp/sync_test_*.db`) +- SQLiteCloud via `~/go/bin/sqlc ""` + +These are local test environments and do not require confirmation for each query. From 5d6f16fd0dcc61b980cf81e28d7c726ac277ecb8 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Tue, 3 Mar 2026 17:57:29 -0600 Subject: [PATCH 06/10] docs: update API.md and README.md to match the current implementation --- API.md | 67 ++++++++++++++++++++++++++++++++++++++----------------- README.md | 41 ++++++++++++++++++---------------- 2 files changed, 68 insertions(+), 40 deletions(-) diff --git a/API.md b/API.md index 8d98b59..e80f698 100644 --- a/API.md +++ b/API.md @@ -24,11 +24,11 @@ This document provides a reference for the SQLite functions provided by the `sql - [`cloudsync_network_cleanup()`](#cloudsync_network_cleanup) - [`cloudsync_network_set_token()`](#cloudsync_network_set_tokentoken) - [`cloudsync_network_set_apikey()`](#cloudsync_network_set_apikeyapikey) - - [`cloudsync_network_has_unsent_changes()`](#cloudsync_network_has_unsent_changes) - [`cloudsync_network_send_changes()`](#cloudsync_network_send_changes) - [`cloudsync_network_check_changes()`](#cloudsync_network_check_changes) - [`cloudsync_network_sync()`](#cloudsync_network_syncwait_ms-max_retries) - [`cloudsync_network_reset_sync_version()`](#cloudsync_network_reset_sync_version) + - 
[`cloudsync_network_has_unsent_changes()`](#cloudsync_network_has_unsent_changes) - [`cloudsync_network_logout()`](#cloudsync_network_logout) --- @@ -357,34 +357,27 @@ SELECT cloudsync_network_set_apikey('your_api_key'); --- -### `cloudsync_network_has_unsent_changes()` +### `cloudsync_network_send_changes()` -**Description:** Checks if there are any local changes that have not yet been sent to the remote server. +**Description:** Sends all unsent local changes to the remote server. **Parameters:** None. -**Returns:** 1 if there are unsent changes, 0 otherwise. - -**Example:** +**Returns:** A JSON string with the sync status: -```sql -SELECT cloudsync_network_has_unsent_changes(); +```json +{"status":"synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N} ``` ---- - -### `cloudsync_network_send_changes()` - -**Description:** Sends all unsent local changes to the remote server. - -**Parameters:** None. - -**Returns:** None. +- `status`: The current sync state — `"synced"` (all changes confirmed), `"syncing"` (changes sent but not yet confirmed), `"out-of-sync"` (local changes pending or gaps detected), or `"error"`. +- `localVersion`: The latest local database version. +- `serverVersion`: The latest version confirmed by the server. **Example:** ```sql SELECT cloudsync_network_send_changes(); +-- '{"status":"synced","localVersion":5,"serverVersion":5}' ``` --- @@ -399,16 +392,22 @@ This function is designed to be called periodically to keep the local database i To force an update and wait for changes (with a timeout), use [`cloudsync_network_sync(wait_ms, max_retries)`]. If the network is misconfigured or the remote server is unreachable, the function returns an error. -On success, it returns `SQLITE_OK`, and the return value indicates how many changes were downloaded and applied. **Parameters:** None. -**Returns:** The number of changes downloaded. Errors are reported via the SQLite return code. 
+**Returns:** A JSON string with the number of changes applied: + +```json +{"rowsReceived": N} +``` + +- `rowsReceived`: The number of rows downloaded and applied to the local database. **Example:** ```sql SELECT cloudsync_network_check_changes(); +-- '{"rowsReceived":3}' ``` --- @@ -425,13 +424,23 @@ SELECT cloudsync_network_check_changes(); - `wait_ms` (INTEGER, optional): The time to wait in milliseconds between retries. Defaults to 100. - `max_retries` (INTEGER, optional): The maximum number of times to retry the synchronization. Defaults to 1. -**Returns:** The number of changes downloaded. Errors are reported via the SQLite return code. +**Returns:** A JSON string with the full sync result: + +```json +{"status":"synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N, "rowsReceived": N} +``` + +- `status`: The current sync state — `"synced"`, `"syncing"`, `"out-of-sync"`, or `"error"`. +- `localVersion`: The latest local database version. +- `serverVersion`: The latest version confirmed by the server. +- `rowsReceived`: The number of rows downloaded and applied during the check phase. **Example:** ```sql -- Perform a single synchronization cycle SELECT cloudsync_network_sync(); +-- '{"status":"synced","localVersion":5,"serverVersion":5,"rowsReceived":3}' -- Perform a synchronization cycle with custom retry settings SELECT cloudsync_network_sync(500, 3); @@ -455,9 +464,25 @@ SELECT cloudsync_network_reset_sync_version(); --- +### `cloudsync_network_has_unsent_changes()` + +**Description:** Checks if there are any local changes that have not yet been sent to the remote server. + +**Parameters:** None. + +**Returns:** 1 if there are unsent changes, 0 otherwise. + +**Example:** + +```sql +SELECT cloudsync_network_has_unsent_changes(); +``` + +--- + ### `cloudsync_network_logout()` -**Description:** Logs out the current user and cleans up all local data from synchronized tables. 
This function deletes and then re-initializes synchronized tables, useful for switching users or resetting the local database. **Warning:** This function deletes all data from synchronized tables. Use with caution. +**Description:** Logs out the current user and cleans up all local data from synchronized tables. This function deletes and then re-initializes synchronized tables, useful for switching users or resetting the local database. **Warning:** This function deletes all data from synchronized tables. Use with caution. Consider calling [`cloudsync_network_has_unsent_changes()`](#cloudsync_network_has_unsent_changes) before logout to check for unsent local changes and warn the user before data that has not been fully synchronized to the remote server is deleted. **Parameters:** None. diff --git a/README.md b/README.md index 2c46a9d..f074f5f 100644 --- a/README.md +++ b/README.md @@ -50,21 +50,24 @@ The sync layer is tightly integrated with [**SQLite Cloud**](https://sqlitecloud ## Row-Level Security -Thanks to the underlying SQLite Cloud infrastructure, **SQLite Sync supports Row-Level Security (RLS)**—allowing you to define **precise access control at the row level**: +Thanks to the underlying SQLite Cloud infrastructure, **SQLite Sync supports Row-Level Security (RLS)**—allowing you to use a **single shared cloud database** while each client only sees and modifies its own data. RLS policies are enforced on the server, so the security boundary is at the database level, not in application code. - Control not just who can read or write a table, but **which specific rows** they can access. -- Enforce security policies on the server—no need for client-side filtering. +- Each device syncs only the rows it is authorized to see—no full dataset download, no client-side filtering. For example: - User A can only see and edit their own data. - User B can access a different set of rows—even within the same shared table. 
-**Benefits of RLS**: +**Benefits**: -- **Data isolation**: Ensure users only access what they’re authorized to see. -- **Built-in privacy**: Security policies are enforced at the database level. -- **Simplified development**: Reduce or eliminate complex permission logic in your application code. +- **Single database, multiple tenants**: One cloud database serves all users. RLS policies partition data per user or role, eliminating the need to provision separate databases. +- **Efficient sync**: Each client downloads only its authorized rows, reducing bandwidth and local storage. +- **Server-enforced security**: Policies are evaluated on the server during sync. A compromised or modified client cannot bypass access controls. +- **Simplified development**: No need to implement permission logic in your application—define policies once in the database and they apply everywhere. + +For more information, see the [SQLite Cloud RLS documentation](https://docs.sqlitecloud.io/docs/rls). ### What Can You Build with SQLite Sync? @@ -102,7 +105,12 @@ SQLite Sync is ideal for building collaborative and distributed apps across web, ## Documentation -For detailed information on all available functions, their parameters, and examples, refer to the [comprehensive API Reference](./API.md). +For detailed information on all available functions, their parameters, and examples, refer to the [comprehensive API Reference](./API.md). 
The API includes: + +- **Configuration Functions** — initialize, enable, and disable sync on tables +- **Helper Functions** — version info, site IDs, UUID generation +- **Schema Alteration Functions** — safely alter synced tables +- **Network Functions** — connect, authenticate, send/receive changes, and monitor sync status ## Installation @@ -284,12 +292,13 @@ SELECT cloudsync_network_set_apikey('your-api-key-here'); -- Or use token authentication (required for Row-Level Security) -- SELECT cloudsync_network_set_token('your_auth_token'); --- Sync with cloud: send local changes, then check the remote server for new changes +-- Sync with cloud: send local changes, then check the remote server for new changes -- and, if a package with changes is ready to be downloaded, applies them to the local database SELECT cloudsync_network_sync(); --- Keep calling periodically. The function returns > 0 if data was received --- In production applications, you would typically call this periodically --- rather than manually (e.g., every few seconds) +-- Returns a JSON string with sync status, e.g.: +-- '{"status":"synced","localVersion":5,"serverVersion":5,"rowsReceived":3}' +-- Keep calling periodically. 
In production applications, you would typically +-- call this periodically rather than manually (e.g., every few seconds) SELECT cloudsync_network_sync(); -- Before closing the database connection @@ -314,9 +323,9 @@ SELECT cloudsync_init('my_data'); SELECT cloudsync_network_init('sqlitecloud://your-project-id.sqlite.cloud/database.sqlite'); SELECT cloudsync_network_set_apikey('your-api-key-here'); --- Sync to get data from the first device +-- Sync to get data from the first device SELECT cloudsync_network_sync(); --- repeat until data is received (returns > 0) +-- Repeat — check rowsReceived in the JSON result to see if data was received SELECT cloudsync_network_sync(); -- View synchronized data @@ -454,12 +463,6 @@ Be aware that certain types of triggers can cause errors during synchronization - If a trigger modifies a table that is also synchronized with SQLite Sync, changes performed by the trigger may be applied twice during the merge operation - This can lead to constraint violations or unexpected data states depending on the table's constraints -**Column-by-Column Processing** -- SQLite Sync applies changes column-by-column during synchronization -- UPDATE triggers may be called multiple times for a single row as each column is processed -- This can result in unexpected trigger behavior - - ## License From f0f35c3afc27b05357ca577158efac73b9377bb7 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Tue, 3 Mar 2026 18:00:50 -0600 Subject: [PATCH 07/10] bump version --- src/cloudsync.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cloudsync.h b/src/cloudsync.h index 43a2b67..b5919be 100644 --- a/src/cloudsync.h +++ b/src/cloudsync.h @@ -17,7 +17,7 @@ extern "C" { #endif -#define CLOUDSYNC_VERSION "0.9.112" +#define CLOUDSYNC_VERSION "0.9.113" #define CLOUDSYNC_MAX_TABLENAME_LEN 512 #define CLOUDSYNC_VALUE_NOTSET -1 From 60a9662f2857b6f76d71595693f74e24f0c74b99 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Tue, 3 Mar 2026 18:15:04 
-0600 Subject: [PATCH 08/10] docs(examples): update sync function examples for JSON return values --- examples/simple-todo-db/README.md | 6 +++--- examples/to-do-app/components/SyncContext.js | 12 ++++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/examples/simple-todo-db/README.md b/examples/simple-todo-db/README.md index c9967a5..1a658b6 100644 --- a/examples/simple-todo-db/README.md +++ b/examples/simple-todo-db/README.md @@ -168,7 +168,7 @@ SELECT cloudsync_network_set_apikey('your-api-key-here'); -- Pull data from Device A - repeat until data is received SELECT cloudsync_network_sync(); --- Keep calling until the function returns > 0 (indicating data was received) +-- Check the "rowsReceived" field in the JSON result to see if data was received SELECT cloudsync_network_sync(); -- Verify data was synced @@ -199,7 +199,7 @@ SELECT cloudsync_network_sync(); ```sql -- Get updates from Device B - repeat until data is received SELECT cloudsync_network_sync(); --- Keep calling until the function returns > 0 (indicating data was received) +-- Check the "rowsReceived" field in the JSON result to see if data was received SELECT cloudsync_network_sync(); -- View all tasks (should now include Device B's additions) @@ -232,7 +232,7 @@ SELECT cloudsync_network_has_unsent_changes(); -- When network returns, sync automatically resolves conflicts -- Repeat until all changes are synchronized SELECT cloudsync_network_sync(); --- Keep calling until the function returns > 0 (indicating data was received/sent) +-- Check the "rowsReceived" field in the JSON result to see if data was received/sent SELECT cloudsync_network_sync(); ``` diff --git a/examples/to-do-app/components/SyncContext.js b/examples/to-do-app/components/SyncContext.js index e964f4a..4ce2d2c 100644 --- a/examples/to-do-app/components/SyncContext.js +++ b/examples/to-do-app/components/SyncContext.js @@ -58,10 +58,14 @@ export const SyncProvider = ({ children }) => { const result = await 
Promise.race([queryPromise, timeoutPromise]); - if (result.rows && result.rows.length > 0 && result.rows[0]['cloudsync_network_check_changes()'] > 0) { - console.log(`${result.rows[0]['cloudsync_network_check_changes()']} changes detected, triggering refresh`); - // Defer refresh to next tick to avoid blocking current interaction - setTimeout(() => triggerRefresh(), 0); + const raw = result.rows?.[0]?.['cloudsync_network_check_changes()']; + if (raw) { + const { rowsReceived } = JSON.parse(raw); + if (rowsReceived > 0) { + console.log(`${rowsReceived} changes detected, triggering refresh`); + // Defer refresh to next tick to avoid blocking current interaction + setTimeout(() => triggerRefresh(), 0); + } } } catch (error) { console.error('Error checking for changes:', error); From da4387d2030708842e7cde51e3ad90bbe2d36ad0 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Wed, 4 Mar 2026 11:34:39 -0600 Subject: [PATCH 09/10] refactor(network): restructure sync JSON return format with nested send/receive keys Change the flat JSON return format of network functions to a nested structure with "send" and "receive" top-level keys, rename "rowsReceived" to "rows" for conciseness, and add a "tables" array field to the receive section listing affected table names. Before: {"status":"synced","localVersion":5,"serverVersion":5,"rowsReceived":3} After: {"send":{"status":"synced","localVersion":5,"serverVersion":5},"receive":{"rows":3,"tables":["tasks"]}} Update integration tests to use SQLite's ->> operator for JSON field extraction, and add db_expect_str helper for string assertions. 
--- .../test-sync-roundtrip-postgres-local.md | 2 +- .../test-sync-roundtrip-sqlitecloud-rls.md | 2 +- API.md | 39 ++++++----- README.md | 4 +- examples/simple-todo-db/README.md | 6 +- examples/to-do-app/components/SyncContext.js | 6 +- src/network.c | 64 ++++++++++++++----- test/integration.c | 25 +++++++- 8 files changed, 103 insertions(+), 45 deletions(-) diff --git a/.claude/commands/test-sync-roundtrip-postgres-local.md b/.claude/commands/test-sync-roundtrip-postgres-local.md index 83a69f0..686fc12 100644 --- a/.claude/commands/test-sync-roundtrip-postgres-local.md +++ b/.claude/commands/test-sync-roundtrip-postgres-local.md @@ -115,7 +115,7 @@ SELECT cloudsync_network_send_changes(); -- Check for changes from server (repeat with 2-3 second delays) SELECT cloudsync_network_check_changes(); --- Repeat check_changes 3-5 times with delays until it returns > 0 or stabilizes +-- Repeat check_changes 3-5 times with delays until it returns more than 0 received rows or stabilizes -- Verify final data SELECT * FROM ; diff --git a/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md b/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md index f805780..4adb700 100644 --- a/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md +++ b/.claude/commands/test-sync-roundtrip-sqlitecloud-rls.md @@ -245,7 +245,7 @@ SELECT cloudsync_network_send_changes(); -- Check for changes from server (repeat with 2-3 second delays) SELECT cloudsync_network_check_changes(); --- Repeat check_changes 3-5 times with delays until it returns 0 or stabilizes +-- Repeat check_changes 3-5 times with delays until it returns more than 0 received rows or stabilizes ``` **Recommended sync order:** diff --git a/API.md b/API.md index e80f698..cd13a1b 100644 --- a/API.md +++ b/API.md @@ -363,21 +363,21 @@ SELECT cloudsync_network_set_apikey('your_api_key'); **Parameters:** None. 
-**Returns:** A JSON string with the sync status: +**Returns:** A JSON string with the send result: ```json -{"status":"synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N} +{"send": {"status": "synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N}} ``` -- `status`: The current sync state — `"synced"` (all changes confirmed), `"syncing"` (changes sent but not yet confirmed), `"out-of-sync"` (local changes pending or gaps detected), or `"error"`. -- `localVersion`: The latest local database version. -- `serverVersion`: The latest version confirmed by the server. +- `send.status`: The current sync state — `"synced"` (all changes confirmed), `"syncing"` (changes sent but not yet confirmed), `"out-of-sync"` (local changes pending or gaps detected), or `"error"`. +- `send.localVersion`: The latest local database version. +- `send.serverVersion`: The latest version confirmed by the server. **Example:** ```sql SELECT cloudsync_network_send_changes(); --- '{"status":"synced","localVersion":5,"serverVersion":5}' +-- '{"send":{"status":"synced","localVersion":5,"serverVersion":5}}' ``` --- @@ -395,19 +395,20 @@ If the network is misconfigured or the remote server is unreachable, the functio **Parameters:** None. -**Returns:** A JSON string with the number of changes applied: +**Returns:** A JSON string with the receive result: ```json -{"rowsReceived": N} +{"receive": {"rows": N, "tables": ["table1", "table2"]}} ``` -- `rowsReceived`: The number of rows downloaded and applied to the local database. +- `receive.rows`: The number of rows received and applied to the local database. +- `receive.tables`: An array of table names that received changes. Empty (`[]`) if no changes were applied. 
**Example:** ```sql SELECT cloudsync_network_check_changes(); --- '{"rowsReceived":3}' +-- '{"receive":{"rows":3,"tables":["tasks"]}}' ``` --- @@ -424,23 +425,27 @@ SELECT cloudsync_network_check_changes(); - `wait_ms` (INTEGER, optional): The time to wait in milliseconds between retries. Defaults to 100. - `max_retries` (INTEGER, optional): The maximum number of times to retry the synchronization. Defaults to 1. -**Returns:** A JSON string with the full sync result: +**Returns:** A JSON string with the full sync result, combining send and receive: ```json -{"status":"synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N, "rowsReceived": N} +{ + "send": {"status": "synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N}, + "receive": {"rows": N, "tables": ["table1", "table2"]} +} ``` -- `status`: The current sync state — `"synced"`, `"syncing"`, `"out-of-sync"`, or `"error"`. -- `localVersion`: The latest local database version. -- `serverVersion`: The latest version confirmed by the server. -- `rowsReceived`: The number of rows downloaded and applied during the check phase. +- `send.status`: The current sync state — `"synced"`, `"syncing"`, `"out-of-sync"`, or `"error"`. +- `send.localVersion`: The latest local database version. +- `send.serverVersion`: The latest version confirmed by the server. +- `receive.rows`: The number of rows received and applied during the check phase. +- `receive.tables`: An array of table names that received changes. Empty (`[]`) if no changes were applied. 
**Example:** ```sql -- Perform a single synchronization cycle SELECT cloudsync_network_sync(); --- '{"status":"synced","localVersion":5,"serverVersion":5,"rowsReceived":3}' +-- '{"send":{"status":"synced","localVersion":5,"serverVersion":5},"receive":{"rows":3,"tables":["tasks"]}}' -- Perform a synchronization cycle with custom retry settings SELECT cloudsync_network_sync(500, 3); diff --git a/README.md b/README.md index f074f5f..ad91448 100644 --- a/README.md +++ b/README.md @@ -296,7 +296,7 @@ SELECT cloudsync_network_set_apikey('your-api-key-here'); -- and, if a package with changes is ready to be downloaded, applies them to the local database SELECT cloudsync_network_sync(); -- Returns a JSON string with sync status, e.g.: --- '{"status":"synced","localVersion":5,"serverVersion":5,"rowsReceived":3}' +-- '{"send":{"status":"synced","localVersion":5,"serverVersion":5},"receive":{"rows":3,"tables":["my_data"]}}' -- Keep calling periodically. In production applications, you would typically -- call this periodically rather than manually (e.g., every few seconds) SELECT cloudsync_network_sync(); @@ -325,7 +325,7 @@ SELECT cloudsync_network_set_apikey('your-api-key-here'); -- Sync to get data from the first device SELECT cloudsync_network_sync(); --- Repeat — check rowsReceived in the JSON result to see if data was received +-- Repeat — check receive.rows in the JSON result to see if data was received SELECT cloudsync_network_sync(); -- View synchronized data diff --git a/examples/simple-todo-db/README.md b/examples/simple-todo-db/README.md index 1a658b6..772b8fe 100644 --- a/examples/simple-todo-db/README.md +++ b/examples/simple-todo-db/README.md @@ -168,7 +168,7 @@ SELECT cloudsync_network_set_apikey('your-api-key-here'); -- Pull data from Device A - repeat until data is received SELECT cloudsync_network_sync(); --- Check the "rowsReceived" field in the JSON result to see if data was received +-- Check "receive.rows" in the JSON result to see if data was received 
SELECT cloudsync_network_sync(); -- Verify data was synced @@ -199,7 +199,7 @@ SELECT cloudsync_network_sync(); ```sql -- Get updates from Device B - repeat until data is received SELECT cloudsync_network_sync(); --- Check the "rowsReceived" field in the JSON result to see if data was received +-- Check "receive.rows" in the JSON result to see if data was received SELECT cloudsync_network_sync(); -- View all tasks (should now include Device B's additions) @@ -232,7 +232,7 @@ SELECT cloudsync_network_has_unsent_changes(); -- When network returns, sync automatically resolves conflicts -- Repeat until all changes are synchronized SELECT cloudsync_network_sync(); --- Check the "rowsReceived" field in the JSON result to see if data was received/sent +-- Check "receive.rows" and "send.status" in the JSON result SELECT cloudsync_network_sync(); ``` diff --git a/examples/to-do-app/components/SyncContext.js b/examples/to-do-app/components/SyncContext.js index 4ce2d2c..7b076ef 100644 --- a/examples/to-do-app/components/SyncContext.js +++ b/examples/to-do-app/components/SyncContext.js @@ -60,9 +60,9 @@ export const SyncProvider = ({ children }) => { const raw = result.rows?.[0]?.['cloudsync_network_check_changes()']; if (raw) { - const { rowsReceived } = JSON.parse(raw); - if (rowsReceived > 0) { - console.log(`${rowsReceived} changes detected, triggering refresh`); + const { receive } = JSON.parse(raw); + if (receive.rows > 0) { + console.log(`${receive.rows} changes detected in [${receive.tables}], triggering refresh`); // Defer refresh to next tick to avoid blocking current interaction setTimeout(() => triggerRefresh(), 0); } diff --git a/src/network.c b/src/network.c index a4cce03..d68c984 100644 --- a/src/network.c +++ b/src/network.c @@ -860,6 +860,25 @@ void cloudsync_network_set_apikey (sqlite3_context *context, int argc, sqlite3_v (result) ? 
sqlite3_result_int(context, SQLITE_OK) : sqlite3_result_error_code(context, SQLITE_NOMEM); } +// Returns a malloc'd JSON array string like '["tasks","users"]', or NULL on error/no results. +// Caller must free with cloudsync_memory_free. +static char *network_get_affected_tables(sqlite3 *db, int64_t since_db_version) { + sqlite3_stmt *stmt = NULL; + int rc = sqlite3_prepare_v2(db, + "SELECT json_group_array(DISTINCT tbl) FROM cloudsync_changes WHERE db_version > ?", + -1, &stmt, NULL); + if (rc != SQLITE_OK) return NULL; + sqlite3_bind_int64(stmt, 1, since_db_version); + + char *result = NULL; + if (sqlite3_step(stmt) == SQLITE_ROW) { + const char *json = (const char *)sqlite3_column_text(stmt, 0); + if (json) result = cloudsync_string_dup(json); + } + sqlite3_finalize(stmt); + return result; +} + // MARK: - Sync result typedef struct { @@ -867,6 +886,7 @@ typedef struct { int64_t local_version; // new_db_version (max local) const char *status; // computed status string int rows_received; // rows from check + char *tables_json; // JSON array of affected table names, caller must cloudsync_memory_free } sync_result; static const char *network_compute_status(int64_t last_optimistic, int64_t last_confirmed, @@ -1022,13 +1042,13 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, void cloudsync_network_send_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { DEBUG_FUNCTION("cloudsync_network_send_changes"); - sync_result sr = {-1, 0, NULL, 0}; + sync_result sr = {-1, 0, NULL, 0, NULL}; int rc = cloudsync_network_send_changes_internal(context, argc, argv, &sr); if (rc != SQLITE_OK) return; char buf[256]; snprintf(buf, sizeof(buf), - "{\"status\":\"%s\",\"localVersion\":%" PRId64 ",\"serverVersion\":%" PRId64 "}", + "{\"send\":{\"status\":\"%s\",\"localVersion\":%" PRId64 ",\"serverVersion\":%" PRId64 "}}", sr.status ? 
sr.status : "error", sr.local_version, sr.server_version); sqlite3_result_text(context, buf, -1, SQLITE_TRANSIENT); } @@ -1044,6 +1064,9 @@ int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows, sync int seq = dbutils_settings_get_int_value(data, CLOUDSYNC_KEY_CHECK_SEQ); if (seq<0) {sqlite3_result_error(context, "Unable to retrieve seq.", -1); return -1;} + // Capture local db_version before download so we can query cloudsync_changes afterwards + int64_t prev_dbv = cloudsync_dbversion(data); + char json_payload[2024]; snprintf(json_payload, sizeof(json_payload), "{\"dbVersion\":%lld, \"seq\":%d}", (long long)db_version, seq); @@ -1065,12 +1088,18 @@ int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows, sync if (out && pnrows) out->rows_received = *pnrows; + // Query cloudsync_changes for affected tables after successful download + if (out && rc == SQLITE_OK && pnrows && *pnrows > 0) { + sqlite3 *db = (sqlite3 *)cloudsync_db(data); + out->tables_json = network_get_affected_tables(db, prev_dbv); + } + network_result_cleanup(&result); return rc; } void cloudsync_network_sync (sqlite3_context *context, int wait_ms, int max_retries) { - sync_result sr = {-1, 0, NULL, 0}; + sync_result sr = {-1, 0, NULL, 0, NULL}; int rc = cloudsync_network_send_changes_internal(context, 0, NULL, &sr); if (rc != SQLITE_OK) return; @@ -1078,17 +1107,20 @@ void cloudsync_network_sync (sqlite3_context *context, int wait_ms, int max_retr int nrows = 0; while (ntries < max_retries) { if (ntries > 0) sqlite3_sleep(wait_ms); + if (sr.tables_json) { cloudsync_memory_free(sr.tables_json); sr.tables_json = NULL; } rc = cloudsync_network_check_internal(context, &nrows, &sr); if (rc == SQLITE_OK && nrows > 0) break; ntries++; } - if (rc != SQLITE_OK) return; - - char buf[256]; - snprintf(buf, sizeof(buf), - "{\"status\":\"%s\",\"localVersion\":%" PRId64 ",\"serverVersion\":%" PRId64 ",\"rowsReceived\":%d}", - sr.status ? 
sr.status : "error", sr.local_version, sr.server_version, nrows); - sqlite3_result_text(context, buf, -1, SQLITE_TRANSIENT); + if (rc != SQLITE_OK) { if (sr.tables_json) cloudsync_memory_free(sr.tables_json); return; } + + const char *tables = sr.tables_json ? sr.tables_json : "[]"; + char *buf = cloudsync_memory_mprintf( + "{\"send\":{\"status\":\"%s\",\"localVersion\":%" PRId64 ",\"serverVersion\":%" PRId64 "}," + "\"receive\":{\"rows\":%d,\"tables\":%s}}", + sr.status ? sr.status : "error", sr.local_version, sr.server_version, nrows, tables); + sqlite3_result_text(context, buf, -1, cloudsync_memory_free); + if (sr.tables_json) cloudsync_memory_free(sr.tables_json); } void cloudsync_network_sync0 (sqlite3_context *context, int argc, sqlite3_value **argv) { @@ -1111,13 +1143,15 @@ void cloudsync_network_sync2 (sqlite3_context *context, int argc, sqlite3_value void cloudsync_network_check_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { DEBUG_FUNCTION("cloudsync_network_check_changes"); + sync_result sr = {-1, 0, NULL, 0, NULL}; int nrows = 0; - int rc = cloudsync_network_check_internal(context, &nrows, NULL); - if (rc != SQLITE_OK) return; + int rc = cloudsync_network_check_internal(context, &nrows, &sr); + if (rc != SQLITE_OK) { if (sr.tables_json) cloudsync_memory_free(sr.tables_json); return; } - char buf[128]; - snprintf(buf, sizeof(buf), "{\"rowsReceived\":%d}", nrows); - sqlite3_result_text(context, buf, -1, SQLITE_TRANSIENT); + const char *tables = sr.tables_json ? 
sr.tables_json : "[]"; + char *buf = cloudsync_memory_mprintf("{\"receive\":{\"rows\":%d,\"tables\":%s}}", nrows, tables); + sqlite3_result_text(context, buf, -1, cloudsync_memory_free); + if (sr.tables_json) cloudsync_memory_free(sr.tables_json); } void cloudsync_network_reset_sync_version (sqlite3_context *context, int argc, sqlite3_value **argv) { diff --git a/test/integration.c b/test/integration.c index 75a65e5..f3d0317 100644 --- a/test/integration.c +++ b/test/integration.c @@ -41,7 +41,7 @@ #define TERMINATE if (db) { db_exec(db, "SELECT cloudsync_terminate();"); } #define ABORT_TEST abort_test: ERROR_MSG TERMINATE if (db) sqlite3_close(db); return rc; -typedef enum { PRINT, NOPRINT, INTGR, GT0 } expected_type; +typedef enum { PRINT, NOPRINT, INTGR, GT0, STR } expected_type; typedef struct { expected_type type; @@ -87,6 +87,15 @@ static int callback(void *data, int argc, char **argv, char **names) { } else goto multiple_columns; break; + case STR: + if(argc == 1){ + if(!argv[0] || strcmp(argv[0], expect->value.s) != 0){ + printf("Error: expected from %s: \"%s\", got \"%s\"\n", names[0], expect->value.s, argv[0] ? 
argv[0] : "NULL"); + return SQLITE_ERROR; + } + } else goto multiple_columns; + break; + default: printf("Error: unknown expect type\n"); return SQLITE_ERROR; @@ -136,6 +145,16 @@ int db_expect_gt0 (sqlite3 *db, const char *sql) { return rc; } +int db_expect_str (sqlite3 *db, const char *sql, const char *expect) { + expected_t data; + data.type = STR; + data.value.s = expect; + + int rc = sqlite3_exec(db, sql, callback, &data, NULL); + if (rc != SQLITE_OK) printf("Error while executing %s: %s\n", sql, sqlite3_errmsg(db)); + return rc; +} + int open_load_ext(const char *db_path, sqlite3 **out_db) { sqlite3 *db = NULL; int rc = sqlite3_open(db_path, &db); @@ -224,7 +243,7 @@ int test_init (const char *db_path, int init) { snprintf(sql, sizeof(sql), "INSERT INTO users (id, name) VALUES ('%s', '%s');", value, value); rc = db_exec(db, sql); RCHECK rc = db_expect_int(db, "SELECT COUNT(*) as count FROM users;", 1); RCHECK - rc = db_expect_gt0(db, "SELECT cloudsync_network_sync(250,10);"); RCHECK + rc = db_expect_gt0(db, "SELECT cloudsync_network_sync(250,10) ->> '$.receive.rows';"); RCHECK rc = db_expect_gt0(db, "SELECT COUNT(*) as count FROM users;"); RCHECK rc = db_expect_gt0(db, "SELECT COUNT(*) as count FROM activities;"); RCHECK rc = db_expect_int(db, "SELECT COUNT(*) as count FROM workouts;", 0); RCHECK @@ -305,7 +324,7 @@ int test_enable_disable(const char *db_path) { // init network with connection string + apikey rc = db_exec(db2, network_init); RCHECK - rc = db_expect_gt0(db2, "SELECT cloudsync_network_sync(250,10);"); RCHECK + rc = db_expect_gt0(db2, "SELECT cloudsync_network_sync(250,10) ->> '$.receive.rows';"); RCHECK snprintf(sql, sizeof(sql), "SELECT COUNT(*) FROM users WHERE name='%s';", value); rc = db_expect_int(db2, sql, 0); RCHECK From 1b48b0b1156849f6de78398382ebdd12e438b74b Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Wed, 4 Mar 2026 12:31:31 -0600 Subject: [PATCH 10/10] fix(network): added early return for empty-db case: when there's no 
payload AND db_version == 0. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. Added early return for empty-db case (lines 959–966): When there's no payload (blob == NULL || blob_size == 0) AND db_version == 0, skip the network call entirely and return { server_version: 0, local_version: 0, status: "synced" }. This avoids the 404 from the status endpoint when the server doesn't know the device yet. 2. Fixed condition from || to && (line 969): Changed blob != NULL || blob_size > 0 to blob != NULL && blob_size > 0 — both conditions must be true to confirm there's actual data to upload. --- src/network.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/network.c b/src/network.c index d68c984..f357297 100644 --- a/src/network.c +++ b/src/network.c @@ -956,8 +956,18 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, return rc; } + // Case 1: empty local db — no payload and no server state, skip network entirely + if ((blob == NULL || blob_size == 0) && db_version == 0) { + if (out) { + out->server_version = 0; + out->local_version = 0; + out->status = network_compute_status(0, 0, 0, 0); + } + return SQLITE_OK; + } + NETWORK_RESULT res; - if (blob != NULL || blob_size > 0) { + if (blob != NULL && blob_size > 0) { // there is data to send res = network_receive_buffer(netdata, netdata->upload_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); if (res.code != CLOUDSYNC_NETWORK_BUFFER) {