forked from streamingfast/substreams-sink-sql
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdialect_clickhouse.go
More file actions
441 lines (383 loc) · 15.2 KB
/
dialect_clickhouse.go
File metadata and controls
441 lines (383 loc) · 15.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
package db
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"reflect"
"sort"
"strconv"
"strings"
"time"
clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser"
_ "github.com/ClickHouse/clickhouse-go/v2"
"github.com/streamingfast/cli"
sink "github.com/streamingfast/substreams-sink"
"go.uber.org/zap"
"golang.org/x/exp/maps"
)
// ClickhouseDialect implements the SQL dialect hooks for ClickHouse: it
// generates cluster-aware DDL, flushes rows insert-only in per-table
// batches, and converts string values into driver-compatible Go types.
type ClickhouseDialect struct {
	// cursorTableName is the name of the table used to persist the sink cursor.
	cursorTableName string
	// cluster is the ClickHouse cluster name; empty when not running on a cluster.
	cluster string
	// schemaName is the database (schema) name used to qualify table names.
	schemaName string
}
// NewClickhouseDialect builds a ClickhouseDialect bound to the given database
// schema and cursor table. An empty cluster disables cluster-aware DDL.
func NewClickhouseDialect(schemaName string, cursorTableName string, cluster string) *ClickhouseDialect {
	d := ClickhouseDialect{
		schemaName:      schemaName,
		cursorTableName: cursorTableName,
		cluster:         cluster,
	}
	return &d
}
// Flush writes all buffered entries to ClickHouse.
//
// Clickhouse should be used to insert a lot of data in batches. The current
// official clickhouse driver doesn't support transactions spanning multiple
// tables, so the incoming tx is ignored: one transaction is opened per table,
// all of that table's rows are appended as a batch, and the transaction is
// committed before moving on to the next table.
//
// It returns the number of entries committed so far; on error, the in-flight
// transaction is rolled back and the count reflects only fully committed tables.
func (d ClickhouseDialect) Flush(tx Tx, ctx context.Context, l *Loader, outputModuleHash string, lastFinalBlock uint64) (int, error) {
	var entryCount int
	for entriesPair := l.entries.Oldest(); entriesPair != nil; entriesPair = entriesPair.Next() {
		tableName := entriesPair.Key
		entries := entriesPair.Value

		// One transaction per table (see doc comment above); this deliberately
		// shadows the unused tx parameter.
		tx, err := l.DB.BeginTx(ctx, nil)
		if err != nil {
			// Fix: the original dropped the underlying error here.
			return entryCount, fmt.Errorf("failed to begin db transaction: %w", err)
		}

		if l.tracer.Enabled() {
			l.logger.Debug("flushing table entries", zap.String("table_name", tableName), zap.Int("entry_count", entries.Len()))
		}

		info := l.tables[tableName]
		columns := make([]string, 0, len(info.columnsByName))
		for column := range info.columnsByName {
			columns = append(columns, column)
		}
		// Map iteration order is random; sort so the column list matches the
		// value ordering produced by convertOpToClickhouseValues.
		sort.Strings(columns)

		query := fmt.Sprintf(
			"INSERT INTO %s.%s (%s)",
			EscapeIdentifier(d.schemaName),
			EscapeIdentifier(tableName),
			strings.Join(columns, ","))

		batch, err := tx.Prepare(query)
		if err != nil {
			// Fix: roll back so the failed transaction is not left dangling.
			_ = tx.Rollback()
			return entryCount, fmt.Errorf("failed to prepare insert into %q: %w", tableName, err)
		}

		for entryPair := entries.Oldest(); entryPair != nil; entryPair = entryPair.Next() {
			entry := entryPair.Value
			if l.tracer.Enabled() {
				l.logger.Debug("adding query from operation to transaction", zap.Stringer("op", entry), zap.String("query", query))
			}
			values, err := convertOpToClickhouseValues(entry)
			if err != nil {
				_ = tx.Rollback()
				return entryCount, fmt.Errorf("failed to get values: %w", err)
			}
			if _, err := batch.ExecContext(ctx, values...); err != nil {
				_ = tx.Rollback()
				return entryCount, fmt.Errorf("executing for entry %q: %w", values, err)
			}
		}

		if err := tx.Commit(); err != nil {
			return entryCount, fmt.Errorf("failed to commit db transaction: %w", err)
		}
		entryCount += entries.Len()
	}
	return entryCount, nil
}
// Revert is unsupported: this ClickHouse dialect has no reorg (undo) handling,
// so it always returns an error.
func (d ClickhouseDialect) Revert(tx Tx, ctx context.Context, l *Loader, lastValidFinalBlock uint64) error {
	// Fix: error strings should not be capitalized or end with punctuation
	// (Go convention, staticcheck ST1005).
	return fmt.Errorf("clickhouse driver does not support reorg management")
}
// GetCreateCursorQuery returns the DDL that creates the cursor bookkeeping
// table. When a cluster is configured, the table is created ON CLUSTER with a
// replicated engine. The withPostgraphile flag is currently a no-op here.
func (d ClickhouseDialect) GetCreateCursorQuery(schema string, withPostgraphile bool) string {
	_ = withPostgraphile // TODO: see if this can work

	engine := "ReplacingMergeTree()"
	clusterClause := ""
	if d.cluster != "" {
		engine = "ReplicatedReplacingMergeTree()"
		clusterClause = fmt.Sprintf("ON CLUSTER %s", EscapeIdentifier(d.cluster))
	}

	tableName := d.cursorTableName
	if schema != "" {
		tableName = fmt.Sprintf("%s.%s", EscapeIdentifier(schema), EscapeIdentifier(d.cursorTableName))
	}

	return fmt.Sprintf(cli.Dedent(`
		CREATE TABLE IF NOT EXISTS %s %s
		(
			id String,
			cursor String,
			block_num Int64,
			block_id String
		) Engine = %s ORDER BY id;
	`), tableName, clusterClause, engine)
}
// GetCreateHistoryQuery is unsupported for ClickHouse: there is no reorg
// (history) table, so calling this is a programmer error.
func (d ClickhouseDialect) GetCreateHistoryQuery(schema string, withPostgraphile bool) string {
	panic("clickhouse does not support reorg management")
}
// ExecuteSetupScript runs the user-provided schema SQL against ClickHouse.
//
// When a cluster is configured, the script is parsed and each statement is
// rewritten to be cluster-aware: an ON CLUSTER clause is appended to CREATE
// DATABASE/TABLE/MATERIALIZED VIEW/VIEW/FUNCTION statements, and *MergeTree
// engines are upgraded to their Replicated* variants. Without a cluster, the
// script is naively split on ';' and executed statement by statement, adding
// a default ENGINE clause to CREATE TABLE statements that lack one.
func (d ClickhouseDialect) ExecuteSetupScript(ctx context.Context, l *Loader, schemaSql string) error {
	if d.cluster != "" {
		stmts, err := clickhouse.NewParser(schemaSql).ParseStmts()
		if err != nil {
			return fmt.Errorf("parsing schemaName: %w", err)
		}

		onCluster := func() *clickhouse.ClusterClause {
			return &clickhouse.ClusterClause{Expr: &clickhouse.StringLiteral{Literal: d.cluster}}
		}

		for _, stmt := range stmts {
			// Each statement node has exactly one concrete type, so a type
			// switch is equivalent to the original chain of type assertions.
			switch s := stmt.(type) {
			case *clickhouse.CreateDatabase:
				l.logger.Debug("appending 'ON CLUSTER' clause to 'CREATE DATABASE'", zap.String("cluster", d.cluster), zap.Stringer("database", s.Name))
				s.OnCluster = onCluster()

			case *clickhouse.CreateTable:
				l.logger.Debug("appending 'ON CLUSTER' clause to 'CREATE TABLE'", zap.String("cluster", d.cluster), zap.String("table", s.Name.String()))
				s.OnCluster = onCluster()
				// Fix: guard against a nil Engine before dereferencing it (the
				// materialized-view branch already did; this one could panic).
				if s.Engine != nil && !strings.HasPrefix(s.Engine.Name, "Replicated") &&
					strings.HasSuffix(s.Engine.Name, "MergeTree") {
					newEngine := "Replicated" + s.Engine.Name
					l.logger.Debug("replacing table engine with replicated one", zap.String("table", s.Name.String()), zap.String("engine", s.Engine.Name), zap.String("new_engine", newEngine))
					s.Engine.Name = newEngine
				}

			case *clickhouse.CreateMaterializedView:
				l.logger.Debug("appending 'ON CLUSTER' clause to 'CREATE MATERIALIZED VIEW'", zap.String("cluster", d.cluster), zap.Stringer("materialized_view", s.Name))
				s.OnCluster = onCluster()
				if s.Engine != nil && !strings.HasPrefix(s.Engine.Name, "Replicated") &&
					strings.HasSuffix(s.Engine.Name, "MergeTree") {
					newEngine := "Replicated" + s.Engine.Name
					l.logger.Debug("replacing table engine with replicated one", zap.Stringer("materialized_view", s.Name), zap.String("engine", s.Engine.Name), zap.String("new_engine", newEngine))
					s.Engine.Name = newEngine
				}

			case *clickhouse.CreateView:
				l.logger.Debug("appending 'ON CLUSTER' clause to 'CREATE VIEW'", zap.String("cluster", d.cluster), zap.Stringer("view", s.Name))
				s.OnCluster = onCluster()

			case *clickhouse.CreateFunction:
				l.logger.Debug("appending 'ON CLUSTER' clause to 'CREATE FUNCTION'", zap.String("cluster", d.cluster), zap.Stringer("function", s.FunctionName))
				s.OnCluster = onCluster()
			}

			if _, err := l.ExecContext(ctx, stmt.String()); err != nil {
				l.logger.Error("failed to execute schema statement", zap.String("statement", stmt.String()), zap.Error(err))
				return fmt.Errorf("exec schemaName: %w", err)
			}
		}
		return nil
	}

	// No cluster: naive split on ';'. NOTE(review): this breaks on string
	// literals containing semicolons; the parser-based path above is more robust.
	for _, query := range strings.Split(schemaSql, ";") {
		query = strings.TrimSpace(query)
		if len(query) == 0 {
			continue
		}
		// Add a default ENGINE clause to CREATE TABLE statements missing one.
		// d.cluster is empty in this branch, so a plain ReplacingMergeTree is
		// always correct (the original's replicated fallback here was dead code).
		upper := strings.ToUpper(query)
		if strings.HasPrefix(upper, "CREATE TABLE") && !strings.Contains(upper, "ENGINE") {
			query = query + " ENGINE = ReplacingMergeTree()"
		}
		if _, err := l.ExecContext(ctx, query); err != nil {
			return fmt.Errorf("exec schemaName: %w", err)
		}
	}
	return nil
}
// GetUpdateCursorQuery returns the INSERT statement recording the latest
// cursor for a module. The cursor table uses a ReplacingMergeTree engine
// (see GetCreateCursorQuery), so inserting a row with an existing id
// supersedes the previous one instead of requiring an UPDATE.
// NOTE(review): values are interpolated, not bound; callers are expected to
// pass trusted, internally-generated values only.
func (d ClickhouseDialect) GetUpdateCursorQuery(table, moduleHash string, cursor *sink.Cursor, block_num uint64, block_id string) string {
	return query(`
	INSERT INTO %s (id, cursor, block_num, block_id) values ('%s', '%s', %d, '%s')
	`, table, moduleHash, cursor, block_num, block_id)
}
// GetAllCursorsQuery returns a query selecting every stored cursor row.
// FINAL forces merge-time deduplication so at most one row per id is
// returned from the ReplacingMergeTree-backed cursor table.
func (d ClickhouseDialect) GetAllCursorsQuery(table string) string {
	const tmpl = "SELECT id, cursor, block_num, block_id FROM %s FINAL"
	return fmt.Sprintf(tmpl, table)
}
// ParseDatetimeNormalization wraps the (escaped) string value in ClickHouse's
// parseDateTimeBestEffort() function so loosely formatted datetimes are
// accepted on insert.
func (d ClickhouseDialect) ParseDatetimeNormalization(value string) string {
	escaped := escapeStringValue(value)
	return fmt.Sprintf("parseDateTimeBestEffort(%s)", escaped)
}
// DriverSupportRowsAffected reports whether the driver returns meaningful
// RowsAffected counts; the ClickHouse driver does not.
func (d ClickhouseDialect) DriverSupportRowsAffected() bool {
	return false
}

// OnlyInserts reports that this dialect supports INSERT operations only
// (no UPDATE/DELETE) — see also Revert, which is unsupported.
func (d ClickhouseDialect) OnlyInserts() bool {
	return true
}

// AllowPkDuplicates reports that rows with duplicate primary keys are
// tolerated; the ReplacingMergeTree engines used by this dialect are
// presumably expected to deduplicate them at merge time.
func (d ClickhouseDialect) AllowPkDuplicates() bool {
	return true
}
// CreateUser creates a ClickHouse user (if it does not already exist) with a
// plaintext password, then grants it either read-only SELECT or full
// privileges on every database. The _database argument is unused for
// ClickHouse since grants are applied on *.*.
func (d ClickhouseDialect) CreateUser(tx Tx, ctx context.Context, l *Loader, username string, password string, _database string, readOnly bool) error {
	user := EscapeIdentifier(username)
	pass := escapeStringValue(password)

	onClusterClause := ""
	if d.cluster != "" {
		onClusterClause = fmt.Sprintf("ON CLUSTER %s", EscapeIdentifier(d.cluster))
	}

	createUserQ := fmt.Sprintf("CREATE USER IF NOT EXISTS %s %s IDENTIFIED WITH plaintext_password BY %s;", user, onClusterClause, pass)
	if _, err := tx.ExecContext(ctx, createUserQ); err != nil {
		return fmt.Errorf("executing create user query %q: %w", createUserQ, err)
	}

	// In ClickHouse GRANT syntax, ON CLUSTER comes right after GRANT.
	privilege := "ALL"
	if readOnly {
		privilege = "SELECT"
	}
	grantQ := fmt.Sprintf(`
	GRANT %s %s ON *.* TO %s;
	`, onClusterClause, privilege, user)
	if _, err := tx.ExecContext(ctx, grantQ); err != nil {
		return fmt.Errorf("executing grant query %q: %w", grantQ, err)
	}
	return nil
}
// convertOpToClickhouseValues extracts the operation's data as a slice of
// driver-ready values ordered by sorted column name, matching the column
// ordering built in Flush. It fails when a data key has no corresponding
// table column or a value cannot be converted to the column's scan type.
func convertOpToClickhouseValues(o *Operation) ([]any, error) {
	columns := maps.Keys(o.data)
	sort.Strings(columns)

	values := make([]any, len(columns))
	for i, name := range columns {
		col, found := o.table.columnsByName[name]
		if !found {
			return nil, fmt.Errorf("cannot find column %q for table %q (valid columns are %q)", name, o.table.identifier, strings.Join(maps.Keys(o.table.columnsByName), ", "))
		}
		converted, err := convertToType(o.data[name], col.scanType)
		if err != nil {
			return nil, fmt.Errorf("converting value %q to type %q in column %q: %w", o.data[name], col.scanType, name, err)
		}
		values[i] = converted
	}
	return values, nil
}
// convertToType converts the string representation of a value into a Go value
// matching the column's scan type, suitable for the ClickHouse driver.
//
// Supported kinds: strings, JSON-encoded slices of scalars, booleans, all
// sized integer kinds, floats, time.Time-backed struct columns (accepting
// unix timestamps, "2006-01-02" dates, and two datetime layouts), and
// pointers to any of the above (including *big.Int). Unrecognized kinds fall
// through and are returned as the raw string.
func convertToType(value string, valueType reflect.Type) (any, error) {
	switch valueType.Kind() {
	case reflect.String:
		return value, nil

	case reflect.Slice:
		elemKind := valueType.Elem().Kind()
		if elemKind == reflect.Struct || elemKind == reflect.Ptr {
			return nil, fmt.Errorf("%q is not supported as Clickhouse Array type", valueType.Elem().Name())
		}
		res := reflect.New(reflect.SliceOf(valueType.Elem()))
		if err := json.Unmarshal([]byte(value), res.Interface()); err != nil {
			// Fix: return nil (not "") on error, consistent with the function's
			// other error branches and Go's (T, error) convention.
			return nil, fmt.Errorf("could not JSON unmarshal slice value %q: %w", value, err)
		}
		return res.Elem().Interface(), nil

	case reflect.Bool:
		return strconv.ParseBool(value)

	case reflect.Int:
		v, err := strconv.ParseInt(value, 10, 0)
		return int(v), err
	case reflect.Int8:
		v, err := strconv.ParseInt(value, 10, 8)
		return int8(v), err
	case reflect.Int16:
		v, err := strconv.ParseInt(value, 10, 16)
		return int16(v), err
	case reflect.Int32:
		v, err := strconv.ParseInt(value, 10, 32)
		return int32(v), err
	case reflect.Int64:
		return strconv.ParseInt(value, 10, 64)

	case reflect.Uint:
		v, err := strconv.ParseUint(value, 10, 0)
		return uint(v), err
	case reflect.Uint8:
		v, err := strconv.ParseUint(value, 10, 8)
		return uint8(v), err
	case reflect.Uint16:
		v, err := strconv.ParseUint(value, 10, 16)
		return uint16(v), err
	case reflect.Uint32:
		v, err := strconv.ParseUint(value, 10, 32)
		return uint32(v), err
	case reflect.Uint64:
		// Fix: the original used bitSize 0 (platform int width), which would
		// reject valid large uint64 values on 32-bit platforms.
		return strconv.ParseUint(value, 10, 64)

	case reflect.Float32:
		// Fix: the original called ParseFloat with an invalid bitSize (10) and
		// returned float64 even for Float32 columns; parse as 32-bit and return
		// a float32, matching the sized-integer cases above.
		v, err := strconv.ParseFloat(value, 32)
		return float32(v), err
	case reflect.Float64:
		// Fix: bitSize must be 32 or 64 per the strconv documentation.
		return strconv.ParseFloat(value, 64)

	case reflect.Struct:
		if valueType == reflectTypeTime {
			// A purely numeric value is treated as a unix timestamp.
			if integerRegex.MatchString(value) {
				i, err := strconv.Atoi(value)
				if err != nil {
					return "", fmt.Errorf("could not convert %s to int: %w", value, err)
				}
				return int64(i), nil
			}
			var v time.Time
			var err error
			if strings.Contains(value, "T") && strings.HasSuffix(value, "Z") {
				v, err = time.Parse("2006-01-02T15:04:05Z", value)
			} else if dateRegex.MatchString(value) {
				// This is a Clickhouse Date field. The Clickhouse Go client doesn't convert unix timestamp into Date,
				// so we just validate the format here and return a string.
				_, err = time.Parse("2006-01-02", value)
				if err != nil {
					return "", fmt.Errorf("could not convert %s to date: %w", value, err)
				}
				return value, nil
			} else {
				v, err = time.Parse("2006-01-02 15:04:05", value)
			}
			if err != nil {
				return "", fmt.Errorf("could not convert %s to time: %w", value, err)
			}
			return v.Unix(), nil
		}
		return "", fmt.Errorf("unsupported struct type %s", valueType)

	case reflect.Ptr:
		if valueType.String() == "*big.Int" {
			newInt := new(big.Int)
			newInt.SetString(value, 10)
			return newInt, nil
		}
		elemType := valueType.Elem()
		val, err := convertToType(value, elemType)
		if err != nil {
			return nil, fmt.Errorf("invalid pointer type: %w", err)
		}
		// We cannot just return &val here as this will return an *interface{} that the Clickhouse Go client won't be
		// able to convert on inserting. Instead, we create a new variable using the type that valueType has been
		// pointing to, assign the converted value from convertToType to that and then return a pointer to the new variable.
		result := reflect.New(elemType).Elem()
		result.Set(reflect.ValueOf(val))
		return result.Addr().Interface(), nil

	default:
		return value, nil
	}
}
// GetTableColumns returns the column types for every non-AggregateFunction
// column of the given table. AggregateFunction columns are filtered out via
// DESCRIBE TABLE first, then the remaining columns' types are introspected
// with a zero-row SELECT.
func (d ClickhouseDialect) GetTableColumns(db *sql.DB, schemaName, tableName string) ([]*sql.ColumnType, error) {
	qualified := fmt.Sprintf("%s.%s", EscapeIdentifier(schemaName), EscapeIdentifier(tableName))

	describeRows, err := db.Query("DESCRIBE TABLE " + qualified)
	if err != nil {
		return nil, fmt.Errorf("describing table structure: %w", err)
	}
	defer describeRows.Close()

	// Collect escaped names of columns whose type is not an AggregateFunction.
	var insertable []string
	for describeRows.Next() {
		// DESCRIBE TABLE yields seven columns; only name and type are used here.
		var name, dataType, defaultKind, defaultExpr, comment, codecExpr, ttlExpr string
		if err := describeRows.Scan(&name, &dataType, &defaultKind, &defaultExpr, &comment, &codecExpr, &ttlExpr); err != nil {
			return nil, fmt.Errorf("scanning describe results: %w", err)
		}
		if strings.Contains(dataType, "AggregateFunction") {
			continue
		}
		insertable = append(insertable, EscapeIdentifier(name))
	}
	if err := describeRows.Err(); err != nil {
		return nil, fmt.Errorf("iterating describe results: %w", err)
	}
	if len(insertable) == 0 {
		return nil, fmt.Errorf("no non-aggregate columns found in table %s.%s", schemaName, tableName)
	}

	// WHERE 1=0 returns no rows but still exposes the column type metadata.
	selectQuery := fmt.Sprintf("SELECT %s FROM %s WHERE 1=0", strings.Join(insertable, ", "), qualified)
	typedRows, err := db.Query(selectQuery)
	if err != nil {
		return nil, fmt.Errorf("querying filtered table structure: %w", err)
	}
	defer typedRows.Close()
	return typedRows.ColumnTypes()
}