Skip to content

Commit 9479377

Browse files
committed
config.go: set smaller batchlimit and deactivate MetadataLoadedAt by default for protondb
reason: 1. protondb seems to have an issue in its Go driver; a smaller batch_limit is a more stable way to send data. 2. proton internally adds a mandatory timestamp column, so we do not need another one (_sling_loaded_at).
1 parent 1b8319a commit 9479377

File tree

1 file changed

+9
-1
lines changed

1 file changed

+9
-1
lines changed

core/sling/config.go

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,14 +145,20 @@ func (cfg *Config) SetDefault() {
145145
case dbio.TypeDbBigQuery, dbio.TypeDbBigTable:
146146
cfg.Source.Options.MaxDecimals = g.Int(9)
147147
cfg.Target.Options.MaxDecimals = g.Int(9)
148-
case dbio.TypeDbClickhouse, dbio.TypeDbProton:
148+
case dbio.TypeDbClickhouse:
149149
cfg.Source.Options.MaxDecimals = g.Int(11)
150150
cfg.Target.Options.MaxDecimals = g.Int(11)
151151
if cfg.Target.Options.BatchLimit == nil {
152152
// set default batch_limit to limit memory usage. Bug in clickhouse driver?
153153
// see https://github.com/ClickHouse/clickhouse-go/issues/1293
154154
cfg.Target.Options.BatchLimit = g.Int64(100000)
155155
}
156+
case dbio.TypeDbProton:
157+
cfg.Source.Options.MaxDecimals = g.Int(11)
158+
cfg.Target.Options.MaxDecimals = g.Int(11)
159+
if cfg.Target.Options.BatchLimit == nil {
160+
cfg.Target.Options.BatchLimit = g.Int64(1000)
161+
}
156162
}
157163

158164
// set default transforms
@@ -168,6 +174,8 @@ func (cfg *Config) SetDefault() {
168174
cfg.extraTransforms = append(cfg.extraTransforms, "parse_bit")
169175
case g.In(cfg.TgtConn.Type, dbio.TypeDbBigQuery):
170176
cfg.Target.Options.DatetimeFormat = "2006-01-02 15:04:05.000000-07"
177+
case g.In(cfg.TgtConn.Type, dbio.TypeDbProton):
178+
cfg.MetadataLoadedAt = g.Bool(false)
171179
}
172180

173181
// set vars

0 commit comments

Comments
 (0)