Skip to content

Commit bdfb377

Browse files
authored
Merge branch 'ArcadeData:main' into fixissue3476
2 parents 41fdc93 + 5ebbdb7 commit bdfb377

File tree

193 files changed

+2952
-2463
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

193 files changed

+2952
-2463
lines changed

bolt/src/test/java/com/arcadedb/bolt/BoltChunkedIOTest.java

Lines changed: 17 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222

2323
import java.io.ByteArrayInputStream;
2424
import java.io.ByteArrayOutputStream;
25-
import java.io.IOException;
2625
import java.util.Arrays;
2726

2827
import static org.assertj.core.api.Assertions.assertThat;
@@ -35,7 +34,7 @@ class BoltChunkedIOTest {
3534
// ============ BoltChunkedOutput tests ============
3635

3736
@Test
38-
void writeEmptyMessage() throws IOException {
37+
void writeEmptyMessage() throws Exception {
3938
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
4039
final BoltChunkedOutput output = new BoltChunkedOutput(baos);
4140

@@ -49,7 +48,7 @@ void writeEmptyMessage() throws IOException {
4948
}
5049

5150
@Test
52-
void writeSmallMessage() throws IOException {
51+
void writeSmallMessage() throws Exception {
5352
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
5453
final BoltChunkedOutput output = new BoltChunkedOutput(baos);
5554

@@ -74,7 +73,7 @@ void writeSmallMessage() throws IOException {
7473
}
7574

7675
@Test
77-
void writeMessageExactlyMaxChunkSize() throws IOException {
76+
void writeMessageExactlyMaxChunkSize() throws Exception {
7877
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
7978
final BoltChunkedOutput output = new BoltChunkedOutput(baos);
8079

@@ -92,7 +91,7 @@ void writeMessageExactlyMaxChunkSize() throws IOException {
9291
}
9392

9493
@Test
95-
void writeMessageLargerThanMaxChunkSize() throws IOException {
94+
void writeMessageLargerThanMaxChunkSize() throws Exception {
9695
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
9796
final BoltChunkedOutput output = new BoltChunkedOutput(baos);
9897

@@ -122,7 +121,7 @@ void writeMessageLargerThanMaxChunkSize() throws IOException {
122121
}
123122

124123
@Test
125-
void writeRawBytes() throws IOException {
124+
void writeRawBytes() throws Exception {
126125
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
127126
final BoltChunkedOutput output = new BoltChunkedOutput(baos);
128127

@@ -133,7 +132,7 @@ void writeRawBytes() throws IOException {
133132
}
134133

135134
@Test
136-
void writeRawInt() throws IOException {
135+
void writeRawInt() throws Exception {
137136
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
138137
final BoltChunkedOutput output = new BoltChunkedOutput(baos);
139138

@@ -150,7 +149,7 @@ void writeRawInt() throws IOException {
150149
// ============ BoltChunkedInput tests ============
151150

152151
@Test
153-
void readEmptyMessage() throws IOException {
152+
void readEmptyMessage() throws Exception {
154153
// Just end marker
155154
final byte[] input = { 0x00, 0x00 };
156155
final BoltChunkedInput chunkedInput = new BoltChunkedInput(new ByteArrayInputStream(input));
@@ -160,7 +159,7 @@ void readEmptyMessage() throws IOException {
160159
}
161160

162161
@Test
163-
void readSmallMessage() throws IOException {
162+
void readSmallMessage() throws Exception {
164163
// Size (5) + data + end marker
165164
final byte[] input = {
166165
0x00, 0x05, // chunk size = 5
@@ -174,7 +173,7 @@ void readSmallMessage() throws IOException {
174173
}
175174

176175
@Test
177-
void readMultiChunkMessage() throws IOException {
176+
void readMultiChunkMessage() throws Exception {
178177
// Two chunks: 3 bytes + 2 bytes
179178
final byte[] input = {
180179
0x00, 0x03, // first chunk size = 3
@@ -190,7 +189,7 @@ void readMultiChunkMessage() throws IOException {
190189
}
191190

192191
@Test
193-
void readRawBytes() throws IOException {
192+
void readRawBytes() throws Exception {
194193
final byte[] input = { 0x60, 0x60, (byte) 0xB0, 0x17, 0x00, 0x00 };
195194
final BoltChunkedInput chunkedInput = new BoltChunkedInput(new ByteArrayInputStream(input));
196195

@@ -199,7 +198,7 @@ void readRawBytes() throws IOException {
199198
}
200199

201200
@Test
202-
void readRawInt() throws IOException {
201+
void readRawInt() throws Exception {
203202
final byte[] input = { 0x00, 0x00, 0x01, 0x04 };
204203
final BoltChunkedInput chunkedInput = new BoltChunkedInput(new ByteArrayInputStream(input));
205204

@@ -208,7 +207,7 @@ void readRawInt() throws IOException {
208207
}
209208

210209
@Test
211-
void readRawShort() throws IOException {
210+
void readRawShort() throws Exception {
212211
final byte[] input = { 0x00, 0x05 };
213212
final BoltChunkedInput chunkedInput = new BoltChunkedInput(new ByteArrayInputStream(input));
214213

@@ -217,7 +216,7 @@ void readRawShort() throws IOException {
217216
}
218217

219218
@Test
220-
void readLargeChunkSize() throws IOException {
219+
void readLargeChunkSize() throws Exception {
221220
// Chunk size 0xFFFF (65535)
222221
final byte[] input = new byte[65535 + 4]; // size header + data + end marker
223222
input[0] = (byte) 0xFF;
@@ -235,7 +234,7 @@ void readLargeChunkSize() throws IOException {
235234
}
236235

237236
@Test
238-
void available() throws IOException {
237+
void available() throws Exception {
239238
final byte[] input = { 0x01, 0x02, 0x03 };
240239
final BoltChunkedInput chunkedInput = new BoltChunkedInput(new ByteArrayInputStream(input));
241240

@@ -245,7 +244,7 @@ void available() throws IOException {
245244
// ============ Round-trip tests ============
246245

247246
@Test
248-
void roundTripSmallMessage() throws IOException {
247+
void roundTripSmallMessage() throws Exception {
249248
final byte[] originalMessage = { 0x01, 0x02, 0x03, 0x04, 0x05 };
250249

251250
// Write
@@ -261,7 +260,7 @@ void roundTripSmallMessage() throws IOException {
261260
}
262261

263262
@Test
264-
void roundTripLargeMessage() throws IOException {
263+
void roundTripLargeMessage() throws Exception {
265264
// Message larger than max chunk size
266265
final byte[] originalMessage = new byte[100000];
267266
for (int i = 0; i < originalMessage.length; i++) {
@@ -281,7 +280,7 @@ void roundTripLargeMessage() throws IOException {
281280
}
282281

283282
@Test
284-
void roundTripEmptyMessage() throws IOException {
283+
void roundTripEmptyMessage() throws Exception {
285284
final byte[] originalMessage = new byte[0];
286285

287286
// Write

console/src/test/java/com/arcadedb/console/ConsoleBatchTest.java

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -57,16 +57,18 @@ void okSqlCreationIfNotExists() throws Exception {
5757
@Test
5858
void okBatchMultiLine() throws Exception {
5959
Console.execute(new String[] { "-b",
60-
"create database console;" +
61-
"create vertex type Batchtest;" +
62-
"create vertex Batchtest set id = 1;" +
63-
"create vertex Batchtest set id = 2;" +
64-
"create vertex Batchtest set id = 3;" +
65-
"LET x=SELECT FROM Batchtest;" +
66-
"if($x.size()>0){ \n" +
67-
" return true; \n" +
68-
"} \n" +
69-
"return false;" });
60+
"""
61+
create database console;\
62+
create vertex type Batchtest;\
63+
create vertex Batchtest set id = 1;\
64+
create vertex Batchtest set id = 2;\
65+
create vertex Batchtest set id = 3;\
66+
LET x=SELECT FROM Batchtest;\
67+
if($x.size()>0){\s
68+
return true;\s
69+
}\s
70+
return false;\
71+
""" });
7072

7173
final Database db = new DatabaseFactory("./target/databases/console").open();
7274
assertThat(db.getSchema().existsType("Batchtest")).isTrue();

e2e-perf/pom.xml

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -37,22 +37,6 @@
3737
<name>ArcadeDB performance tests</name>
3838
<packaging>jar</packaging>
3939

40-
<build>
41-
<plugins>
42-
<plugin>
43-
<groupId>org.apache.maven.plugins</groupId>
44-
<artifactId>maven-jar-plugin</artifactId>
45-
<executions>
46-
<execution>
47-
<goals>
48-
<goal>test-jar</goal>
49-
</goals>
50-
</execution>
51-
</executions>
52-
</plugin>
53-
</plugins>
54-
</build>
55-
5640
<dependencies>
5741
<dependency>
5842
<groupId>ch.qos.logback</groupId>

e2e/pom.xml

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -40,23 +40,6 @@
4040
<packaging>jar</packaging>
4141
<name>ArcadeDB End-to-End Tests</name>
4242

43-
<build>
44-
<plugins>
45-
<plugin>
46-
<groupId>org.apache.maven.plugins</groupId>
47-
<artifactId>maven-jar-plugin</artifactId>
48-
<executions>
49-
<execution>
50-
<goals>
51-
<goal>test-jar</goal>
52-
</goals>
53-
</execution>
54-
</executions>
55-
</plugin>
56-
57-
</plugins>
58-
</build>
59-
6043
<dependencies>
6144
<dependency>
6245
<groupId>org.junit.jupiter</groupId>

engine/src/main/java/com/arcadedb/GlobalConfiguration.java

Lines changed: 39 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -258,8 +258,9 @@ public Object call(final Object value) {
258258
Integer.class, 300),
259259

260260
SQL_PARSER_IMPLEMENTATION("arcadedb.sql.parserImplementation", SCOPE.DATABASE,
261-
"SQL parser implementation to use. 'antlr' (default) uses the new ANTLR4-based parser with improved error messages. " +
262-
"'javacc' uses the legacy JavaCC-based parser for backward compatibility.",
261+
"""
262+
SQL parser implementation to use. 'antlr' (default) uses the new ANTLR4-based parser with improved error messages. \
263+
'javacc' uses the legacy JavaCC-based parser for backward compatibility.""",
263264
String.class, "antlr", Set.of("antlr", "javacc")),
264265

265266
// OPENCYPHER
@@ -270,18 +271,21 @@ public Object call(final Object value) {
270271
"Maximum number of OpenCypher execution plans to keep in cache (frequency-based eviction)", Integer.class, 300),
271272

272273
OPENCYPHER_BULK_CREATE_BATCH_SIZE("arcadedb.opencypher.bulkCreateBatchSize", SCOPE.DATABASE,
273-
"Batch size for bulk CREATE operations. When a CREATE follows an UNWIND producing multiple rows, records are accumulated and created in batches to reduce transaction overhead. " +
274-
"Higher values improve performance but consume more memory. Default: 20000. Recommended range: 10000-100000. Set to 0 to disable batching.",
274+
"""
275+
Batch size for bulk CREATE operations. When a CREATE follows an UNWIND producing multiple rows, records are accumulated and created in batches to reduce transaction overhead. \
276+
Higher values improve performance but consume more memory. Default: 20000. Recommended range: 10000-100000. Set to 0 to disable batching.""",
275277
Integer.class, 20_000),
276278

277279
OPENCYPHER_LOAD_CSV_ALLOW_FILE_URLS("arcadedb.opencypher.loadCsv.allowFileUrls", SCOPE.DATABASE,
278-
"Allow LOAD CSV to access local files via file:/// URLs and bare file paths. "
279-
+ "Disable for security in multi-tenant server deployments.",
280+
"""
281+
Allow LOAD CSV to access local files via file:/// URLs and bare file paths. \
282+
Disable for security in multi-tenant server deployments.""",
280283
Boolean.class, true),
281284

282285
OPENCYPHER_LOAD_CSV_IMPORT_DIRECTORY("arcadedb.opencypher.loadCsv.importDirectory", SCOPE.DATABASE,
283-
"Root directory for LOAD CSV file:/// URLs. When set, file paths are resolved relative to this "
284-
+ "directory and path traversal (../) is blocked. Empty string means no restriction.",
286+
"""
287+
Root directory for LOAD CSV file:/// URLs. When set, file paths are resolved relative to this \
288+
directory and path traversal (../) is blocked. Empty string means no restriction.""",
285289
String.class, ""),
286290

287291
// COMMAND
@@ -292,9 +296,10 @@ public Object call(final Object value) {
292296
Integer.class, 100),
293297

294298
GREMLIN_ENGINE("arcadedb.gremlin.engine", SCOPE.DATABASE,
295-
"Gremlin engine to use. 'java' (default, secure) uses the native Gremlin parser - recommended for production. " +
296-
"'groovy' enables the legacy Groovy engine with security restrictions (use only if needed for compatibility). " +
297-
"'auto' attempts Java first, falls back to Groovy if needed (not recommended for security-critical deployments).",
299+
"""
300+
Gremlin engine to use. 'java' (default, secure) uses the native Gremlin parser - recommended for production. \
301+
'groovy' enables the legacy Groovy engine with security restrictions (use only if needed for compatibility). \
302+
'auto' attempts Java first, falls back to Groovy if needed (not recommended for security-critical deployments).""",
298303
String.class, "java", Set.of("auto", "groovy", "java")),
299304

300305
/**
@@ -320,10 +325,11 @@ This setting is intended as a safety measure against excessive resource consumpt
320325

321326
// INDEXES
322327
INDEX_BUILD_CHUNK_SIZE_MB("arcadedb.index.buildChunkSizeMB", SCOPE.DATABASE,
323-
"Size in MB for transaction chunks during bulk index creation with WAL disabled. " +
324-
"Larger chunks reduce commit overhead but use more memory. " +
325-
"Smaller chunks reduce memory pressure but add commit overhead. " +
326-
"Recommended: 50MB for typical workloads, 100MB for high-memory systems, 25MB for constrained environments.",
328+
"""
329+
Size in MB for transaction chunks during bulk index creation with WAL disabled. \
330+
Larger chunks reduce commit overhead but use more memory. \
331+
Smaller chunks reduce memory pressure but add commit overhead. \
332+
Recommended: 50MB for typical workloads, 100MB for high-memory systems, 25MB for constrained environments.""",
327333
Long.class, 50L),
328334

329335
INDEX_COMPACTION_RAM_MB("arcadedb.indexCompactionRAM", SCOPE.DATABASE, "Maximum amount of RAM to use for index compaction, in MB",
@@ -333,29 +339,33 @@ This setting is intended as a safety measure against excessive resource consumpt
333339
"Minimum number of mutable pages for an index to be schedule for automatic compaction. 0 = disabled", Integer.class, 10),
334340

335341
VECTOR_INDEX_LOCATION_CACHE_SIZE("arcadedb.vectorIndex.locationCacheSize", SCOPE.DATABASE,
336-
"Maximum number of vector locations to cache in memory per vector index. " +
337-
"Set to -1 for unlimited (backward compatible). " +
338-
"Each entry uses ~56 bytes. Recommended: 100000 for datasets with 1M+ vectors (~5.6MB), " +
339-
"-1 for smaller datasets.",
342+
"""
343+
Maximum number of vector locations to cache in memory per vector index. \
344+
Set to -1 for unlimited (backward compatible). \
345+
Each entry uses ~56 bytes. Recommended: 100000 for datasets with 1M+ vectors (~5.6MB), \
346+
-1 for smaller datasets.""",
340347
Integer.class, -1),
341348

342349
VECTOR_INDEX_GRAPH_BUILD_CACHE_SIZE("arcadedb.vectorIndex.graphBuildCacheSize", SCOPE.DATABASE,
343-
"Maximum number of vectors to cache in memory during HNSW graph building. " +
344-
"Higher values speed up construction but use more RAM. " +
345-
"RAM usage = cacheSize * (dimensions * 4 + 64) bytes. " +
346-
"Recommended: 100000 for 768-dim vectors (~30MB), scale based on dimensionality.",
350+
"""
351+
Maximum number of vectors to cache in memory during HNSW graph building. \
352+
Higher values speed up construction but use more RAM. \
353+
RAM usage = cacheSize * (dimensions * 4 + 64) bytes. \
354+
Recommended: 100000 for 768-dim vectors (~30MB), scale based on dimensionality.""",
347355
Integer.class, 100_000),
348356

349357
VECTOR_INDEX_MUTATIONS_BEFORE_REBUILD("arcadedb.vectorIndex.mutationsBeforeRebuild", SCOPE.DATABASE,
350-
"Number of mutations (inserts/updates/deletes) before rebuilding the HNSW graph index. " +
351-
"Higher values reduce rebuild cost but may return slightly stale results in queries. " +
352-
"Lower values provide fresher results but rebuild more frequently. " +
353-
"Recommended: 50-200 for read-heavy, 200-500 for write-heavy workloads.",
358+
"""
359+
Number of mutations (inserts/updates/deletes) before rebuilding the HNSW graph index. \
360+
Higher values reduce rebuild cost but may return slightly stale results in queries. \
361+
Lower values provide fresher results but rebuild more frequently. \
362+
Recommended: 50-200 for read-heavy, 200-500 for write-heavy workloads.""",
354363
Integer.class, 100),
355364

356365
VECTOR_INDEX_GRAPH_BUILD_DIAGNOSTICS("arcadedb.vectorIndex.graphBuildDiagnostics", SCOPE.DATABASE,
357-
"Enable diagnostic logging during vector graph build progress (heap/off-heap memory and index file sizes). " +
358-
"This provides visibility during graph construction; disable if any logging overhead is a concern.",
366+
"""
367+
Enable diagnostic logging during vector graph build progress (heap/off-heap memory and index file sizes). \
368+
This provides visibility during graph construction; disable if any logging overhead is a concern.""",
359369
Boolean.class, true),
360370

361371
// NETWORK

0 commit comments

Comments
 (0)