tinykvs

package module
v0.11.1 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jan 21, 2026 License: MIT Imports: 28 Imported by: 0

README

TinyKVS

CI Coverage Status Go Reference Go Report Card Quality Gate Status Snyk

A low-memory, sorted key-value store for Go built on LSM-tree architecture with configurable compression (zstd, snappy, or none).

Features

  • Sorted storage - Lexicographic key ordering, efficient range scans
  • Ultra-low memory - Runs 1B+ records on t4g.micro (1GB RAM) with swap
  • Configurable memory - Block cache, memtable size, bloom filters all tunable
  • Concurrent access - Concurrent reads and writes, optimized for read-heavy workloads
  • Durability - Write-ahead log with configurable sync modes
  • Compression - zstd (default), snappy, or none with configurable levels
  • Bloom filters - Fast negative lookups (can be disabled to save memory)

Installation

go get github.com/freeeve/tinykvs

Quick Start

package main

import (
    "fmt"
    "log"

    "github.com/freeeve/tinykvs"
)

func main() {
    // Open a store
    store, err := tinykvs.Open("/tmp/mydb", tinykvs.DefaultOptions("/tmp/mydb"))
    if err != nil {
        log.Fatal(err)
    }
    defer store.Close()

    // Write values
    store.PutString([]byte("name"), "Alice")
    store.PutInt64([]byte("age"), 30)
    store.PutFloat64([]byte("score"), 95.5)
    store.PutBool([]byte("active"), true)

    // Read values
    name, _ := store.GetString([]byte("name"))
    age, _ := store.GetInt64([]byte("age"))

    fmt.Printf("Name: %s, Age: %d\n", name, age)

    // Flush to disk
    store.Flush()
}

API

Store Operations
// Open or create a store
func Open(path string, opts Options) (*Store, error)

// Close the store
func (s *Store) Close() error

// Flush all data to disk
func (s *Store) Flush() error
Read/Write
// Generic value operations
func (s *Store) Put(key []byte, value Value) error
func (s *Store) Get(key []byte) (Value, error)
func (s *Store) Delete(key []byte) error

// Typed convenience methods
func (s *Store) PutString(key []byte, value string) error
func (s *Store) PutInt64(key []byte, value int64) error
func (s *Store) PutFloat64(key []byte, value float64) error
func (s *Store) PutBool(key []byte, value bool) error
func (s *Store) PutBytes(key []byte, value []byte) error

func (s *Store) GetString(key []byte) (string, error)
func (s *Store) GetInt64(key []byte) (int64, error)
func (s *Store) GetFloat64(key []byte) (float64, error)
func (s *Store) GetBool(key []byte) (bool, error)
func (s *Store) GetBytes(key []byte) ([]byte, error)

// Struct and map storage (uses msgpack internally)
func (s *Store) PutStruct(key []byte, v any) error
func (s *Store) GetStruct(key []byte, dest any) error
func (s *Store) PutMap(key []byte, fields map[string]any) error
func (s *Store) GetMap(key []byte) (map[string]any, error)

// JSON storage (stores as string, queryable in shell)
func (s *Store) PutJson(key []byte, v any) error
func (s *Store) GetJson(key []byte, dest any) error
Batch Operations
// Create a batch for atomic writes
batch := tinykvs.NewBatch()
batch.Put(key, value)
batch.PutString(key, "value")
batch.PutInt64(key, 42)
batch.PutStruct(key, myStruct)
batch.PutMap(key, map[string]any{"field": "value"})
batch.Delete(key)

// Apply atomically
store.WriteBatch(batch)
Range Scans
// Iterate over all keys with a given prefix (sorted order)
// Return false from callback to stop iteration
func (s *Store) ScanPrefix(prefix []byte, fn func(key []byte, value Value) bool) error
Value Types
type Value struct {
    Type    ValueType
    Int64   int64
    Float64 float64
    Bool    bool
    Bytes   []byte
    Record  map[string]any  // For struct/map storage
}

// Value constructors
func Int64Value(v int64) Value
func Float64Value(v float64) Value
func BoolValue(v bool) Value
func StringValue(v string) Value
func BytesValue(v []byte) Value
func RecordValue(v map[string]any) Value
Configuration
type Options struct {
    Dir              string          // Data directory
    MemtableSize     int64           // Max memtable size before flush (default: 4MB)
    BlockCacheSize   int64           // LRU cache size (default: 64MB, 0 to disable)
    BlockSize        int             // Target block size (default: 16KB)
    CompressionType  CompressionType // zstd, snappy, or none (default: zstd)
    CompressionLevel int             // zstd level 1-4 (default: 1 = fastest)
    BloomFPRate      float64         // Bloom filter false positive rate (default: 0.01)
    WALSyncMode      WALSyncMode     // WAL sync behavior
    VerifyChecksums  bool            // Verify on read (default: true)
}

// Compression types
const (
    CompressionZstd   // Default, good compression and speed
    CompressionSnappy // Faster, less compression
    CompressionNone   // No compression
)

// Preset configurations
func DefaultOptions(dir string) Options          // Balanced defaults
func LowMemoryOptions(dir string) Options        // Minimal memory (4MB memtable, no cache, no bloom)
func HighPerformanceOptions(dir string) Options  // Max throughput

Architecture

┌─────────────────────────────────────────────────────────┐
│                        Store                            │
├─────────────────────────────────────────────────────────┤
│  Write Path                    Read Path                │
│  ┌─────────┐                   ┌─────────────────────┐  │
│  │   WAL   │                   │ Memtable (newest)   │  │
│  └────┬────┘                   ├─────────────────────┤  │
│       │                        │ Immutable Memtables │  │
│       v                        ├─────────────────────┤  │
│  ┌─────────┐                   │ L0 SSTables         │  │
│  │Memtable │                   ├─────────────────────┤  │
│  └────┬────┘                   │ L1+ SSTables        │  │
│       │ flush                  └─────────┬───────────┘  │
│       v                                  │              │
│  ┌─────────┐    ┌───────────┐            │              │
│  │ SSTable │◄───│ LRU Cache │◄───────────┘              │
│  └─────────┘    └───────────┘                           │
│       │                                                 │
│       v compaction                                      │
│  ┌─────────────────────────────────────────────────┐    │
│  │ L0 → L1 → L2 → ... → L6 (leveled compaction)    │    │
│  └─────────────────────────────────────────────────┘    │
└─────────────────────────────────────────────────────────┘
Key Design Decisions
Component Choice Rationale
Compression zstd/snappy/none Configurable speed vs size tradeoff
I/O Explicit syscalls Control over caching
Index Sparse (per block) Low memory footprint
Compaction Leveled Read-optimized
Concurrency RWMutex Simple, read-optimized
L1+ Scans Lazy loading Only load tables when needed for LIMIT queries
SSTable Format
┌────────────────────────────────────┐
│ Data Block 0 (compressed)          │
│ Data Block 1                       │
│ ...                                │
│ Data Block N                       │
├────────────────────────────────────┤
│ Bloom Filter                       │
├────────────────────────────────────┤
│ Index Block (sparse)               │
├────────────────────────────────────┤
│ Metadata Block                     │
├────────────────────────────────────┤
│ Footer (64 bytes)                  │
└────────────────────────────────────┘
Version Compatibility

Store files are not compatible between minor versions (e.g., v0.3.x stores cannot be read by v0.4.x). If upgrading, export your data first or recreate the store.

Memory Usage

Component Memory
Block cache Configurable (default 64MB)
Memtable Configurable (default 4MB)
Bloom filters ~1.2MB per 1M keys
Sparse index ~140KB per 1M keys (with 16KB blocks)

For minimal memory (billions of records), use LowMemoryOptions():

  • 4MB memtable
  • No block cache
  • No bloom filters
  • Index: ~140MB for 1B keys

Performance

Apple M3 Max
Operation Latency Throughput
Sequential read 304 ns 3.3M ops/sec
Sequential write 465 ns 2.2M ops/sec
Mixed (80% read) 392 ns 2.6M ops/sec
SSTable read (cached) 300 ns 3.3M ops/sec

Block cache impact (random reads, 100K keys):

Cache Latency Hit Rate
0 MB 42 µs 0%
64 MB 300 ns 99.9%
AWS t4g.micro (1GB RAM, ARM64)

1 billion record benchmark with GOMEMLIMIT=700MiB:

zstd compression (default), 100M records

Operation Throughput
Sequential write 579K ops/sec
Random read (no cache) 16K ops/sec
Random read (64MB cache) 16K ops/sec
Full scan 1.4M keys/sec
Random prefix scan 15K scans/sec
Prefix scan with LIMIT 100 7K scans/sec

Prefix scans with LIMIT benefit from lazy loading: L1+ tables are sorted and non-overlapping, so only the tables actually needed are loaded.

Write time: ~3 min for 100M records, ~1.5h for 1B records

Memory usage during benchmark:

  • Heap: 50-200 MB
  • Sys: 450-700 MB
  • Index: ~35 MB (for 1B records)

Configuration:

  • 4MB memtable
  • 16KB block size
  • No block cache
  • No bloom filters
  • WAL sync disabled (for throughput)

Complexity

  • Writes: O(log n) memtable insert, sequential I/O for WAL
  • Reads: O(L × log n) where L is number of levels (max 7), bloom filters skip levels without matches
  • Space: Varies by data - sequential keys compress to ~0.1x with zstd, random data ~0.5-0.8x

Examples

Persistence and Recovery
// Data persists across restarts
store, _ := tinykvs.Open("/tmp/mydb", tinykvs.DefaultOptions("/tmp/mydb"))
store.PutString([]byte("key"), "value")
store.Flush() // Ensure durability
store.Close()

// Reopen - data is still there
store, _ = tinykvs.Open("/tmp/mydb", tinykvs.DefaultOptions("/tmp/mydb"))
val, _ := store.GetString([]byte("key"))
fmt.Println(val) // "value"
Low Memory Configuration
opts := tinykvs.LowMemoryOptions("/tmp/mydb")
opts.MemtableSize = 512 * 1024 // 512KB
store, _ := tinykvs.Open("/tmp/mydb", opts)
Low Memory (Billions of Records)

For running on memory-constrained systems like t4g.micro (1GB RAM) with billions of records:

// Use LowMemoryOptions: 4MB memtable, no cache, no bloom filters
opts := tinykvs.LowMemoryOptions("/data/mydb")
store, _ := tinykvs.Open("/data/mydb", opts)

// Combined with GOMEMLIMIT for Go runtime memory control:
// GOMEMLIMIT=600MiB ./myapp

This configuration can handle 1B+ records while staying within tight memory limits.

Prefix Scanning
// Store user data with prefixed keys
store.PutString([]byte("user:001:name"), "Alice")
store.PutInt64([]byte("user:001:age"), 30)
store.PutString([]byte("user:002:name"), "Bob")
store.PutInt64([]byte("user:002:age"), 25)

// Scan all keys for user:001
store.ScanPrefix([]byte("user:001:"), func(key []byte, value tinykvs.Value) bool {
    fmt.Printf("%s = %v\n", key, value)
    return true // continue scanning
})

// Scan all users (returns keys in sorted order)
store.ScanPrefix([]byte("user:"), func(key []byte, value tinykvs.Value) bool {
    fmt.Printf("%s\n", key)
    return true
})
Statistics
stats := store.Stats()
fmt.Printf("Memtable: %d bytes, %d keys\n", stats.MemtableSize, stats.MemtableCount)
fmt.Printf("Cache hit rate: %.1f%%\n", stats.CacheStats.HitRate())
for _, level := range stats.Levels {
    fmt.Printf("L%d: %d tables, %d keys\n", level.Level, level.NumTables, level.NumKeys)
}
Storing Structs and Maps

TinyKVS has built-in support for storing Go structs and maps using msgpack serialization:

type Address struct {
    City    string `msgpack:"city"`
    Country string `msgpack:"country"`
}

type User struct {
    Name    string  `msgpack:"name"`
    Email   string  `msgpack:"email"`
    Age     int     `msgpack:"age"`
    Address Address `msgpack:"address"`
}

// Store a struct
user := User{
    Name:    "Alice",
    Email:   "alice@example.com",
    Age:     30,
    Address: Address{City: "NYC", Country: "USA"},
}
store.PutStruct([]byte("user:1"), user)

// Retrieve into a struct
var retrieved User
store.GetStruct([]byte("user:1"), &retrieved)

// Store a map directly
store.PutMap([]byte("config:app"), map[string]any{
    "debug":   true,
    "timeout": 30,
})

// Retrieve as map
config, _ := store.GetMap([]byte("config:app"))

Nested structs are fully supported and can be queried in the interactive shell.

JSON Storage

For human-readable storage or shell querying:

// Store as JSON string
store.PutJson([]byte("user:2"), User{Name: "Bob", Age: 25})

// Retrieve from JSON
var user User
store.GetJson([]byte("user:2"), &user)
Manual Serialization

For other formats (Gob, Protobuf, etc.), serialize to bytes:

// Gob
var buf bytes.Buffer
gob.NewEncoder(&buf).Encode(user)
store.PutBytes([]byte("user:1"), buf.Bytes())

// Protobuf
data, _ := proto.Marshal(user)
store.PutBytes([]byte("user:1"), data)

Interactive Shell

TinyKVS includes an interactive SQL-like shell for exploring and manipulating data:

go install github.com/freeeve/tinykvs/cmd/tinykvs@latest
tinykvs shell -dir /path/to/db

# Or use environment variable
export TINYKVS_STORE=/path/to/db
tinykvs shell

Results are displayed in a formatted table:

┌────────┬───────────────────────────┐
│ k      │ v                         │
├────────┼───────────────────────────┤
│ user:1 │ {"age":30,"name":"Alice"} │
│ user:2 │ {"age":25,"name":"Bob"}   │
└────────┴───────────────────────────┘
(2 rows) scanned 2 keys, 0 blocks, 0ms
SQL Commands
-- Query data
SELECT * FROM kv WHERE k = 'user:1'
SELECT * FROM kv WHERE k LIKE 'user:%'
SELECT * FROM kv WHERE k BETWEEN 'a' AND 'z' LIMIT 10
SELECT * FROM kv LIMIT 100

-- Extract record fields
SELECT v.name, v.age FROM kv WHERE k = 'user:1'
SELECT v.address.city FROM kv WHERE k = 'user:1'

-- ORDER BY (buffers results for sorting)
SELECT * FROM kv ORDER BY k DESC LIMIT 10
SELECT v.name, v.age FROM kv ORDER BY v.age DESC, v.name
SELECT * FROM kv WHERE k LIKE 'user:%' ORDER BY v.score LIMIT 100

-- Insert data (JSON auto-detected as records)
INSERT INTO kv VALUES ('user:1', '{"name":"Alice","age":30}')
INSERT INTO kv VALUES ('key', 'simple string value')
INSERT INTO kv VALUES ('bin', x'deadbeef')  -- hex bytes

-- Update and delete
UPDATE kv SET v = 'newvalue' WHERE k = 'key'
DELETE FROM kv WHERE k = 'key'
DELETE FROM kv WHERE k LIKE 'temp:%'
Shell Commands
\help, \h, \?      Show help
\stats             Show store statistics
\flush             Flush memtable to disk
\compact           Run compaction
\tables            Show table schema
\export <file>     Export to CSV
\import <file>     Import from CSV
\q, \quit          Exit shell
Binary Key Functions

The shell supports functions for constructing binary keys:

-- uint64_be(n) - 8-byte big-endian encoding
SELECT * FROM kv WHERE k = x'14' || uint64_be(28708)

-- uint64_le(n) - 8-byte little-endian encoding
-- uint32_be(n) - 4-byte big-endian encoding
-- uint32_le(n) - 4-byte little-endian encoding

-- byte(n) - single byte (0-255)
SELECT * FROM kv WHERE k = byte(0x14) || uint64_be(12345)

-- fnv64(s) - FNV-1a 64-bit hash of string
SELECT * FROM kv WHERE k LIKE byte(0x10) || fnv64('user-123') || '%'

-- Hex concatenation
SELECT * FROM kv WHERE k = x'14' || uint64_be(28708) || fnv64('item-456')

These are useful for querying data with composite binary keys.

CSV Import/Export

Export creates a simple key,value CSV:

key,value
user:1,{"name":"Alice","age":30}
counter,42
flag,true

Import auto-detects the format:

2 columns (key,value) - values auto-detect type:

key,value
user:1,hello
user:2,42
user:3,{"name":"Bob"}

3+ columns - first column is key, rest become record fields:

id,name,age,active
user:1,Alice,30,true
user:2,Bob,25,false

This creates records like {"name":"Alice","age":30,"active":true}

Type hints - prevent unwanted auto-detection (e.g., zip codes):

id,zip:string,count:int,price:float,active:bool,data:json
item:1,02134,100,19.99,true,{"x":1}

Supported hints: string, int, float, bool, json

Nested Field Access

Records with nested structures support dot notation for field access:

-- Given: {"name":"Alice","address":{"city":"NYC","geo":{"lat":40.7}}}

SELECT v.name FROM kv WHERE k = 'user:1'           -- Alice
SELECT v.address.city FROM kv WHERE k = 'user:1'   -- NYC
SELECT v.`address.geo.lat` FROM kv WHERE k = 'user:1'  -- 40.7 (3+ levels need backticks)
Streaming Aggregations

Aggregation functions compute results in a single pass with O(1) memory:

SELECT count() FROM kv                              -- count all rows
SELECT count() FROM kv WHERE k LIKE 'user:%'        -- count with filter
SELECT sum(v.age), avg(v.age) FROM kv               -- sum and average
SELECT min(v.score), max(v.score) FROM kv           -- min and max
SELECT count(), sum(v.price), avg(v.price) FROM kv  -- multiple aggregates
SELECT sum(v.stats.count) FROM kv                   -- nested fields work too

License

MIT

Documentation

Index

Constants

View Source
const (
	SSTableMagic   uint64 = 0x544B5653_00000001 // "TKVS" + version 1
	SSTableVersion uint32 = 1
)

SSTable magic number and version

View Source
const SSTableFooterSize = 64

SSTableFooterSize is the fixed size of the footer in bytes.

Variables

View Source
var (
	ErrInvalidManifest  = errors.New("invalid manifest file")
	ErrManifestCorrupt  = errors.New("manifest file corrupted")
	ErrManifestNotFound = errors.New("manifest file not found")
)
View Source
var (
	ErrStoreClosed     = errors.New("store is closed")
	ErrStoreLocked     = errors.New("store is locked by another process")
	ErrKeyExists       = errors.New("key already exists")
	ErrConditionFailed = errors.New("condition failed")
	ErrTypeMismatch    = errors.New("value type mismatch")
)

Errors

View Source
var (
	ErrKeyNotFound      = errors.New("key not found")
	ErrInvalidValue     = errors.New("invalid value encoding")
	ErrCorruptedData    = errors.New("corrupted data")
	ErrChecksumMismatch = errors.New("checksum mismatch")
)

Common errors

View Source
var (
	ErrInvalidSSTable = errors.New("invalid sstable format")
)

Errors

Functions

func BatchPutStruct added in v0.10.0

func BatchPutStruct[T any](b *Batch, key []byte, v *T) error

BatchPutStruct adds a struct to a batch using cached encoder.

func CompareKeys

func CompareKeys(a, b []byte) int

CompareKeys performs lexicographic comparison of two keys. Returns -1 if a < b, 0 if a == b, 1 if a > b. Uses bytes.Compare which is assembly-optimized on most platforms.

func DecodeJson added in v0.7.0

func DecodeJson(data []byte, dest any) error

DecodeJson decodes JSON bytes into the provided destination.

func DecodeMsgpack added in v0.7.0

func DecodeMsgpack(data []byte) (map[string]any, error)

DecodeMsgpack decodes msgpack bytes into a record map.

func EncodeEntry

func EncodeEntry(e Entry) []byte

EncodeEntry serializes an entry (key + value + sequence) to bytes.

func EncodeJson added in v0.7.0

func EncodeJson(data any) ([]byte, error)

EncodeJson encodes any value to JSON bytes.

func EncodeMsgpack added in v0.7.0

func EncodeMsgpack(record map[string]any) ([]byte, error)

EncodeMsgpack encodes a record map to msgpack bytes.

func EncodeValue

func EncodeValue(v Value) []byte

EncodeValue serializes a value to bytes.

func GetStruct added in v0.10.0

func GetStruct[T any](s *Store, key []byte) (*T, error)

GetStruct retrieves and decodes a struct using pre-registered decoder (faster than method version). Use this in hot paths where performance matters.

func GetStructInto added in v0.10.0

func GetStructInto[T any](s *Store, key []byte, dest *T) error

GetStructInto retrieves and decodes a struct into an existing pointer. Avoids allocation when you already have a destination.

func GetStructZeroCopy added in v0.10.0

func GetStructZeroCopy[T any](s *Store, key []byte, fn func(v *T) error) error

GetStructZeroCopy retrieves a struct with zero-copy strings. Strings are only valid within the callback - they point into the database buffer. This is the fastest way to read structs when you only need temporary access.

func NewLRUCache

func NewLRUCache(capacity int64) *lruCache

NewLRUCache creates a new LRU cache with the given capacity in bytes.

func NewSSTableWriter

func NewSSTableWriter(id uint32, path string, numKeys uint, opts Options) (*sstableWriter, error)

NewSSTableWriter creates a new SSTable writer with bloom filter enabled. For internal use, use newSSTableWriter which allows disabling bloom filters.

func PutStruct added in v0.10.0

func PutStruct[T any](s *Store, key []byte, v *T) error

PutStruct stores a struct using cached encoder.

func PutStructs added in v0.10.0

func PutStructs[T any](s *Store, items []KeyValue[T]) error

PutStructs writes multiple structs in parallel using cached encoder. Parallelizes encoding across CPU cores, then writes atomically.

func SSTableFilename added in v0.5.0

func SSTableFilename(id uint32) string

SSTableFilename returns the filename for an SSTable with the given ID.

func SSTablePath added in v0.5.0

func SSTablePath(dir string, id uint32) string

SSTablePath returns the path for an SSTable with the given ID.

func ScanPrefixJson added in v0.7.1

func ScanPrefixJson[T any](s *Store, prefix []byte, fn func(key []byte, val *T) bool) error

ScanPrefixJson scans keys with the given prefix and decodes JSON values into a struct.

func ScanPrefixStructs added in v0.7.1

func ScanPrefixStructs[T any](s *Store, prefix []byte, fn func(key []byte, val *T) bool) error

ScanPrefixStructs scans keys with the given prefix and decodes each value into a struct. Uses pre-registered decoder for best performance.

func ScanPrefixStructsZeroCopy added in v0.10.0

func ScanPrefixStructsZeroCopy[T any](s *Store, prefix []byte, fn func(key []byte, val *T) bool) error

ScanPrefixStructsZeroCopy scans keys with prefix using zero-copy decoding. Struct string fields are only valid within the callback - copy any you need.

func ScanRangeJson added in v0.7.1

func ScanRangeJson[T any](s *Store, start, end []byte, fn func(key []byte, val *T) bool) error

ScanRangeJson scans keys in [start, end) and decodes JSON values into a struct.

func ScanRangeStructs added in v0.7.1

func ScanRangeStructs[T any](s *Store, start, end []byte, fn func(key []byte, val *T) bool) error

ScanRangeStructs scans keys in [start, end) and decodes each value into a struct. Uses pre-registered decoder for best performance.

func ScanRangeStructsZeroCopy added in v0.10.0

func ScanRangeStructsZeroCopy[T any](s *Store, start, end []byte, fn func(key []byte, val *T) bool) error

ScanRangeStructsZeroCopy scans keys in [start, end) using zero-copy decoding.

Types

type AggregateResult added in v0.7.0

type AggregateResult struct {
	Count int64
	Sum   float64
	Min   float64
	Max   float64
	// contains filtered or unexported fields
}

AggregateResult holds the result of an aggregation query.

func (AggregateResult) Avg added in v0.7.0

func (r AggregateResult) Avg() float64

Avg returns the average, or 0 if no values.

type Batch added in v0.6.0

type Batch struct {
	// contains filtered or unexported fields
}

Batch accumulates multiple operations to be applied atomically.

func NewBatch added in v0.6.0

func NewBatch() *Batch

NewBatch creates a new batch for atomic writes.

func (*Batch) Delete added in v0.6.0

func (b *Batch) Delete(key []byte)

Delete adds a delete operation to the batch.

func (*Batch) Len added in v0.6.0

func (b *Batch) Len() int

Len returns the number of operations in the batch.

func (*Batch) Put added in v0.6.0

func (b *Batch) Put(key []byte, value Value)

Put adds a put operation to the batch.

func (*Batch) PutBytes added in v0.6.0

func (b *Batch) PutBytes(key, value []byte)

PutBytes adds a bytes put operation.

func (*Batch) PutInt64 added in v0.6.0

func (b *Batch) PutInt64(key []byte, value int64)

PutInt64 adds an int64 put operation.

func (*Batch) PutJson added in v0.7.0

func (b *Batch) PutJson(key []byte, data any) error

PutJson adds a JSON string put operation.

func (*Batch) PutMap added in v0.7.0

func (b *Batch) PutMap(key []byte, fields map[string]any) error

PutMap adds a record put operation.

func (*Batch) PutString added in v0.6.0

func (b *Batch) PutString(key []byte, value string)

PutString adds a string put operation.

func (*Batch) Reset added in v0.6.0

func (b *Batch) Reset()

Reset clears the batch for reuse.

type Block

type Block struct {
	Type    uint8
	Entries []BlockEntry
	// contains filtered or unexported fields
}

Block represents a decompressed data block.

func DecodeBlock

func DecodeBlock(data []byte, verifyChecksum bool) (*Block, error)

DecodeBlock decompresses and parses a block. The returned Block's entries reference the internal decompressed buffer, so the Block should not be modified and is only valid while cached. Call Block.Release() when done to return the buffer to the pool.

func (*Block) DecRef added in v0.11.1

func (b *Block) DecRef() bool

DecRef decrements the block's reference count. When the count reaches zero, the buffer is returned to the pool. Returns true if the block was released (refcount hit 0).

func (*Block) IncRef added in v0.11.1

func (b *Block) IncRef()

IncRef increments the block's reference count. Call this when taking a reference to a cached block.

func (*Block) RefCount added in v0.11.1

func (b *Block) RefCount() int32

RefCount returns the current reference count.

func (*Block) Release added in v0.4.0

func (b *Block) Release()

Release returns the block's buffer to the pool. Deprecated: Use DecRef for reference-counted blocks. This method is kept for non-cached blocks that aren't reference counted.

type BlockEntry

type BlockEntry struct {
	Key   []byte
	Value []byte // Encoded value bytes
}

BlockEntry represents a key-value pair within a block.

type BloomFilter

type BloomFilter struct {
	// contains filtered or unexported fields
}

BloomFilter wraps a bloom filter with serialization.

func DeserializeBloomFilter

func DeserializeBloomFilter(data []byte) (*BloomFilter, error)

DeserializeBloomFilter recreates a bloom filter from bytes.

func NewBloomFilter

func NewBloomFilter(numKeys uint, fpRate float64) *BloomFilter

NewBloomFilter creates a bloom filter for the expected number of keys.

func (*BloomFilter) Add

func (bf *BloomFilter) Add(key []byte)

Add adds a key to the bloom filter.

func (*BloomFilter) MayContain

func (bf *BloomFilter) MayContain(key []byte) bool

MayContain returns true if the key might be in the set. False positives are possible, but false negatives are not.

func (*BloomFilter) Serialize

func (bf *BloomFilter) Serialize() ([]byte, error)

Serialize encodes the bloom filter for storage.

type CacheStats

type CacheStats struct {
	Hits     uint64
	Misses   uint64
	Size     int64
	Capacity int64
	Entries  int
}

CacheStats contains cache statistics.

func (CacheStats) HitRate

func (s CacheStats) HitRate() float64

HitRate returns the cache hit rate as a percentage.

type CompactionStyle

type CompactionStyle int

CompactionStyle determines the compaction strategy.

const (
	// CompactionStyleLeveled uses leveled compaction (read-optimized).
	CompactionStyleLeveled CompactionStyle = iota
	// CompactionStyleSizeTiered uses size-tiered compaction (write-optimized).
	CompactionStyleSizeTiered
)

type CompressionType added in v0.4.0

type CompressionType int

CompressionType determines the compression algorithm.

const (
	// CompressionZstd uses zstd compression (good compression, fast).
	CompressionZstd CompressionType = iota
	// CompressionSnappy uses snappy compression (faster, less compression).
	CompressionSnappy
	// CompressionNone disables compression.
	CompressionNone
	// CompressionMinLZ uses minlz compression (very fast, good compression).
	CompressionMinLZ
)

type Entry

type Entry struct {
	Key      []byte
	Value    Value
	Sequence uint64 // Monotonic sequence number for ordering
}

Entry represents a key-value pair with metadata.

func DecodeEntry

func DecodeEntry(data []byte) (Entry, int, error)

DecodeEntry deserializes an entry from bytes.

type Index

type Index struct {
	Entries []IndexEntry
	MinKey  []byte
	MaxKey  []byte
	NumKeys uint64
}

Index provides efficient key lookup within an SSTable.

func DeserializeIndex

func DeserializeIndex(data []byte) (*Index, error)

DeserializeIndex recreates an index from bytes.

func (*Index) MemorySize added in v0.4.0

func (idx *Index) MemorySize() int64

MemorySize returns the exact memory usage of this index in bytes.

func (*Index) Search

func (idx *Index) Search(key []byte) int

Search finds the block that may contain the key. Returns the index of the block, or -1 if key is out of range.

func (*Index) Serialize

func (idx *Index) Serialize() []byte

Serialize encodes the index for storage.

type IndexEntry

type IndexEntry struct {
	Key         []byte // First key in the block (separator key)
	BlockOffset uint64 // File offset to the block
	BlockSize   uint32 // Size of the compressed block
}

IndexEntry represents a sparse index entry pointing to a data block.

type KeyLocation added in v0.6.0

type KeyLocation struct {
	Level   int
	TableID uint32
}

KeyLocation describes where a key is stored.

type KeyValue added in v0.10.0

type KeyValue[T any] struct {
	Key   []byte
	Value *T
}

KeyValue pairs a key with a struct value for bulk operations.

type LevelStats

type LevelStats struct {
	Level     int
	NumTables int
	Size      int64
	NumKeys   uint64
}

LevelStats contains statistics for a single level.

type Manifest added in v0.5.0

type Manifest struct {
	// contains filtered or unexported fields
}

Manifest tracks all SSTables and their metadata. It's an append-only log that allows fast store recovery without reading every SSTable file.

func OpenManifest added in v0.5.0

func OpenManifest(path string) (*Manifest, error)

OpenManifest opens or creates a manifest file.

func (*Manifest) AddTable added in v0.5.0

func (m *Manifest) AddTable(meta *TableMeta) error

AddTable records a new SSTable in the manifest.

func (*Manifest) Close added in v0.5.0

func (m *Manifest) Close() error

Close closes the manifest file.

func (*Manifest) DeleteTables added in v0.5.0

func (m *Manifest) DeleteTables(ids []uint32) error

DeleteTables records removal of multiple SSTables atomically.

func (*Manifest) MaxID added in v0.5.0

func (m *Manifest) MaxID() uint32

MaxID returns the highest SSTable ID seen.

func (*Manifest) Tables added in v0.5.0

func (m *Manifest) Tables() map[uint32]*TableMeta

Tables returns all tracked tables.

type Options

type Options struct {
	// Dir is the base directory for all data files.
	Dir string

	// MemtableSize is the max memtable size in bytes before flush.
	// Default: 4MB
	MemtableSize int64

	// BlockCacheSize is the LRU cache size in bytes.
	// Set to 0 for minimal memory usage (no caching).
	// Default: 64MB
	BlockCacheSize int64

	// BlockSize is the target block size before compression.
	// Default: 16KB
	BlockSize int

	// CompressionType determines which compression algorithm to use.
	// Default: CompressionMinLZ
	CompressionType CompressionType

	// CompressionLevel is the compression level (ignored for snappy/none).
	// For zstd: 1 = fastest, 3 = default, higher = better compression.
	// For minlz: 1 = fastest, 2 = balanced, 3 = smallest.
	// Default: 1 (fastest)
	CompressionLevel int

	// BloomFPRate is the target false positive rate for bloom filters.
	// Default: 0.01 (1%)
	BloomFPRate float64

	// CompactionStyle determines the compaction strategy.
	// Default: CompactionStyleLeveled
	CompactionStyle CompactionStyle

	// L0CompactionTrigger is the number of L0 files that triggers compaction.
	// Default: 4
	L0CompactionTrigger int

	// L0CompactionBatchSize limits how many L0 files to compact at once.
	// Lower values mean faster individual compactions but more total compactions.
	// Default: 0 (no limit - compact all L0 files)
	L0CompactionBatchSize int

	// MaxLevels is the maximum number of LSM levels.
	// Default: 7
	MaxLevels int

	// LevelSizeMultiplier is the size ratio between adjacent levels.
	// Default: 10
	LevelSizeMultiplier int

	// L1MaxSize is the maximum size of L1 level in bytes.
	// L2+ sizes are calculated as L1MaxSize * LevelSizeMultiplier^(level-1).
	// Default: 10MB (0 = use default)
	L1MaxSize int64

	// WALSyncMode determines when WAL is synced to disk.
	// Default: WALSyncPerBatch
	WALSyncMode WALSyncMode

	// FlushInterval is the automatic flush interval.
	// Default: 30s
	FlushInterval time.Duration

	// CompactionInterval is how often to check for compaction.
	// Default: 1s
	CompactionInterval time.Duration

	// VerifyChecksums enables checksum verification on reads.
	// Default: true
	VerifyChecksums bool

	// DisableBloomFilter disables bloom filters for minimal memory.
	// Default: false
	DisableBloomFilter bool
}

Options configures the Store behavior.

func DefaultOptions

func DefaultOptions(dir string) Options

DefaultOptions returns production-ready defaults for the given directory.

func HighPerformanceOptions

func HighPerformanceOptions(dir string) Options

HighPerformanceOptions returns options optimized for performance.

func LowMemoryOptions

func LowMemoryOptions(dir string) Options

LowMemoryOptions returns options for memory-constrained environments. Suitable for running billions of records on systems with <1GB RAM.

type PrefixTableInfo added in v0.9.1

type PrefixTableInfo struct {
	Level      int
	TableID    uint32
	MinKey     []byte
	MaxKey     []byte
	NumKeys    uint64
	InRange    bool   // Prefix is within [MinKey, MaxKey]
	HasMatch   bool   // Table actually contains keys with this prefix
	FirstMatch []byte // First matching key (if HasMatch)
}

PrefixTableInfo describes an SSTable's relationship to a prefix.

type SSTable

type SSTable struct {
	ID          uint32
	Path        string
	Level       int
	Footer      SSTableFooter
	Meta        SSTableMeta
	Index       *Index
	BloomFilter *BloomFilter
	// contains filtered or unexported fields
}

SSTable represents an on-disk sorted string table.

func OpenSSTable

func OpenSSTable(id uint32, path string) (*SSTable, error)

OpenSSTable opens an existing SSTable file (eagerly loads index and bloom filter).

func OpenSSTableFromManifest added in v0.5.0

func OpenSSTableFromManifest(meta *TableMeta, dir string) (*SSTable, error)

OpenSSTableFromManifest opens an SSTable using metadata from the manifest. Index and bloom filter are loaded lazily on first access.

func (*SSTable) Close

func (sst *SSTable) Close() error

Close closes the SSTable file.

func (*SSTable) DecRef added in v0.10.1

func (sst *SSTable) DecRef()

DecRef decrements the reference count. If the count reaches zero and the table is marked for removal, the file is closed and removed.

func (*SSTable) Get

func (sst *SSTable) Get(key []byte, cache *lruCache, verifyChecksum bool) (Entry, bool, error)

Get retrieves a value from the SSTable. Returns the entry, whether it was found, and any error.

func (*SSTable) IncRef added in v0.10.1

func (sst *SSTable) IncRef()

IncRef increments the reference count. Call this before using the SSTable in a goroutine that may outlive the current scope.

func (*SSTable) MarkForRemoval added in v0.10.1

func (sst *SSTable) MarkForRemoval()

MarkForRemoval marks this SSTable for removal after compaction. The file will be closed when all references are released.

func (*SSTable) MaxKey

func (sst *SSTable) MaxKey() []byte

MaxKey returns the maximum key in this SSTable.

func (*SSTable) MemorySize added in v0.4.0

func (sst *SSTable) MemorySize() int64

MemorySize returns the in-memory size (index + bloom filter).

func (*SSTable) MinKey

func (sst *SSTable) MinKey() []byte

MinKey returns the minimum key in this SSTable.

func (*SSTable) Size

func (sst *SSTable) Size() int64

Size returns the file size in bytes.

type SSTableFooter

type SSTableFooter struct {
	BloomOffset   uint64 // Offset to bloom filter block
	BloomSize     uint32 // Size of bloom filter block
	IndexOffset   uint64 // Offset to index block
	IndexSize     uint32 // Size of index block
	MetaOffset    uint64 // Offset to metadata block
	MetaSize      uint32 // Size of metadata block
	NumDataBlocks uint32 // Number of data blocks
	NumKeys       uint64 // Total number of keys
	FileSize      uint64 // Total file size for validation
	Magic         uint64 // Magic number for validation
}

SSTableFooter is the fixed-size footer at the end of each SSTable.

type SSTableMeta

type SSTableMeta struct {
	Level         int
	MinSequence   uint64
	MaxSequence   uint64
	NumTombstones uint64
	CreatedAt     int64
}

SSTableMeta contains metadata about the SSTable.

type ScanProgress added in v0.8.0

type ScanProgress func(stats ScanStats) bool

ScanProgress is called periodically during scan operations to report progress. Return false to stop the scan early.

type ScanStats added in v0.8.0

type ScanStats struct {
	BlocksLoaded   int64 // Number of SSTable blocks accessed (disk + cache)
	BlocksCacheHit int64 // Number of blocks served from cache
	BlocksDiskRead int64 // Number of blocks read from disk
	KeysExamined   int64 // Total keys examined (including duplicates and tombstones)
	TablesChecked  int64 // Number of SSTables checked for prefix
	TablesAdded    int64 // Number of SSTables added to scanner (had matching entries)
}

ScanStats tracks statistics during scan operations.

type Store

type Store struct {
	// contains filtered or unexported fields
}

Store is the main key-value store.

func Open

func Open(path string, opts Options) (*Store, error)

Open opens or creates a store at the given path.

func (*Store) Aggregate added in v0.7.0

func (s *Store) Aggregate(prefix []byte, field string) (AggregateResult, error)

Aggregate computes multiple aggregations in a single scan. For simple numeric values, pass an empty string for field.

func (*Store) Avg added in v0.7.0

func (s *Store) Avg(prefix []byte, field string) (float64, error)

Avg returns the average of a numeric field across all records matching the prefix.

func (*Store) Close

func (s *Store) Close() error

Close closes the store.

func (*Store) Compact

func (s *Store) Compact() error

Compact forces compaction of all L0 tables to L1. This makes reads faster by converting overlapping L0 tables to disjoint L1 tables.

func (*Store) Count added in v0.7.0

func (s *Store) Count(prefix []byte) (int64, error)

Count returns the number of keys matching the prefix.

func (*Store) Delete

func (s *Store) Delete(key []byte) error

Delete removes a key.

func (*Store) DeletePrefix added in v0.6.0

func (s *Store) DeletePrefix(prefix []byte) (int64, error)

DeletePrefix deletes all keys with the given prefix. Returns the number of keys deleted.

func (*Store) DeleteRange added in v0.6.0

func (s *Store) DeleteRange(start, end []byte) (int64, error)

DeleteRange deletes all keys in the range [start, end). This is more efficient than deleting keys one by one. Returns the number of keys deleted.

func (*Store) ExplainPrefix added in v0.9.1

func (s *Store) ExplainPrefix(prefix []byte) []PrefixTableInfo

ExplainPrefix returns information about which tables contain a given prefix.

func (*Store) FindKey added in v0.6.0

func (s *Store) FindKey(key []byte) *KeyLocation

FindKey returns the location of a key, or nil if the key is still in the memtable or was not found.

func (*Store) Flush

func (s *Store) Flush() error

Flush forces all data to disk.

func (*Store) Get

func (s *Store) Get(key []byte) (Value, error)

Get retrieves a value by key.

func (*Store) GetBool

func (s *Store) GetBool(key []byte) (bool, error)

GetBool retrieves a bool value by key.

func (*Store) GetBytes

func (s *Store) GetBytes(key []byte) ([]byte, error)

GetBytes retrieves a byte slice value by key.

func (*Store) GetFloat64

func (s *Store) GetFloat64(key []byte) (float64, error)

GetFloat64 retrieves a float64 value by key.

func (*Store) GetInt64

func (s *Store) GetInt64(key []byte) (int64, error)

GetInt64 retrieves an int64 value by key.

func (*Store) GetJson added in v0.7.0

func (s *Store) GetJson(key []byte, dest any) error

GetJson retrieves a JSON string and decodes it into the provided destination.

func (*Store) GetMap added in v0.7.0

func (s *Store) GetMap(key []byte) (map[string]any, error)

GetMap retrieves a structured record by key.

func (*Store) GetMapZeroCopy added in v0.10.0

func (s *Store) GetMapZeroCopy(key []byte, fn func(m map[string]any) error) error

GetMapZeroCopy retrieves a map with zero-copy strings. Strings are only valid within the callback - they point into the database buffer. Copy any strings you need to retain before the callback returns.

func (*Store) GetString

func (s *Store) GetString(key []byte) (string, error)

GetString retrieves a string value by key.

func (*Store) Increment added in v0.6.0

func (s *Store) Increment(key []byte, delta int64) (int64, error)

Increment atomically adds delta to an int64 value and returns the new value. If key doesn't exist, it's treated as 0. Returns error if existing value is not an int64.

func (*Store) Max added in v0.7.0

func (s *Store) Max(prefix []byte, field string) (float64, error)

Max returns the maximum of a numeric field across all records matching the prefix.

func (*Store) Min added in v0.7.0

func (s *Store) Min(prefix []byte, field string) (float64, error)

Min returns the minimum of a numeric field across all records matching the prefix.

func (*Store) Put

func (s *Store) Put(key []byte, value Value) error

Put stores a key-value pair.

func (*Store) PutBool

func (s *Store) PutBool(key []byte, value bool) error

PutBool stores a bool value.

func (*Store) PutBytes

func (s *Store) PutBytes(key, value []byte) error

PutBytes stores a byte slice value.

func (*Store) PutFloat64

func (s *Store) PutFloat64(key []byte, value float64) error

PutFloat64 stores a float64 value.

func (*Store) PutIfEquals added in v0.6.0

func (s *Store) PutIfEquals(key []byte, value Value, expected Value) error

PutIfEquals stores a value only if the current value equals expected. Returns ErrConditionFailed if values don't match, ErrKeyNotFound if key doesn't exist.

func (*Store) PutIfNotExists added in v0.6.0

func (s *Store) PutIfNotExists(key []byte, value Value) error

PutIfNotExists stores a value only if the key doesn't exist. Returns ErrKeyExists if the key already exists.

func (*Store) PutInt64

func (s *Store) PutInt64(key []byte, value int64) error

PutInt64 stores an int64 value.

func (*Store) PutJson added in v0.7.0

func (s *Store) PutJson(key []byte, data any) error

PutJson stores a record as a JSON string. Use this when you want human-readable storage instead of binary msgpack.

func (*Store) PutMap added in v0.7.0

func (s *Store) PutMap(key []byte, fields map[string]any) error

PutMap stores a structured record with named fields.

func (*Store) PutString

func (s *Store) PutString(key []byte, value string) error

PutString stores a string value.

func (*Store) ScanPrefix

func (s *Store) ScanPrefix(prefix []byte, fn func(key []byte, value Value) bool) error

ScanPrefix iterates over all keys with the given prefix in sorted order. The callback receives the key and value bytes directly (zero-copy). Return false from the callback to stop iteration. Keys are deduplicated (newest version wins) and tombstones are skipped.

func (*Store) ScanPrefixMaps added in v0.7.1

func (s *Store) ScanPrefixMaps(prefix []byte, fn func(key []byte, m map[string]any) bool) error

ScanPrefixMaps scans keys with the given prefix and decodes each value as a map.

func (*Store) ScanPrefixMapsZeroCopy added in v0.10.0

func (s *Store) ScanPrefixMapsZeroCopy(prefix []byte, fn func(key []byte, m map[string]any) bool) error

ScanPrefixMapsZeroCopy scans keys with prefix using zero-copy decoding. Map strings are only valid within the callback - copy any you need to retain.

func (*Store) ScanPrefixWithStats added in v0.8.0

func (s *Store) ScanPrefixWithStats(prefix []byte, fn func(key []byte, value Value) bool, progress ScanProgress) (ScanStats, error)

ScanPrefixWithStats is like ScanPrefix but also returns scan statistics. The progress callback (if non-nil) is called periodically during the scan.

func (*Store) ScanRange added in v0.7.0

func (s *Store) ScanRange(start, end []byte, fn func(key []byte, value Value) bool) error

ScanRange iterates over all keys in the range [start, end) in sorted order. The callback receives the key and value bytes directly (zero-copy). Return false from the callback to stop iteration.

func (*Store) ScanRangeMaps added in v0.7.1

func (s *Store) ScanRangeMaps(start, end []byte, fn func(key []byte, m map[string]any) bool) error

ScanRangeMaps scans keys in [start, end) and decodes each value as a map.

func (*Store) Stats

func (s *Store) Stats() StoreStats

Stats returns store statistics.

func (*Store) Sum added in v0.7.0

func (s *Store) Sum(prefix []byte, field string) (float64, error)

Sum returns the sum of a numeric field across all records matching the prefix. For simple numeric values (no field), pass an empty string for field.

func (*Store) Sync added in v0.8.3

func (s *Store) Sync() error

Sync ensures all written data is durable by syncing the WAL. This is faster than Flush() because it doesn't create an SSTable. Data remains in the memtable and will be recovered from WAL on restart. Use this for frequent durability checkpoints; use Flush() less frequently to convert memtable data to SSTables.

Sync does not block on pending writes - it syncs whatever is currently in the WAL buffer. This allows Sync to proceed even during backpressure.

func (*Store) WriteBatch added in v0.6.0

func (s *Store) WriteBatch(batch *Batch) error

WriteBatch atomically applies all operations in the batch. All operations are written to the WAL together before being applied to the memtable.

type StoreStats

type StoreStats struct {
	MemtableSize  int64
	MemtableCount int64
	IndexMemory   int64 // Total in-memory size of indexes and bloom filters
	CacheStats    CacheStats
	Levels        []LevelStats
}

StoreStats contains store statistics.

type TableMeta added in v0.5.0

type TableMeta struct {
	ID       uint32
	Level    int
	MinKey   []byte
	MaxKey   []byte
	NumKeys  uint64
	FileSize int64

	// Offsets for lazy loading (relative to SSTable file)
	IndexOffset uint64
	IndexSize   uint32
	BloomOffset uint64
	BloomSize   uint32
}

TableMeta contains SSTable metadata stored in the manifest. This allows opening the store without reading each SSTable's index.

type Value

type Value struct {
	Type ValueType

	// Inline storage for primitives
	Int64   int64
	Float64 float64
	Bool    bool

	// For strings/bytes - either inline data or pointer to block
	Bytes   []byte       // Used for inline storage (small values)
	Pointer *dataPointer // Used for large values stored in data blocks

	// For records - map of field name to value
	Record map[string]any // Used for ValueTypeRecord
}

Value represents a typed value in the store.

func BoolValue

func BoolValue(v bool) Value

BoolValue creates a Value containing a bool.

func BytesValue

func BytesValue(v []byte) Value

BytesValue creates a Value containing bytes.

func DecodeValue

func DecodeValue(data []byte) (Value, int, error)

DecodeValue deserializes a value from bytes. Returns the value and number of bytes consumed.

func DecodeValueZeroCopy

func DecodeValueZeroCopy(data []byte) (Value, int, error)

DecodeValueZeroCopy deserializes a value without copying byte data. The returned Value's Bytes field points into the input data slice. Caller must ensure data outlives the returned Value. This is faster but the Value is only valid while data is valid.

func Float64Value

func Float64Value(v float64) Value

Float64Value creates a Value containing a float64.

func Int64Value

func Int64Value(v int64) Value

Int64Value creates a Value containing an int64.

func MsgpackValue added in v0.7.0

func MsgpackValue(data []byte) Value

MsgpackValue creates a Value containing raw msgpack bytes. This is more efficient than RecordValue for storing structs.

func RecordValue added in v0.7.0

func RecordValue(fields map[string]any) Value

RecordValue creates a Value containing a structured record.

func StringValue

func StringValue(v string) Value

StringValue creates a Value containing a string.

func TombstoneValue

func TombstoneValue() Value

TombstoneValue creates a tombstone Value for deletions.

func (*Value) EncodedSize

func (v *Value) EncodedSize() int

EncodedSize returns the serialized size of a value.

func (Value) GetBytes

func (v Value) GetBytes() []byte

GetBytes returns the byte slice for string/bytes values.

func (Value) IsTombstone

func (v Value) IsTombstone() bool

IsTombstone returns true if this value represents a deletion.

func (Value) String

func (v Value) String() string

String returns the string representation for string values.

type ValueType

type ValueType uint8

ValueType represents the type of stored value.

const (
	ValueTypeInt64 ValueType = iota + 1
	ValueTypeFloat64
	ValueTypeBool
	ValueTypeString
	ValueTypeBytes
	ValueTypeTombstone // Special type for deletions
	ValueTypeRecord    // Structured record with named fields
	ValueTypeMsgpack   // Raw msgpack bytes for efficient struct storage
)

type WALSyncMode

type WALSyncMode int

WALSyncMode determines when WAL is synced to disk.

const (
	// WALSyncNone never syncs. Fastest but may lose data on crash.
	WALSyncNone WALSyncMode = iota
	// WALSyncPerBatch syncs after each batch of writes. Good balance.
	WALSyncPerBatch
	// WALSyncPerWrite syncs after each write. Slowest but safest.
	WALSyncPerWrite
)

Directories

Path Synopsis
cmd
tinykvs command
tinykvs-bench command

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL