@@ -4,6 +4,7 @@ import (
     "fmt"
     "math/rand"
     "os"
+    "runtime"
     "sync"
     "sync/atomic"
     "time"
@@ -12,37 +13,78 @@ import (
 )

 const (
-    TotalKeys = 100000
-    UpdateCount = 10000
-    DeleteCount = 10000
-    QueryCount = 100000
-    DataDir = "bench_db_data"
-    ValueBaseSize = 32 // Base size for values
+    TotalKeys = 100000
+    DataDir   = "bench_db_data"
 )

 func main() {
+    // Scenario: Key Query Performance and Correctness
+    fmt.Println("==================================================")
+    fmt.Println("SCENARIO: Key Query Performance (Flat Array Index)")
+    fmt.Println("==================================================")
+
+    // Value index disabled for this scenario.
+    runBenchmark(false)
+}
+
+func runBenchmark(enableValueIndex bool) {
     // Clean up previous run
     os.RemoveAll(DataDir)

-    fmt.Printf("Initializing DB Engine in %s...\n", DataDir)
-    e, err := db.NewEngine(DataDir)
+    fmt.Printf("Initializing DB Engine in %s (ValueIndex=%v)...\n", DataDir, enableValueIndex)
+
+    startMem := getMemUsage()
+
+    e, err := db.NewEngine(DataDir, db.WithValueIndex(enableValueIndex))
     if err != nil {
         panic(err)
     }
     defer e.Close()
+    defer os.RemoveAll(DataDir) // Clean up after the run

-    // 1. Bulk Insert 100k keys
-    fmt.Println("\n--- Phase 1: Bulk Insert 100k Keys ---")
+    // 1. Bulk insert 100k keys with varied segment depth
+    fmt.Println("\n--- Phase 1: Bulk Insert 100k Keys (Varied Segments) ---")
     keys := make([]string, TotalKeys)
     start := time.Now()

-    // Pre-generate keys to avoid benchmark overhead
+    // Pre-generate keys using 5 patterns of increasing segment depth (20k keys each):
+    // Pattern 1: root<id>                            (single segment, no separator)
+    // Pattern 2: user.<id>
+    // Pattern 3: group.<id%100>.member.<id>          (100 groups, 200 members each)
+    // Pattern 4: app.<id%10>.ver.<id%10>.config.<id> (10 app/ver buckets)
+    // Pattern 5: log.2023.01.01.<id>                 (shared date prefix)
+
     for i := 0; i < TotalKeys; i++ {
-        keys[i] = fmt.Sprintf("bench.key.%d", i)
+        group := i % 5
+        id := i / 5
+
+        switch group {
+        case 0:
+            // Pattern 1: "root<id>" contains no "." separator, so it is a single segment.
+            keys[i] = fmt.Sprintf("root%d", id)
+        case 1:
+            // Pattern 2: user.<id>
+            keys[i] = fmt.Sprintf("user.%d", id)
+        case 2:
+            // Pattern 3: group.<id%100>.member.<id>
+            // e.g. group.0 holds the 200 ids 0, 100, 200, ..., 19900.
+            keys[i] = fmt.Sprintf("group.%d.member.%d", id%100, id)
+        case 3:
+            // Pattern 4: app.<id%10>.ver.<id%10>.config.<id>
+            keys[i] = fmt.Sprintf("app.%d.ver.%d.config.%d", id%10, id%10, id)
+        case 4:
+            // Pattern 5: log.2023.01.01.<id>
+            keys[i] = fmt.Sprintf("log.2023.01.01.%d", id)
+        }
     }

     var wg sync.WaitGroup
-    workers := 10 // Concurrent workers
+    workers := 10
     chunkSize := TotalKeys / workers
     var insertOps int64
@@ -53,9 +95,9 @@ func main() {
             base := id * chunkSize
             for i := 0; i < chunkSize; i++ {
                 idx := base + i
-                // Random value string of approx ValueBaseSize
-                // Using consistent size initially to test reuse later
-                val := fmt.Sprintf("value-data-%d-%s", idx, randomString(15))
+                if idx >= TotalKeys {
+                    continue
+                }
+                // Small fixed value: this scenario measures key indexing, not value IO.
+                val := "v"
                 if err := e.Set(keys[idx], val, uint64(idx)); err != nil {
                     panic(err)
                 }
@@ -67,292 +109,73 @@ func main() {
     duration := time.Since(start)
     qps := float64(TotalKeys) / duration.Seconds()
-    printStats("Insert", TotalKeys, duration, qps, getFileSize(DataDir+"/values.data"))
-
-    // 2. Update 10k Keys (Mixed Strategy)
-    // Strategy:
-    // - 50% Longer: Triggers Append + Mark Old Deleted (Old slots become Free)
-    // - 50% Shorter: Triggers In-Place Update (No FreeList change)
-    // But wait, with FreeList, the "Longer" updates will generate Free slots.
-    // Can we verify if subsequent inserts reuse them?
-    fmt.Println("\n--- Phase 2: Update 10k Keys (50% Long, 50% Short) ---")
-    start = time.Now()
-
-    updateKeys := keys[:UpdateCount]
-    wg = sync.WaitGroup{}
-    chunkSize = UpdateCount / workers
+    currentMem := getMemUsage()
+    printStats("Insert", TotalKeys, duration, qps, getFileSize(DataDir+"/values.data"), currentMem-startMem)

-    for w := 0; w < workers; w++ {
-        wg.Add(1)
-        go func(id int) {
-            defer wg.Done()
-            base := id * chunkSize
-            for i := 0; i < chunkSize; i++ {
-                idx := base + i
-                key := updateKeys[idx]
-                var val string
-                if idx%2 == 0 {
-                    // Longer value (Trigger Append, Release Old Slot to FreeList)
-                    // Old cap was likely 32 or 48. New cap will be larger.
-                    val = fmt.Sprintf("updated-long-value-%d-%s-padding-padding", idx, randomString(40))
-                } else {
-                    // Shorter value (Trigger In-Place)
-                    val = "short"
-                }
-                if err := e.Set(key, val, uint64(TotalKeys+idx)); err != nil {
-                    panic(err)
-                }
-            }
-        }(w)
-    }
-    wg.Wait()
-
-    duration = time.Since(start)
-    qps = float64(UpdateCount) / duration.Seconds()
-    printStats("Update", UpdateCount, duration, qps, getFileSize(DataDir+"/values.data"))
+    // 2. Query Consistency Verification
+    fmt.Println("\n--- Phase 2: Query Consistency Verification ---")

-    // 3. Insert New Data to Test Reuse
-    // We generated ~5000 free slots (from the Long updates) in Phase 2.
-    // Let's insert 5000 new keys with size matching the old slots (approx 32-48 bytes).
-    // If reuse works, file size should NOT increase significantly.
-    fmt.Println("\n--- Phase 3: Insert 5k New Keys (Test Reuse) ---")
-    reuseKeysCount := 5000
-    start = time.Now()
+    verifyQuery(e, "Exact Match (Pattern 1)", `key = "root0"`, 1)
+    verifyQuery(e, "Exact Match (Pattern 2)", `key = "user.0"`, 1)
+
+    // Pattern 3 has 20,000 keys spread over 100 groups (id%100),
+    // so each "group.N.member.*" prefix matches 20000/100 = 200 keys.
+    verifyQuery(e, "Prefix Scan (Pattern 3)", `key like "group.0.member.*"`, 200)

-    for i := 0; i < reuseKeysCount; i++ {
-        key := fmt.Sprintf("reuse.key.%d", i)
-        // Length matches initial inserts (approx 30-40 bytes)
-        val := fmt.Sprintf("new-value-reuse-%d-%s", i, randomString(15))
-        if err := e.Set(key, val, uint64(TotalKeys+UpdateCount+i)); err != nil {
-            panic(err)
-        }
-    }
+    // Pattern 4 has 20,000 keys; app and ver are both id%10, so
+    // "app.0.ver.0.config.*" matches the ids with id%10 == 0: 20000/10 = 2000 keys.
+    verifyQuery(e, "Prefix Scan (Pattern 4)", `key like "app.0.ver.0.config.*"`, 2000)

-    duration = time.Since(start)
-    qps = float64(reuseKeysCount) / duration.Seconds()
-    printStats("InsertReuse", reuseKeysCount, duration, qps, getFileSize(DataDir+"/values.data"))
-
+    // All 20,000 Pattern 5 keys share the "log.2023" prefix.
+    verifyQuery(e, "Prefix Scan (Pattern 5)", `key like "log.2023.*"`, 20000)

-    // 4. Delete 10k Keys
-    fmt.Println("\n--- Phase 4: Delete 10k Keys (Simulated via Update) ---")
-    // Using "DELETED_MARKER" which is short, so it will be In-Place update.
-    // This won't test FreeList populating, but In-Place logic.
-    // To test FreeList populating from Delete, we need a real Delete() or Update to smaller size that frees extra space?
-    // But our storage doesn't split blocks.
-    // So let's stick to standard benchmark.
-    deleteKeys := keys[UpdateCount : UpdateCount+DeleteCount]
-    start = time.Now()
+    // 3. Query Performance Test (Prefix)
+    fmt.Println("\n--- Phase 3: Query Performance (Prefix Scan) ---")

-    wg = sync.WaitGroup{}
-    chunkSize = DeleteCount / workers
-
-    for w := 0; w < workers; w++ {
-        wg.Add(1)
-        go func(id int) {
-            defer wg.Done()
-            base := id * chunkSize
-            for i := 0; i < chunkSize; i++ {
-                idx := base + i
-                key := deleteKeys[idx]
-                if err := e.Set(key, "DELETED_MARKER", uint64(TotalKeys+UpdateCount+reuseKeysCount+idx)); err != nil {
-                    panic(err)
-                }
-            }
-        }(w)
-    }
-    wg.Wait()
-
-    duration = time.Since(start)
-    qps = float64(DeleteCount) / duration.Seconds()
-    printStats("Delete", DeleteCount, duration, qps, getFileSize(DataDir+"/values.data"))
-
-    // 5. Query Performance Test
-    fmt.Println("\n--- Phase 5: Query Performance Test ---")
-
-    // 5.1 Single Key Point Lookup
     start = time.Now()
-    qPointCount := 100000
+    qCount := 10000
     wg = sync.WaitGroup{}
-    chunkSize = qPointCount / workers
+    chunkSize = qCount / workers

-    for w := 0; w < workers; w++ {
-        wg.Add(1)
-        go func(id int) {
-            defer wg.Done()
-            base := id * chunkSize
-            for i := 0; i < chunkSize; i++ {
-                // Query: key = "bench.key.12345"
-                idx := base + i
-                key := keys[idx]
-                query := fmt.Sprintf(`key = "%s"`, key)
-                _, _ = e.Query(query)
-            }
-        }(w)
-    }
-    wg.Wait()
-    duration = time.Since(start)
-    qps = float64(qPointCount) / duration.Seconds()
-    printStats("Query(Point)", qPointCount, duration, qps, 0)
-
-
-    // 5.2 Query Metadata Only (Key/CommitIndex) - Should be fast (No IO)
-    start = time.Now()
-    qMetaCount := 50000
-    var metaHits int64
-
-    wg = sync.WaitGroup{}
-    chunkSize = qMetaCount / workers
-
-    for w := 0; w < workers; w++ {
-        wg.Add(1)
-        go func() {
-            defer wg.Done()
-            // Query for keys starting with "bench.key.99" (Last 1000 keys)
-            // This exercises the scan but filters on Key (Metadata)
-            // Note: Our Query implementation scans ALL keys, so total items processed = TotalKeys * qMetaCount / workers
-            // This is extremely heavy if not optimized.
-            // Let's run a smaller number of queries.
-            for i := 0; i < chunkSize; i++ {
-                // Query: key like "bench.key.999*"
-                res, _ := e.Query(`key like "bench.key.999*"`)
-                atomic.AddInt64(&metaHits, int64(len(res)))
-            }
-        }()
-    }
-    wg.Wait()
-    duration = time.Since(start)
-    qps = float64(qMetaCount) / duration.Seconds()
-    printStats("Query(Meta)", qMetaCount, duration, qps, 0)
-
-    // 5.3 Query with Pagination (LIMIT/OFFSET)
-    // We use the same Meta query but with LIMIT 10 OFFSET 20 to test pagination speedup
-    start = time.Now()
-    qPageCount := 50000
-    var pageHits int64
-
-    wg = sync.WaitGroup{}
-    chunkSize = qPageCount / workers
-
-    for w := 0; w < workers; w++ {
-        wg.Add(1)
-        go func() {
-            defer wg.Done()
-            for i := 0; i < chunkSize; i++ {
-                // Query with LIMIT 10 OFFSET 20
-                res, _ := e.Query(`key like "bench.key.999*" LIMIT 10 OFFSET 20`)
-                atomic.AddInt64(&pageHits, int64(len(res)))
-            }
-        }()
-    }
-    wg.Wait()
-    duration = time.Since(start)
-    qps = float64(qPageCount) / duration.Seconds()
-    printStats("Query(Lim10Off20)", qPageCount, duration, qps, 0)
-
-    // 5.4 Query Value (Needs IO)
-    start = time.Now()
-    qValCount := 100 // Smaller count because full scan IO is slow
-    var valHits int64
-
-    wg = sync.WaitGroup{}
-    chunkSize = qValCount / workers
-    if chunkSize == 0 { chunkSize = 1; workers = qValCount }
-
     for w := 0; w < workers; w++ {
         wg.Add(1)
         go func() {
             defer wg.Done()
             for i := 0; i < chunkSize; i++ {
-                // Query: value like "*data-999*"
-                res, _ := e.Query(`value like "*data-999*"`)
-                atomic.AddInt64(&valHits, int64(len(res)))
+                // Repeatedly scan one mid-range prefix (200 matches) to stress
+                // the index lookup and range-scan path.
+                _, _ = e.Query(`key like "group.50.member.*"`)
             }
         }()
     }
     wg.Wait()
     duration = time.Since(start)
-    qps = float64(qValCount) / duration.Seconds()
-    printStats("Query(Val)", qValCount, duration, qps, 0)
+    qps = float64(qCount) / duration.Seconds()
+    printStats("Query(Prefix)", qCount, duration, qps, 0, 0)
+
+    // Final memory report: force a GC so the delta approximates live index memory.
+    runtime.GC()
+    finalMem := getMemUsage()
+    fmt.Printf("\n[Final Stats] Total Keys: %d | Memory Usage: %.2f MB\n", TotalKeys, float64(finalMem-startMem)/1024/1024)
+}

-    // 6. Verification
-    fmt.Println("\n--- Phase 6: Verification (Full Scan) ---")
-
-    errors := 0
-
-    // 1. Check Updated Keys (0-9999)
-    for i := 0; i < UpdateCount; i++ {
-        val, ok := e.Get(keys[i])
-        if !ok {
-            fmt.Printf("Error: Key %s not found\n", keys[i])
-            errors++
-            continue
-        }
-        if i%2 == 0 {
-            // Should be long
-            if len(val) < 40 { // Our long value is > 40 chars
-                fmt.Printf("Error: Key %s should be long, got: %s\n", keys[i], val)
-                errors++
-            }
-        } else {
-            // Should be "short"
-            if val != "short" {
-                fmt.Printf("Error: Key %s should be 'short', got: %s\n", keys[i], val)
-                errors++
-            }
-        }
-    }
-
-    // 2. Check Deleted Keys (10000-19999)
-    for i := UpdateCount; i < UpdateCount+DeleteCount; i++ {
-        val, ok := e.Get(keys[i])
-        if !ok {
-            fmt.Printf("Error: Key %s not found\n", keys[i])
-            errors++
-            continue
-        }
-        if val != "DELETED_MARKER" {
-            fmt.Printf("Error: Key %s should be deleted, got: %s\n", keys[i], val)
-            errors++
-        }
-    }
-
-    // 3. Check Original Keys (20000-99999)
-    for i := UpdateCount + DeleteCount; i < TotalKeys; i++ {
-        val, ok := e.Get(keys[i])
-        if !ok {
-            fmt.Printf("Error: Key %s not found\n", keys[i])
-            errors++
-            continue
-        }
-        // Original values start with "value-data-"
-        if len(val) < 10 || val[:11] != "value-data-" {
-            fmt.Printf("Error: Key %s should be original, got: %s\n", keys[i], val)
-            errors++
-        }
+func verifyQuery(e *db.Engine, name, sql string, expected int) {
+    start := time.Now()
+    res, err := e.Query(sql)
+    if err != nil {
+        fmt.Printf("FAIL: %s - Error: %v\n", name, err)
+        return
     }
+    duration := time.Since(start)

-    // 4. Check Reused Keys (Extra 5000)
-    for i := 0; i < 5000; i++ { // reuseKeysCount was hardcoded as 5000 inside main
-        key := fmt.Sprintf("reuse.key.%d", i)
-        val, ok := e.Get(key)
-        if !ok {
-            fmt.Printf("Error: Reuse Key %s not found\n", key)
-            errors++
-            continue
-        }
-        if len(val) < 10 || val[:16] != "new-value-reuse-" {
-            fmt.Printf("Error: Reuse Key %s mismatch, got: %s\n", key, val)
-            errors++
-        }
-    }
-
-    if errors == 0 {
-        fmt.Println("Integrity Check: PASS (All keys verified successfully)")
-        // Cleanup if successful
-        os.RemoveAll(DataDir)
+    if len(res) == expected {
+        fmt.Printf("PASS: %-25s | Count: %5d | Time: %v\n", name, len(res), duration)
     } else {
-        fmt.Printf("Integrity Check: FAIL (%d errors found)\n", errors)
+        fmt.Printf("FAIL: %-25s | Expected: %d, Got: %d\n", name, expected, len(res))
     }
 }
@@ -373,10 +196,20 @@ func getFileSize(path string) int64 {
     return fi.Size()
 }

-func printStats(op string, count int, d time.Duration, qps float64, size int64) {
+// getMemUsage reports the current heap allocation (runtime.MemStats.Alloc).
+func getMemUsage() uint64 {
+    var m runtime.MemStats
+    runtime.ReadMemStats(&m)
+    return m.Alloc
+}
+
+func printStats(op string, count int, d time.Duration, qps float64, size int64, memDelta uint64) {
     sizeStr := ""
     if size > 0 {
-        sizeStr = fmt.Sprintf(" | DB Size: %.2f MB", float64(size)/1024/1024)
+        sizeStr = fmt.Sprintf(" | Disk: %.2f MB", float64(size)/1024/1024)
+    }
+    memStr := ""
+    if memDelta > 0 {
+        memStr = fmt.Sprintf(" | Mem+: %.2f MB", float64(memDelta)/1024/1024)
     }
-    fmt.Printf("%s: %d ops in %v | QPS: %.0f%s\n", op, count, d, qps, sizeStr)
+    fmt.Printf("%-15s: %6d ops in %7v | QPS: %8.0f%s%s\n", op, count, d, qps, sizeStr, memStr)
 }