storage.go

package raft

import (
	"bufio"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
)

var (
	ErrNotFound   = errors.New("not found")
	ErrCorrupted  = errors.New("corrupted data")
	ErrOutOfRange = errors.New("index out of range")
	ErrCompacted  = errors.New("log has been compacted")
)

// Storage defines the persistent storage operations
type Storage interface {
	// State operations
	GetState() (*PersistentState, error)
	SaveState(state *PersistentState) error
	// Log operations
	GetFirstIndex() uint64
	GetLastIndex() uint64
	GetEntry(index uint64) (*LogEntry, error)
	GetEntries(startIndex, endIndex uint64) ([]LogEntry, error)
	AppendEntries(entries []LogEntry) error
	TruncateAfter(index uint64) error
	TruncateBefore(index uint64) error
	// Snapshot operations
	GetSnapshot() ([]byte, uint64, uint64, error) // data, lastIndex, lastTerm, error
	SaveSnapshot(data []byte, lastIndex, lastTerm uint64) error
	// Cluster configuration operations
	GetClusterConfig() (*ClusterConfig, error)
	SaveClusterConfig(config *ClusterConfig) error
	// Lifecycle
	Close() error
	Sync() error  // fsync (slow, safe)
	Flush() error // write to OS cache (fast)
}

// HybridStorage implements a high-performance hybrid memory + file storage
type HybridStorage struct {
	mu      sync.RWMutex
	dataDir string
	logger  Logger

	// In-memory cache for fast reads
	memoryLog      []LogEntry // Recent entries in memory
	memoryStart    uint64     // Start index of entries in memory
	memoryCapacity int

	// File-based persistent storage
	logFile      *os.File
	logWriter    *bufio.Writer
	stateFile    string
	snapshotFile string
	clusterFile  string // Cluster configuration file

	// Index tracking
	firstIndex uint64 // First index in storage (after compaction)
	lastIndex  uint64 // Last index in storage

	// Entry offset index for fast file seeks
	entryOffsets map[uint64]int64 // index -> file offset

	// State cache
	stateCache *PersistentState

	// Cluster config cache
	clusterCache *ClusterConfig
}

// NewHybridStorage creates a new hybrid storage instance
func NewHybridStorage(dataDir string, memoryCapacity int, logger Logger) (*HybridStorage, error) {
	if logger == nil {
		logger = &NoopLogger{}
	}
	if memoryCapacity <= 0 {
		memoryCapacity = 1000 // Safe default
	}
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create data directory: %w", err)
	}
	s := &HybridStorage{
		dataDir:        dataDir,
		logger:         logger,
		memoryLog:      make([]LogEntry, 0, memoryCapacity),
		memoryCapacity: memoryCapacity,
		stateFile:      filepath.Join(dataDir, "state.json"),
		snapshotFile:   filepath.Join(dataDir, "snapshot.bin"),
		clusterFile:    filepath.Join(dataDir, "cluster.json"),
		firstIndex:     0,
		lastIndex:      0,
		entryOffsets:   make(map[uint64]int64),
	}
	if err := s.recover(); err != nil {
		return nil, fmt.Errorf("failed to recover storage: %w", err)
	}
	return s, nil
}
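
// Illustrative usage (a sketch, not part of the package API; it assumes
// LogEntry carries at least Index and Term fields and that a Logger
// implementation is available to pass in):
//
//	st, err := NewHybridStorage("/var/lib/raft", 1000, logger)
//	if err != nil {
//		// handle error
//	}
//	defer st.Close()
//
//	_ = st.AppendEntries([]LogEntry{{Index: st.GetLastIndex() + 1, Term: 1}})
//	_ = st.Sync() // fsync before acknowledging the write to clients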

// recover loads existing data from disk
func (s *HybridStorage) recover() error {
	// Load state
	if _, err := s.loadState(); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to load state: %w", err)
	}
	// Open or create log file
	logPath := filepath.Join(s.dataDir, "log.bin")
	var err error
	s.logFile, err = os.OpenFile(logPath, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return fmt.Errorf("failed to open log file: %w", err)
	}
	// Load snapshot to get compaction point
	if snapData, lastIndex, lastTerm, err := s.loadSnapshot(); err == nil && len(snapData) > 0 {
		s.firstIndex = lastIndex
		s.lastIndex = lastIndex
		s.logger.Info("Loaded snapshot at index %d, term %d", lastIndex, lastTerm)
	}
	// Build index and load recent entries
	if err := s.rebuildIndex(); err != nil {
		return fmt.Errorf("failed to rebuild index: %w", err)
	}
	s.logWriter = bufio.NewWriterSize(s.logFile, 1024*1024) // 1MB buffer
	return nil
}

// rebuildIndex scans the log file and rebuilds the offset index
func (s *HybridStorage) rebuildIndex() error {
	s.logFile.Seek(0, io.SeekStart)
	reader := bufio.NewReader(s.logFile)
	var offset int64 = 0
	var entries []LogEntry
	for {
		entry, bytesRead, err := s.readEntryAt(reader)
		if err == io.EOF {
			break
		}
		if err != nil {
			s.logger.Warn("Error reading log at offset %d: %v", offset, err)
			break
		}
		if entry.Index > s.firstIndex {
			s.entryOffsets[entry.Index] = offset
			entries = append(entries, *entry)
			if s.firstIndex == 0 || entry.Index < s.firstIndex {
				s.firstIndex = entry.Index
			}
			if entry.Index > s.lastIndex {
				s.lastIndex = entry.Index
			}
		}
		offset += int64(bytesRead)
	}
	// Load recent entries into memory
	if len(entries) > 0 {
		startIdx := 0
		if len(entries) > s.memoryCapacity {
			startIdx = len(entries) - s.memoryCapacity
		}
		s.memoryLog = entries[startIdx:]
		s.memoryStart = s.memoryLog[0].Index
		s.logger.Info("Loaded %d entries into memory, starting at index %d", len(s.memoryLog), s.memoryStart)
	}
	// Seek to end for appending
	s.logFile.Seek(0, io.SeekEnd)
	return nil
}

// readEntryAt reads a single entry from the reader
func (s *HybridStorage) readEntryAt(reader *bufio.Reader) (*LogEntry, int, error) {
	// Format: [4 bytes length][json data]
	lenBuf := make([]byte, 4)
	if _, err := io.ReadFull(reader, lenBuf); err != nil {
		return nil, 0, err
	}
	length := binary.BigEndian.Uint32(lenBuf)
	if length > 10*1024*1024 { // 10MB limit
		return nil, 0, ErrCorrupted
	}
	data := make([]byte, length)
	if _, err := io.ReadFull(reader, data); err != nil {
		return nil, 0, err
	}
	var entry LogEntry
	if err := json.Unmarshal(data, &entry); err != nil {
		return nil, 0, err
	}
	return &entry, 4 + int(length), nil
}
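
// For reference, a single record in log.bin therefore looks like the sketch
// below (the exact JSON fields depend on LogEntry's json tags; the 42-byte
// payload is just an example):
//
//	+-----------------+--------------------------------------+
//	| length (4B, BE) | JSON-encoded LogEntry (length bytes) |
//	+-----------------+--------------------------------------+
//	| 00 00 00 2A     | {"Index":7,"Term":3,...}             |
//	+-----------------+--------------------------------------+
//
// readEntryAt returns the decoded entry plus the total bytes consumed
// (4 + length), which rebuildIndex uses to advance its running offset.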

// GetState returns the current persistent state
func (s *HybridStorage) GetState() (*PersistentState, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.stateCache != nil {
		return s.stateCache, nil
	}
	return s.loadState()
}

func (s *HybridStorage) loadState() (*PersistentState, error) {
	data, err := os.ReadFile(s.stateFile)
	if err != nil {
		if os.IsNotExist(err) {
			return &PersistentState{}, nil
		}
		return nil, err
	}
	var state PersistentState
	if err := json.Unmarshal(data, &state); err != nil {
		return nil, err
	}
	s.stateCache = &state
	return &state, nil
}

// SaveState persists the state to disk
func (s *HybridStorage) SaveState(state *PersistentState) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Ensure data directory exists
	if err := os.MkdirAll(s.dataDir, 0755); err != nil {
		return fmt.Errorf("failed to create data directory: %w", err)
	}
	data, err := json.Marshal(state)
	if err != nil {
		return err
	}
	// Write to temp file first for atomicity
	tmpFile := s.stateFile + ".tmp"
	if err := os.WriteFile(tmpFile, data, 0644); err != nil {
		return err
	}
	if err := os.Rename(tmpFile, s.stateFile); err != nil {
		return err
	}
	s.stateCache = state
	return nil
}

// GetFirstIndex returns the first available log index
func (s *HybridStorage) GetFirstIndex() uint64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.firstIndex
}

// GetLastIndex returns the last log index
func (s *HybridStorage) GetLastIndex() uint64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.lastIndex
}

// GetEntry retrieves a single log entry by index
func (s *HybridStorage) GetEntry(index uint64) (*LogEntry, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if index < s.firstIndex {
		return nil, ErrCompacted
	}
	if index > s.lastIndex {
		return nil, ErrOutOfRange
	}
	// Try memory first (fast path)
	if index >= s.memoryStart && len(s.memoryLog) > 0 {
		memIdx := int(index - s.memoryStart)
		if memIdx >= 0 && memIdx < len(s.memoryLog) {
			entry := s.memoryLog[memIdx]
			return &entry, nil
		}
	}
	// Fall back to file
	return s.readEntryFromFile(index)
}

// readEntryFromFile reads an entry from the log file
func (s *HybridStorage) readEntryFromFile(index uint64) (*LogEntry, error) {
	offset, ok := s.entryOffsets[index]
	if !ok {
		return nil, ErrNotFound
	}
	// Read via ReadAt (through a SectionReader) rather than Seek+Read so that
	// concurrent readers holding only the read lock do not race on the file's
	// shared seek offset.
	reader := bufio.NewReader(io.NewSectionReader(s.logFile, offset, 1<<62))
	entry, _, err := s.readEntryAt(reader)
	return entry, err
}

// GetEntries retrieves a range of log entries [startIndex, endIndex)
func (s *HybridStorage) GetEntries(startIndex, endIndex uint64) ([]LogEntry, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if startIndex < s.firstIndex {
		return nil, ErrCompacted
	}
	if endIndex > s.lastIndex+1 {
		endIndex = s.lastIndex + 1
	}
	if startIndex >= endIndex {
		return nil, nil
	}
	entries := make([]LogEntry, 0, endIndex-startIndex)
	// Check if all requested entries are in memory
	if startIndex >= s.memoryStart && len(s.memoryLog) > 0 {
		memStartIdx := int(startIndex - s.memoryStart)
		memEndIdx := int(endIndex - s.memoryStart)
		if memStartIdx >= 0 && memEndIdx <= len(s.memoryLog) {
			return append(entries, s.memoryLog[memStartIdx:memEndIdx]...), nil
		}
	}
	// Need to read from file
	for idx := startIndex; idx < endIndex; idx++ {
		// Try memory first
		if idx >= s.memoryStart && len(s.memoryLog) > 0 {
			memIdx := int(idx - s.memoryStart)
			if memIdx >= 0 && memIdx < len(s.memoryLog) {
				entries = append(entries, s.memoryLog[memIdx])
				continue
			}
		}
		// Read from file
		entry, err := s.readEntryFromFile(idx)
		if err != nil {
			return nil, err
		}
		entries = append(entries, *entry)
	}
	return entries, nil
}

// AppendEntries appends new entries to the log.
// It skips entries that already exist and only appends sequential new entries.
func (s *HybridStorage) AppendEntries(entries []LogEntry) error {
	if len(entries) == 0 {
		return nil
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	// Filter entries: only append sequential entries starting from lastIndex + 1.
	// This handles overlapping entries and gaps gracefully.
	var newEntries []LogEntry
	nextExpected := s.lastIndex + 1
	// If log was compacted and this is a fresh start, adjust nextExpected
	if s.lastIndex == 0 && s.firstIndex > 0 {
		nextExpected = s.firstIndex + 1
	}
	for _, entry := range entries {
		if entry.Index == nextExpected {
			// This is the next expected entry, add it
			newEntries = append(newEntries, entry)
			nextExpected++
		} else if entry.Index > nextExpected {
			// Gap detected - this is normal during follower catch-up;
			// the leader will send a snapshot or earlier entries.
			s.logger.Debug("Gap in entries: got %d, expected %d (will wait for leader)", entry.Index, nextExpected)
			break
		}
		// entry.Index < nextExpected: already exists, skip
	}
	if len(newEntries) == 0 {
		return nil // All entries already exist or there's a gap
	}
	// Get current file offset for indexing
	currentOffset, err := s.logFile.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	for i, entry := range newEntries {
		// Write to file
		data, err := json.Marshal(entry)
		if err != nil {
			return err
		}
		lenBuf := make([]byte, 4)
		binary.BigEndian.PutUint32(lenBuf, uint32(len(data)))
		if _, err := s.logWriter.Write(lenBuf); err != nil {
			return err
		}
		if _, err := s.logWriter.Write(data); err != nil {
			return err
		}
		// Update index
		s.entryOffsets[entry.Index] = currentOffset
		currentOffset += int64(4 + len(data))
		// Update memory cache.
		// Initialize memoryStart when the first entry is added.
		if len(s.memoryLog) == 0 {
			s.memoryStart = entry.Index
		}
		s.memoryLog = append(s.memoryLog, entry)
		// Trim memory if needed
		if len(s.memoryLog) > s.memoryCapacity {
			excess := len(s.memoryLog) - s.memoryCapacity
			s.memoryLog = s.memoryLog[excess:]
			s.memoryStart = s.memoryLog[0].Index
		}
		s.lastIndex = entry.Index
		if s.firstIndex == 0 || (i == 0 && newEntries[0].Index < s.firstIndex) {
			s.firstIndex = newEntries[0].Index
		}
	}
	// Flush the buffered writes to the OS cache; a durable fsync is left to Sync().
	return s.logWriter.Flush()
}

// TruncateAfter removes all entries after the given index
func (s *HybridStorage) TruncateAfter(index uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if index >= s.lastIndex {
		return nil
	}
	// Truncate file
	if offset, ok := s.entryOffsets[index+1]; ok {
		if err := s.logFile.Truncate(offset); err != nil {
			return err
		}
		s.logFile.Seek(0, io.SeekEnd)
	}
	// Remove from index
	for idx := index + 1; idx <= s.lastIndex; idx++ {
		delete(s.entryOffsets, idx)
	}
	// Truncate memory
	if index < s.memoryStart {
		s.memoryLog = s.memoryLog[:0]
		s.memoryStart = 0
	} else if index >= s.memoryStart && len(s.memoryLog) > 0 {
		memIdx := int(index - s.memoryStart + 1)
		if memIdx < len(s.memoryLog) {
			s.memoryLog = s.memoryLog[:memIdx]
		}
	}
	s.lastIndex = index
	return nil
}

// TruncateBefore removes all entries before the given index (for compaction)
func (s *HybridStorage) TruncateBefore(index uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if index <= s.firstIndex {
		return nil
	}
	// Remove from index
	for idx := s.firstIndex; idx < index; idx++ {
		delete(s.entryOffsets, idx)
	}
	// Truncate memory
	if index > s.memoryStart && len(s.memoryLog) > 0 {
		memIdx := int(index - s.memoryStart)
		if memIdx >= len(s.memoryLog) {
			s.memoryLog = s.memoryLog[:0]
			s.memoryStart = 0
		} else if memIdx > 0 {
			s.memoryLog = s.memoryLog[memIdx:]
			s.memoryStart = s.memoryLog[0].Index
		}
	}
	s.firstIndex = index
	// Flush any pending writes before reading entries back, then physically
	// rewrite the log file so the compacted entries are reclaimed on disk.
	if s.logWriter != nil {
		s.logWriter.Flush()
	}
	return s.compactLogFile(index)
}
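
// Typical compaction flow as driven by the Raft layer (a sketch of assumed
// caller behavior, not enforced by this package; st, stateMachineBytes,
// appliedIndex and appliedTerm are placeholders): persist the state machine
// snapshot first, then drop the log prefix it covers.
//
//	if err := st.SaveSnapshot(stateMachineBytes, appliedIndex, appliedTerm); err != nil {
//		// handle error
//	}
//	if err := st.TruncateBefore(appliedIndex); err != nil {
//		// handle error
//	}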

// compactLogFile physically rewrites the log file to remove compacted entries
func (s *HybridStorage) compactLogFile(newFirstIndex uint64) error {
	s.logger.Info("Starting physical log compaction. Keeping entries from %d to %d", newFirstIndex, s.lastIndex)
	// 1. Create temp file
	tmpPath := filepath.Join(s.dataDir, "log.bin.tmp")
	tmpFile, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	// Clean up temp file on error
	defer func() {
		if tmpFile != nil {
			tmpFile.Close()
			// Only remove if we didn't successfully rename
			if _, err := os.Stat(tmpPath); err == nil {
				os.Remove(tmpPath)
			}
		}
	}()
	tmpWriter := bufio.NewWriterSize(tmpFile, 1024*1024)
	newOffsets := make(map[uint64]int64)
	var currentOffset int64 = 0
	// 2. Copy entries
	for idx := newFirstIndex; idx <= s.lastIndex; idx++ {
		// Read from the old file.
		// Note: readEntryFromFile uses s.logFile and s.entryOffsets.
		entry, err := s.readEntryFromFile(idx)
		if err != nil {
			return fmt.Errorf("failed to read entry %d during compaction: %v", idx, err)
		}
		// Write to new file
		data, err := json.Marshal(entry)
		if err != nil {
			return err
		}
		lenBuf := make([]byte, 4)
		binary.BigEndian.PutUint32(lenBuf, uint32(len(data)))
		if _, err := tmpWriter.Write(lenBuf); err != nil {
			return err
		}
		if _, err := tmpWriter.Write(data); err != nil {
			return err
		}
		newOffsets[idx] = currentOffset
		currentOffset += int64(4 + len(data))
	}
	if err := tmpWriter.Flush(); err != nil {
		return err
	}
	// 3. Swap files
	// Close the new file before renaming it into place
	if err := tmpFile.Close(); err != nil {
		tmpFile = nil // Prevent double close in defer
		return err
	}
	tmpFile = nil // Prevent double close in defer
	// Close old file
	if err := s.logFile.Close(); err != nil {
		return err
	}
	s.logFile = nil // Prevent usage
	// Rename
	logPath := filepath.Join(s.dataDir, "log.bin")
	if err := os.Rename(tmpPath, logPath); err != nil {
		// The old file is still on disk as log.bin; re-open it so the
		// storage remains usable.
		f, openErr := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE, 0644)
		if openErr == nil {
			s.logFile = f
			s.logWriter = bufio.NewWriterSize(s.logFile, 1024*1024)
			s.logFile.Seek(0, io.SeekEnd)
		}
		return err
	}
	// Re-open log file
	s.logFile, err = os.OpenFile(logPath, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	s.logWriter = bufio.NewWriterSize(s.logFile, 1024*1024)
	// Seek to end
	if _, err := s.logFile.Seek(0, io.SeekEnd); err != nil {
		return err
	}
	// Update offsets
	s.entryOffsets = newOffsets
	s.logger.Info("Compacted log file successfully. New size: %d bytes", currentOffset)
	return nil
}

// loadSnapshot reads the snapshot from disk
func (s *HybridStorage) loadSnapshot() ([]byte, uint64, uint64, error) {
	data, err := os.ReadFile(s.snapshotFile)
	if err != nil {
		return nil, 0, 0, err
	}
	if len(data) < 16 {
		return nil, 0, 0, ErrCorrupted
	}
	lastIndex := binary.BigEndian.Uint64(data[:8])
	lastTerm := binary.BigEndian.Uint64(data[8:16])
	snapData := data[16:]
	return snapData, lastIndex, lastTerm, nil
}

// GetSnapshot returns the current snapshot
func (s *HybridStorage) GetSnapshot() ([]byte, uint64, uint64, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.loadSnapshot()
}

// SaveSnapshot saves a new snapshot
func (s *HybridStorage) SaveSnapshot(data []byte, lastIndex, lastTerm uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Ensure data directory exists
	if err := os.MkdirAll(s.dataDir, 0755); err != nil {
		return fmt.Errorf("failed to create data directory: %w", err)
	}
	// Format: [8 bytes lastIndex][8 bytes lastTerm][snapshot data]
	buf := make([]byte, 16+len(data))
	binary.BigEndian.PutUint64(buf[:8], lastIndex)
	binary.BigEndian.PutUint64(buf[8:16], lastTerm)
	copy(buf[16:], data)
	// Write to temp file first
	tmpFile := s.snapshotFile + ".tmp"
	if err := os.WriteFile(tmpFile, buf, 0644); err != nil {
		return err
	}
	return os.Rename(tmpFile, s.snapshotFile)
}

// GetClusterConfig returns the current cluster configuration
func (s *HybridStorage) GetClusterConfig() (*ClusterConfig, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.clusterCache != nil {
		return s.clusterCache, nil
	}
	return s.loadClusterConfig()
}

// loadClusterConfig reads the cluster configuration from disk
func (s *HybridStorage) loadClusterConfig() (*ClusterConfig, error) {
	data, err := os.ReadFile(s.clusterFile)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // No config saved yet
		}
		return nil, err
	}
	var config ClusterConfig
	if err := json.Unmarshal(data, &config); err != nil {
		return nil, err
	}
	s.clusterCache = &config
	return &config, nil
}

// SaveClusterConfig persists the cluster configuration to disk
func (s *HybridStorage) SaveClusterConfig(config *ClusterConfig) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Ensure data directory exists
	if err := os.MkdirAll(s.dataDir, 0755); err != nil {
		return fmt.Errorf("failed to create data directory: %w", err)
	}
	data, err := json.Marshal(config)
	if err != nil {
		return err
	}
	// Write to temp file first for atomicity
	tmpFile := s.clusterFile + ".tmp"
	if err := os.WriteFile(tmpFile, data, 0644); err != nil {
		return err
	}
	if err := os.Rename(tmpFile, s.clusterFile); err != nil {
		return err
	}
	s.clusterCache = config
	return nil
}

// Close closes the storage
func (s *HybridStorage) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.logWriter != nil {
		s.logWriter.Flush()
	}
	if s.logFile != nil {
		return s.logFile.Close()
	}
	return nil
}

// Sync forces a sync to disk
func (s *HybridStorage) Sync() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.logWriter != nil {
		if err := s.logWriter.Flush(); err != nil {
			return err
		}
	}
	if s.logFile != nil {
		return s.logFile.Sync()
	}
	return nil
}

// Flush writes buffered data to the operating system
func (s *HybridStorage) Flush() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.logWriter != nil {
		return s.logWriter.Flush()
	}
	return nil
}