server.go

package raft

import (
	"encoding/json"
	"errors"
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"

	"igit.com/xbase/raft/db"
)

// KVServer wraps Raft to provide a distributed key-value store
type KVServer struct {
	Raft *Raft
	DB   *db.Engine

	stopCh   chan struct{}
	wg       sync.WaitGroup
	stopOnce sync.Once
}

// NewKVServer creates a new KV server
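//
// Illustrative usage (a minimal sketch, not taken from this repository; it
// assumes a Config with at least NodeID, ListenAddr and DataDir, and that the
// remaining fields such as Logger have workable defaults):
//
//	srv, err := NewKVServer(&Config{
//		NodeID:     "node1",
//		ListenAddr: "127.0.0.1:7001",
//		DataDir:    "/var/lib/raft/node1",
//	})
//	if err != nil {
//		// handle error
//	}
//	if err := srv.Start(); err != nil {
//		// handle error
//	}
//	defer srv.Stop()
//
//	if err := srv.WaitForLeader(5 * time.Second); err == nil {
//		_ = srv.Set("greeting", "hello")
//		value, ok, _ := srv.GetLinear("greeting")
//		fmt.Println(value, ok)
//	}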
func NewKVServer(config *Config) (*KVServer, error) {
	// Initialize DB Engine
	// Use a subdirectory for DB to avoid conflict with Raft logs if they share DataDir
	dbPath := config.DataDir + "/kv_engine"
	engine, err := db.NewEngine(dbPath)
	if err != nil {
		return nil, fmt.Errorf("failed to create db engine: %w", err)
	}

	// Initialize LastAppliedIndex from DB to prevent re-applying entries
	config.LastAppliedIndex = engine.GetLastAppliedIndex()

	// Create stop channel early for use in callbacks
	stopCh := make(chan struct{})

	// Configure snapshot provider
	config.SnapshotProvider = func(minIncludeIndex uint64) ([]byte, error) {
		// Wait for DB to catch up to the requested index
		// This is critical for data integrity during compaction
		for engine.GetLastAppliedIndex() < minIncludeIndex {
			select {
			case <-stopCh:
				return nil, fmt.Errorf("server stopping")
			default:
				time.Sleep(10 * time.Millisecond)
			}
		}
		// Force sync to disk to ensure data durability before compaction
		// This prevents data loss if Raft logs are compacted but DB data is only in OS cache
		if err := engine.Sync(); err != nil {
			return nil, fmt.Errorf("failed to sync engine before snapshot: %w", err)
		}
		return engine.Snapshot()
	}

	// Configure get handler for remote reads
	config.GetHandler = func(key string) (string, bool) {
		return engine.Get(key)
	}

	applyCh := make(chan ApplyMsg, 1000) // Increase buffer for async processing
	transport := NewTCPTransport(config.ListenAddr, 10, config.Logger)
	r, err := NewRaft(config, transport, applyCh)
	if err != nil {
		engine.Close()
		return nil, err
	}

	s := &KVServer{
		Raft:   r,
		DB:     engine,
		stopCh: stopCh,
	}

	// Start applying entries
	go s.runApplyLoop(applyCh)

	// Start background maintenance loop
	s.wg.Add(1)
	go s.maintenanceLoop()

	return s, nil
}

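// Start starts the underlying Raft node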
func (s *KVServer) Start() error {
	return s.Raft.Start()
}

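// Stop shuts the server down: it stops the maintenance loop, stops Raft, and
// finally closes the DB. It is safe to call Stop more than once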
func (s *KVServer) Stop() error {
	var err error
	s.stopOnce.Do(func() {
		// Stop maintenance loop
		if s.stopCh != nil {
			close(s.stopCh)
			s.wg.Wait()
		}
		// Stop Raft first
		if errRaft := s.Raft.Stop(); errRaft != nil {
			err = errRaft
		}
		// Close DB
		if s.DB != nil {
			if errDB := s.DB.Close(); errDB != nil {
				// Combine errors if both fail
				if err != nil {
					err = fmt.Errorf("raft stop error: %v, db close error: %v", err, errDB)
				} else {
					err = errDB
				}
			}
		}
	})
	return err
}

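// runApplyLoop consumes committed commands and snapshots from applyCh and
// applies them to the DB engine. It returns when applyCh is closed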
func (s *KVServer) runApplyLoop(applyCh chan ApplyMsg) {
	for msg := range applyCh {
		if msg.CommandValid {
			// Optimization: Skip if already applied
			// We check this here to avoid unmarshalling and locking DB for known duplicates
			if msg.CommandIndex <= s.DB.GetLastAppliedIndex() {
				continue
			}
			var cmd KVCommand
			if err := json.Unmarshal(msg.Command, &cmd); err != nil {
				s.Raft.config.Logger.Error("Failed to unmarshal command: %v", err)
				continue
			}
			var err error
			switch cmd.Type {
			case KVSet:
				err = s.DB.Set(cmd.Key, cmd.Value, msg.CommandIndex)
			case KVDel:
				err = s.DB.Delete(cmd.Key, msg.CommandIndex)
			default:
				s.Raft.config.Logger.Error("Unknown command type: %d", cmd.Type)
			}
			if err != nil {
				s.Raft.config.Logger.Error("DB Apply failed: %v", err)
			}
		} else if msg.SnapshotValid {
			if err := s.DB.Restore(msg.Snapshot); err != nil {
				s.Raft.config.Logger.Error("DB Restore failed: %v", err)
			}
		}
	}
}

// Set sets a key-value pair
func (s *KVServer) Set(key, value string) error {
	cmd := KVCommand{
		Type:  KVSet,
		Key:   key,
		Value: value,
	}
	data, err := json.Marshal(cmd)
	if err != nil {
		return err
	}
	_, _, err = s.Raft.ProposeWithForward(data)
	return err
}

// Del deletes a key
func (s *KVServer) Del(key string) error {
	cmd := KVCommand{
		Type: KVDel,
		Key:  key,
	}
	data, err := json.Marshal(cmd)
	if err != nil {
		return err
	}
	_, _, err = s.Raft.ProposeWithForward(data)
	return err
}

// Get gets a value (local read, can be stale)
// For linearizable reads, use GetLinear instead
func (s *KVServer) Get(key string) (string, bool) {
	return s.DB.Get(key)
}

// GetLinear gets a value with linearizable consistency
// This ensures the read sees all writes committed before the read started
func (s *KVServer) GetLinear(key string) (string, bool, error) {
	// First, ensure we have up-to-date data via ReadIndex
	_, err := s.Raft.ReadIndex()
	if err != nil {
		// If we're not leader, try forwarding
		if errors.Is(err, ErrNotLeader) {
			return s.forwardGet(key)
		}
		return "", false, err
	}
	val, ok := s.DB.Get(key)
	return val, ok, nil
}

// forwardGet forwards a get request to the leader
func (s *KVServer) forwardGet(key string) (string, bool, error) {
	return s.Raft.ForwardGet(key)
}

// Join joins an existing cluster
func (s *KVServer) Join(nodeID, addr string) error {
	return s.Raft.AddNodeWithForward(nodeID, addr)
}

// Leave leaves the cluster
func (s *KVServer) Leave(nodeID string) error {
	return s.Raft.RemoveNodeWithForward(nodeID)
}

// WaitForLeader waits until a leader is elected
func (s *KVServer) WaitForLeader(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		leader := s.Raft.GetLeaderID()
		if leader != "" {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timeout waiting for leader")
}

// HealthCheck returns the health status of this server
func (s *KVServer) HealthCheck() HealthStatus {
	return s.Raft.HealthCheck()
}

// GetStats returns runtime statistics
func (s *KVServer) GetStats() Stats {
	return s.Raft.GetStats()
}

// GetMetrics returns runtime metrics
func (s *KVServer) GetMetrics() Metrics {
	return s.Raft.GetMetrics()
}

// TransferLeadership transfers leadership to the specified node
func (s *KVServer) TransferLeadership(targetID string) error {
	return s.Raft.TransferLeadership(targetID)
}

// GetClusterNodes returns current cluster membership
func (s *KVServer) GetClusterNodes() map[string]string {
	return s.Raft.GetClusterNodes()
}

// IsLeader returns true if this node is the leader
func (s *KVServer) IsLeader() bool {
	_, isLeader := s.Raft.GetState()
	return isLeader
}

// GetLeaderID returns the current leader ID
func (s *KVServer) GetLeaderID() string {
	return s.Raft.GetLeaderID()
}

// WatchAll registers a watcher for all keys
func (s *KVServer) WatchAll(handler WatchHandler) {
	// s.FSM.WatchAll(handler)
	// TODO: Implement Watcher for DB
}

// Watch registers a watcher for a key
func (s *KVServer) Watch(key string, handler WatchHandler) {
	// s.FSM.Watch(key, handler)
	// TODO: Implement Watcher for DB
}

// Unwatch removes watchers for a key
func (s *KVServer) Unwatch(key string) {
	// s.FSM.Unwatch(key)
	// TODO: Implement Watcher for DB
}

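// maintenanceLoop runs periodic housekeeping until stopCh is closed: it
// publishes this node's metadata and, on the leader, checks cluster connectivity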
func (s *KVServer) maintenanceLoop() {
	defer s.wg.Done()
	// Check every 1 second for faster reaction
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-s.stopCh:
			return
		case <-ticker.C:
			s.updateNodeInfo()
			s.checkConnections()
		}
	}
}

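// updateNodeInfo replicates this node's address under "CreateNode/<NodeID>"
// and, on the leader, keeps the aggregated "RaftNode" membership key up to date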
func (s *KVServer) updateNodeInfo() {
	// 1. Ensure "CreateNode/<NodeID>" is set to this node's address
	// We do this via Propose (Set) so it is replicated
	myID := s.Raft.config.NodeID
	myAddr := s.Raft.config.ListenAddr
	key := fmt.Sprintf("CreateNode/%s", myID)
	// Check whether an update is needed (avoid spamming logs/proposals)
	val, exists := s.Get(key)
	if !exists || val != myAddr {
		// Run in a goroutine to avoid blocking
		go func() {
			if err := s.Set(key, myAddr); err != nil {
				s.Raft.config.Logger.Debug("Failed to update node info: %v", err)
			}
		}()
	}

	// 2. Only the leader updates the RaftNode aggregation
	if s.IsLeader() {
		// Read the current RaftNode value to preserve history
		currentVal, _ := s.Get("RaftNode")
		knownNodes := make(map[string]string)
		if currentVal != "" {
			parts := strings.Split(currentVal, ";")
			for _, part := range parts {
				if part == "" {
					continue
				}
				kv := strings.SplitN(part, "=", 2)
				if len(kv) == 2 {
					knownNodes[kv[0]] = kv[1]
				}
			}
		}
		// Merge in the current cluster nodes
		changed := false
		currentCluster := s.GetClusterNodes()
		for id, addr := range currentCluster {
			if knownNodes[id] != addr {
				knownNodes[id] = addr
				changed = true
			}
		}
		// If anything changed, update RaftNode
		if changed {
			var peers []string
			for id, addr := range knownNodes {
				peers = append(peers, fmt.Sprintf("%s=%s", id, addr))
			}
			sort.Strings(peers)
			newVal := strings.Join(peers, ";")
			// Check again before writing to avoid loops if Get returned stale data
			if newVal != currentVal {
				go func(k, v string) {
					if err := s.Set(k, v); err != nil {
						s.Raft.config.Logger.Warn("Failed to update RaftNode key: %v", err)
					}
				}("RaftNode", newVal)
			}
		}
	}
}

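// checkConnections runs on the leader only: it compares the membership saved
// in the "RaftNode" key with the current cluster and tries to re-add any node
// that has gone missing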
func (s *KVServer) checkConnections() {
	if !s.IsLeader() {
		return
	}
	// Read RaftNode key to find potential members that are missing
	val, ok := s.Get("RaftNode")
	if !ok || val == "" {
		return
	}
	// Parse saved nodes
	savedParts := strings.Split(val, ";")
	currentNodes := s.GetClusterNodes()
	// Invert currentNodes for address check
	currentAddrs := make(map[string]bool)
	for _, addr := range currentNodes {
		currentAddrs[addr] = true
	}
	for _, part := range savedParts {
		if part == "" {
			continue
		}
		// Expect id=addr
		kv := strings.SplitN(part, "=", 2)
		if len(kv) != 2 {
			continue
		}
		id, addr := kv[0], kv[1]
		if !currentAddrs[addr] {
			// Found a node that was previously in the cluster but is now missing,
			// so try to add it back. AddNodeWithForward is largely non-blocking,
			// but run it in a goroutine so this loop is never held up
			go func(nodeID, nodeAddr string) {
				s.Raft.config.Logger.Info("Auto-rejoining node found in RaftNode: %s (%s)", nodeID, nodeAddr)
				if err := s.Join(nodeID, nodeAddr); err != nil {
					s.Raft.config.Logger.Debug("Failed to auto-rejoin node %s: %v", nodeID, err)
				}
			}(id, addr)
		}
	}
}