watchable_store_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/mvcc/mvccpb"
)
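
// TestWatch tests that watching a key adds the watcher to the
// synced watcher group.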
func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	w.Watch(testKey, nil, 0)

	if !s.synced.contains(string(testKey)) {
		// the key must have an entry in synced
		t.Errorf("existence = false, want true")
	}
}
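
// TestNewWatcherCancel tests that canceling a watch removes the watcher
// from the synced watcher group.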
func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	wt := w.Watch(testKey, nil, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if s.synced.contains(string(testKey)) {
		// the key should have been deleted
		t.Errorf("existence = true, want false")
	}
}

// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls syncWatchers
	// method to sync watchers in unsynced map. We want to keep watchers
	// in unsynced to test if Cancel works as expected.
	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// and force watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	// create watcherN watch IDs to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use 1 to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After running CancelFunc
	//
	// unsynced should be empty
	// because cancel removes watcher from unsynced
	if size := s.unsynced.size(); size != 0 {
		t.Errorf("unsynced size = %d, want 0", size)
	}
}

// TestSyncWatchers populates the unsynced watcher map and tests the
// syncWatchers method to see if it correctly sends events to the channels
// of unsynced watchers and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// specify rev as 1 to keep watchers in unsynced
		w.Watch(testKey, nil, 1)
	}

	// Before running s.syncWatchers(), synced should be empty because we
	// manually populate unsynced only.
	sws := s.synced.watcherSetByKey(string(testKey))
	uws := s.unsynced.watcherSetByKey(string(testKey))

	if len(sws) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
	}
	// unsynced should not be empty because we manually populated unsynced only
	if len(uws) != watcherN {
		t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
	}

	// this should move all unsynced watchers to synced ones
	s.syncWatchers()

	sws = s.synced.watcherSetByKey(string(testKey))
	uws = s.unsynced.watcherSetByKey(string(testKey))

	// After running s.syncWatchers(), synced should not be empty because
	// syncWatchers populates synced in this test case.
	if len(sws) != watcherN {
		t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
	}

	// unsynced should be empty because syncWatchers is expected to move
	// all watchers from unsynced to synced in this test case.
	if len(uws) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(uws))
	}

	for w := range sws {
		if w.minRev != s.Rev()+1 {
			t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
		}
	}

	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	evs := (<-w.(*watchStream).ch).Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != mvccpb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}

// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	maxRev := 10
	compactRev := int64(5)
	for i := 0; i < maxRev; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}
	_, err := s.Compact(compactRev)
	if err != nil {
		t.Fatalf("failed to compact kv (%v)", err)
	}

	w := s.NewWatchStream()
	wt := w.Watch(testKey, nil, compactRev-1)

	select {
	case resp := <-w.Chan():
		if resp.WatchID != wt {
			t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
		}
		if resp.CompactRevision == 0 {
			t.Errorf("resp.CompactRevision = %v, want %v", resp.CompactRevision, compactRev)
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("failed to receive response (timeout)")
	}
}
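
// TestWatchFutureRev tests that a watcher watching on a future revision
// receives an event once the store reaches that revision.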
func TestWatchFutureRev(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	w := s.NewWatchStream()
	wrev := int64(10)
	w.Watch(testKey, nil, wrev)

	for i := 0; i < 10; i++ {
		rev := s.Put(testKey, testValue, lease.NoLease)
		if rev >= wrev {
			break
		}
	}

	select {
	case resp := <-w.Chan():
		if resp.Revision != wrev {
			t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("failed to get events from the response")
		}
		if resp.Events[0].Kv.ModRevision != wrev {
			t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second.")
	}
}
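
// TestWatchRestore tests that a watcher receives events written to
// another store's backend once its own store is restored from that backend.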
func TestWatchRestore(t *testing.T) {
	test := func(delay time.Duration) func(t *testing.T) {
		return func(t *testing.T) {
			b, tmpPath := backend.NewDefaultTmpBackend()
			s := newWatchableStore(b, &lease.FakeLessor{}, nil)
			defer cleanup(s, b, tmpPath)

			testKey := []byte("foo")
			testValue := []byte("bar")
			rev := s.Put(testKey, testValue, lease.NoLease)

			newBackend, newPath := backend.NewDefaultTmpBackend()
			newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil)
			defer cleanup(newStore, newBackend, newPath)

			w := newStore.NewWatchStream()
			w.Watch(testKey, nil, rev-1)

			time.Sleep(delay)

			newStore.Restore(b)
			select {
			case resp := <-w.Chan():
				if resp.Revision != rev {
					t.Fatalf("rev = %d, want %d", resp.Revision, rev)
				}
				if len(resp.Events) != 1 {
					t.Fatalf("failed to get events from the response")
				}
				if resp.Events[0].Kv.ModRevision != rev {
					t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
				}
			case <-time.After(time.Second):
				t.Fatal("failed to receive event in 1 second.")
			}
		}
	}

	t.Run("Normal", test(0))
	t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
}

// TestWatchRestoreSyncedWatcher tests such a case that:
//   1. watcher is created with a future revision "rev + 2"
//   2. watcher with a future revision is added to "synced" watcher group
//   3. restore/overwrite storage with snapshot of a higher last revision
//   4. restore operation moves "synced" to "unsynced" watcher group
//   5. choose the watcher from step 1, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
	b1, b1Path := backend.NewDefaultTmpBackend()
	s1 := newWatchableStore(b1, &lease.FakeLessor{}, nil)
	defer cleanup(s1, b1, b1Path)

	b2, b2Path := backend.NewDefaultTmpBackend()
	s2 := newWatchableStore(b2, &lease.FakeLessor{}, nil)
	defer cleanup(s2, b2, b2Path)

	testKey, testValue := []byte("foo"), []byte("bar")
	rev := s1.Put(testKey, testValue, lease.NoLease)
	startRev := rev + 2

	// create a watcher with a future revision
	// add to "synced" watcher group (startRev > s.store.currentRev)
	w1 := s1.NewWatchStream()
	w1.Watch(testKey, nil, startRev)

	// make "s2" end up with a higher last revision
	s2.Put(testKey, testValue, lease.NoLease)
	s2.Put(testKey, testValue, lease.NoLease)

	// overwrite storage with higher revisions
	if err := s1.Restore(b2); err != nil {
		t.Fatal(err)
	}

	// wait for next "syncWatchersLoop" iteration
	// and the unsynced watcher should be chosen
	time.Sleep(2 * time.Second)

	// trigger events for "startRev"
	s1.Put(testKey, testValue, lease.NoLease)

	select {
	case resp := <-w1.Chan():
		if resp.Revision != startRev {
			t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
		}
		if resp.Events[0].Kv.ModRevision != startRev {
			t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second")
	}
}

// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	oldMaxRevs := watchBatchMaxRevs
	defer func() {
		watchBatchMaxRevs = oldMaxRevs
		s.store.Close()
		os.Remove(tmpPath)
	}()
	batches := 3
	watchBatchMaxRevs = 4
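	// With watchBatchMaxRevs = 4 and 3 batches, the 12 puts below should
	// arrive as 3 responses of 4 events each.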
	v := []byte("foo")
	for i := 0; i < watchBatchMaxRevs*batches; i++ {
		s.Put(v, v, lease.NoLease)
	}

	w := s.NewWatchStream()
	w.Watch(v, nil, 1)
	for i := 0; i < batches; i++ {
		if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
			t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
		}
	}

	s.store.revMu.Lock()
	defer s.store.revMu.Unlock()
	if size := s.synced.size(); size != 1 {
		t.Errorf("synced size = %d, want 1", size)
	}
}
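
// TestNewMapwatcherToEventMap tests that newWatcherBatch maps events
// only to the watchers whose keys match.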
func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []mvccpb.Event{
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync []*watcher
		evs  []mvccpb.Event

		wwe map[*watcher][]mvccpb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			nil,
			evs,
			map[*watcher][]mvccpb.Event{},
		},

		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			[]*watcher{ws[2]},
			evs[:1],
			map[*watcher][]mvccpb.Event{},
		},

		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			[]*watcher{ws[1]},
			evs[1:2],
			map[*watcher][]mvccpb.Event{
				ws[1]: evs[1:2],
			},
		},

		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return
		// wwe with the matching watcher
		{
			[]*watcher{ws[0], ws[2]},
			evs[2:],
			map[*watcher][]mvccpb.Event{
				ws[2]: evs[2:],
			},
		},

		// two watchers in sync, each watching a different key, and two
		// events that match those keys should return wwe with both watchers
		{
			[]*watcher{ws[0], ws[1]},
			evs[:2],
			map[*watcher][]mvccpb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}

	for i, tt := range tests {
		wg := newWatcherGroup()
		for _, w := range tt.sync {
			wg.add(w)
		}

		gwe := newWatcherBatch(&wg, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, eb := range gwe {
			if len(eb.evs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
				t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
			}
		}
	}
}

// TestWatchVictims tests that watchable store delivers watch events
// when the watch channel is temporarily clogged with too many events.
func TestWatchVictims(t *testing.T) {
	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync

	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
	}()

	chanBufLen, maxWatchersPerSync = 1, 2
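	// With chanBufLen = 1 and maxWatchersPerSync = 2, watcher channels fill
	// immediately, forcing watchers onto the victim (blocked) path.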
	numPuts := chanBufLen * 64
	testKey, testValue := []byte("foo"), []byte("bar")

	var wg sync.WaitGroup
	numWatches := maxWatchersPerSync * 128
	errc := make(chan error, numWatches)
	wg.Add(numWatches)
	for i := 0; i < numWatches; i++ {
		go func() {
			w := s.NewWatchStream()
			w.Watch(testKey, nil, 1)
			defer func() {
				w.Close()
				wg.Done()
			}()
			tc := time.After(10 * time.Second)
			evs, nextRev := 0, int64(2)
			for evs < numPuts {
				select {
				case <-tc:
					errc <- fmt.Errorf("time out")
					return
				case wr := <-w.Chan():
					evs += len(wr.Events)
					for _, ev := range wr.Events {
						if ev.Kv.ModRevision != nextRev {
							errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
							return
						}
						nextRev++
					}
					time.Sleep(time.Millisecond)
				}
			}
			if evs != numPuts {
				errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
				return
			}
			select {
			case <-w.Chan():
				errc <- fmt.Errorf("unexpected response")
			default:
			}
		}()
		time.Sleep(time.Millisecond)
	}

	var wgPut sync.WaitGroup
	wgPut.Add(numPuts)
	for i := 0; i < numPuts; i++ {
		go func() {
			defer wgPut.Done()
			s.Put(testKey, testValue, lease.NoLease)
		}()
	}
	wgPut.Wait()

	wg.Wait()
	select {
	case err := <-errc:
		t.Fatal(err)
	default:
	}
}

// TestStressWatchCancelClose tests closing a watch stream while
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey, testValue := []byte("foo"), []byte("bar")
	var wg sync.WaitGroup
	readyc := make(chan struct{})
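	// readyc holds back the cancel and close goroutines until the main
	// goroutine releases them, maximizing contention between Cancel and Close.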
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			defer wg.Done()
			w := s.NewWatchStream()
			ids := make([]WatchID, 10)
			for i := range ids {
				ids[i] = w.Watch(testKey, nil, 0)
			}
			<-readyc
			wg.Add(1 + len(ids)/2)
			for i := range ids[:len(ids)/2] {
				go func(n int) {
					defer wg.Done()
					w.Cancel(ids[n])
				}(i)
			}
			go func() {
				defer wg.Done()
				w.Close()
			}()
		}()
	}

	close(readyc)
	for i := 0; i < 100; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}

	wg.Wait()
}