package metrics

import (
	"math/rand"
	"runtime"
	"testing"
	"time"
)

// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}

func BenchmarkCompute1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}

func BenchmarkCopy1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}

func BenchmarkCopy1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}
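
// exampleVariance is a hedged sketch, not part of the library: it assumes
// SampleVariance computes the mean squared deviation from the sample mean,
// which is the kind of O(n) pass over the values that the Compute benchmarks
// above are timing. The name exampleVariance is illustrative only.
func exampleVariance(values []int64) float64 {
	if len(values) == 0 {
		return 0.0
	}
	var sum float64
	for _, v := range values {
		sum += float64(v)
	}
	mean := sum / float64(len(values))
	var sumSquares float64
	for _, v := range values {
		d := float64(v) - mean
		sumSquares += d * d
	}
	return sumSquares / float64(len(values))
}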

func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}

func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}

func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}

func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}

func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}

func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}
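
// In the benchmarks above, the first argument to NewExpDecaySample and
// NewUniformSample is the reservoir size; the second argument to
// NewExpDecaySample (0.015 here) is assumed to be the exponential-decay
// factor alpha, which controls how strongly recent values are favored.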

func TestExpDecaySample10(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 10; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 10 != size {
		t.Errorf("s.Count(): 10 != %v\n", size)
	}
	if size := s.Size(); 10 != size {
		t.Errorf("s.Size(): 10 != %v\n", size)
	}
	if l := len(s.Values()); 10 != l {
		t.Errorf("len(s.Values()): 10 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 10 || v < 0 {
			t.Errorf("out of range [0, 10): %v\n", v)
		}
	}
}

func TestExpDecaySample100(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(1000, 0.01)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 100 != size {
		t.Errorf("s.Count(): 100 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 100 || v < 0 {
			t.Errorf("out of range [0, 100): %v\n", v)
		}
	}
}

func TestExpDecaySample1000(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

// TestExpDecaySampleNanosecondRegression makes sure that the sample's priority
// is not amplified by measuring the duration since start in nanoseconds rather
// than seconds. If nanoseconds are used, the priority becomes +Inf shortly
// after starting, effectively freezing the set of samples until a rescale step
// happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 100; i++ {
		s.Update(10)
	}
	time.Sleep(1 * time.Millisecond)
	for i := 0; i < 100; i++ {
		s.Update(20)
	}
	v := s.Values()
	avg := float64(0)
	for i := 0; i < len(v); i++ {
		avg += float64(v[i])
	}
	avg /= float64(len(v))
	if avg > 16 || avg < 14 {
		t.Errorf("out of range [14, 16]: %v\n", avg)
	}
}
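
// A hedged sketch of the priority the regression comment above refers to,
// under the assumption that the exponentially-decaying reservoir ranks a value
// observed at time t by something like
//
//	k = exp(alpha * (t - t0).Seconds()) / rand.Float64()
//
// With the age measured in nanoseconds instead of seconds, the exponent
// overflows to +Inf almost immediately, so no new value can displace an old
// one until the next rescale; the average checked above would then stay near
// 10 instead of landing between 14 and 16.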

func TestExpDecaySampleRescale(t *testing.T) {
	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
	s.update(time.Now(), 1)
	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
	for _, v := range s.values.Values() {
		if v.k == 0.0 {
			t.Fatal("v.k == 0.0")
		}
	}
}

func TestExpDecaySampleSnapshot(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testExpDecaySampleStatistics(t, snapshot)
}
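
// exampleSnapshotUsage is a hedged usage sketch, not part of the original
// tests: TestExpDecaySampleSnapshot above relies on Snapshot returning a
// read-only view whose statistics are unaffected by later Update calls, which
// is the pattern a reporter would use.
func exampleSnapshotUsage() (int64, float64) {
	s := NewExpDecaySample(1028, 0.015)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	snap := s.Snapshot() // statistics are read from the snapshot...
	s.Update(1000)       // ...so later updates to the live sample do not affect them
	return snap.Count(), snap.Mean()
}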

func TestExpDecaySampleStatistics(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	testExpDecaySampleStatistics(t, s)
}

func TestUniformSample(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}
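
// TestUniformSampleIncludesTail (below) feeds exactly reservoir-size values
// into the sample and checks that all of them are retained: the retained
// values must sum to (max-1)*max/2 = 4950, which only holds if every value,
// including the final one, made it into the reservoir.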

func TestUniformSampleIncludesTail(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	max := 100
	for i := 0; i < max; i++ {
		s.Update(int64(i))
	}
	v := s.Values()
	sum := 0
	exp := (max - 1) * max / 2
	for i := 0; i < len(v); i++ {
		sum += int(v[i])
	}
	if exp != sum {
		t.Errorf("sum: %v != %v\n", exp, sum)
	}
}

func TestUniformSampleSnapshot(t *testing.T) {
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testUniformSampleStatistics(t, snapshot)
}

func TestUniformSampleStatistics(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	testUniformSampleStatistics(t, s)
}
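
// benchmarkSample drives Update on the given Sample b.N times and, in addition
// to the standard ns/op figure, logs an approximate GC cost per operation
// derived from the change in runtime.MemStats.PauseTotalNs across the loop.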

func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
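
// testExpDecaySampleStatistics checks the exact statistics the
// exponentially-decaying sample produces for the deterministic input stream
// above; the expected constants depend on rand.Seed(1) and on the 1..10000
// update pattern used by the calling tests.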

func testExpDecaySampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 107 != min {
		t.Errorf("s.Min(): 107 != %v\n", min)
	}
	if max := s.Max(); 10000 != max {
		t.Errorf("s.Max(): 10000 != %v\n", max)
	}
	if mean := s.Mean(); 4965.98 != mean {
		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4615 != ps[0] {
		t.Errorf("median: 4615 != %v\n", ps[0])
	}
	if 7672 != ps[1] {
		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
	}
	if 9998.99 != ps[2] {
		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
	}
}
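
// testUniformSampleStatistics is the uniform-sample counterpart; the expected
// constants below are likewise tied to rand.Seed(1) and the 1..10000 update
// stream used by the calling tests.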

func testUniformSampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 37 != min {
		t.Errorf("s.Min(): 37 != %v\n", min)
	}
	if max := s.Max(); 9989 != max {
		t.Errorf("s.Max(): 9989 != %v\n", max)
	}
	if mean := s.Mean(); 4748.14 != mean {
		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4599 != ps[0] {
		t.Errorf("median: 4599 != %v\n", ps[0])
	}
	if 7380.5 != ps[1] {
		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
	}
	if 9986.429999999998 != ps[2] {
		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
	}
}

// TestUniformSampleConcurrentUpdateCount exposes data races between concurrent
// Update and Count calls on a Sample when the test is run with the -race flag.
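// A typical invocation would be something like:
//
//	go test -race -run TestUniformSampleConcurrentUpdateCount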
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		for {
			select {
			case <-ticker.C:
				s.Update(rand.Int63())
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()
	for i := 0; i < 1000; i++ {
		s.Count()
		time.Sleep(5 * time.Millisecond)
	}
	quit <- struct{}{}
}