// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package embed

import (
	"context"
	"crypto/tls"
	"fmt"
	"io/ioutil"
	defaultLog "log"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/coreos/etcd/compactor"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
	"github.com/coreos/etcd/etcdserver/api/v2http"
	"github.com/coreos/etcd/etcdserver/api/v2v3"
	"github.com/coreos/etcd/etcdserver/api/v3client"
	"github.com/coreos/etcd/etcdserver/api/v3rpc"
	"github.com/coreos/etcd/pkg/cors"
	"github.com/coreos/etcd/pkg/debugutil"
	runtimeutil "github.com/coreos/etcd/pkg/runtime"
	"github.com/coreos/etcd/pkg/transport"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/rafthttp"

	"github.com/coreos/pkg/capnslog"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/soheilhy/cmux"
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")

const (
	// internal fd usage includes disk usage and transport usage.
	// To read/write a snapshot, the snap pkg needs 1. In the normal case, the wal
	// pkg needs at most 2 to read/lock/write WALs. One case where it needs 2 is
	// reading all logs after some snapshot index, which spans the end of the
	// second-to-last WAL and the head of the last one. For purging, it needs to
	// read the directory, so it needs 1. For the fd monitor, it needs 1.
	// For transport, rafthttp builds two long-polling connections and at most
	// four temporary connections with each member. There are at most 9 members
	// in a cluster, so it should reserve 96.
	// For safety, we set the total reserved number to 150.
	reservedInternalFDNum = 150
)
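
// Note: reservedInternalFDNum is enforced in startClientListeners below, where each
// client listener is wrapped with transport.LimitListener so that it accepts at most
// (fdLimit - reservedInternalFDNum) concurrent client connections.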

// Etcd contains a running etcd server and its listeners.
type Etcd struct {
	Peers   []*peerListener
	Clients []net.Listener
	// a map of contexts for the servers that serve client requests.
	sctxs            map[string]*serveCtx
	metricsListeners []net.Listener

	Server *etcdserver.EtcdServer

	cfg   Config
	stopc chan struct{}
	errc  chan error

	closeOnce sync.Once
}

type peerListener struct {
	net.Listener
	serve func() error
	close func(context.Context) error
}

// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
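//
// A minimal usage sketch (assumes NewConfig defaults; "default.etcd" is only an
// illustrative data directory, not a required value):
//
//	cfg := embed.NewConfig()
//	cfg.Dir = "default.etcd"
//	e, err := embed.StartEtcd(cfg)
//	if err != nil {
//		return err
//	}
//	defer e.Close()
//	<-e.Server.ReadyNotify() // block until the member has joined the cluster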
func StartEtcd(inCfg *Config) (e *Etcd, err error) {
	if err = inCfg.Validate(); err != nil {
		return nil, err
	}
	serving := false
	e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
	cfg := &e.cfg
	defer func() {
		if e == nil || err == nil {
			return
		}
		if !serving {
			// errored before starting gRPC server for serveCtx.serversC
			for _, sctx := range e.sctxs {
				close(sctx.serversC)
			}
		}
		e.Close()
		e = nil
	}()

	if e.Peers, err = startPeerListeners(cfg); err != nil {
		return e, err
	}
	if e.sctxs, err = startClientListeners(cfg); err != nil {
		return e, err
	}
	for _, sctx := range e.sctxs {
		e.Clients = append(e.Clients, sctx.l)
	}

	var (
		urlsmap types.URLsMap
		token   string
	)
	memberInitialized := true
	if !isMemberInitialized(cfg) {
		memberInitialized = false
		urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
		if err != nil {
			return e, fmt.Errorf("error setting up initial cluster: %v", err)
		}
	}

	// AutoCompactionRetention defaults to "0" if not set.
	if len(cfg.AutoCompactionRetention) == 0 {
		cfg.AutoCompactionRetention = "0"
	}
	autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
	if err != nil {
		return e, err
	}

	srvcfg := etcdserver.ServerConfig{
		Name:                       cfg.Name,
		ClientURLs:                 cfg.ACUrls,
		PeerURLs:                   cfg.APUrls,
		DataDir:                    cfg.Dir,
		DedicatedWALDir:            cfg.WalDir,
		SnapCount:                  cfg.SnapCount,
		MaxSnapFiles:               cfg.MaxSnapFiles,
		MaxWALFiles:                cfg.MaxWalFiles,
		InitialPeerURLsMap:         urlsmap,
		InitialClusterToken:        token,
		DiscoveryURL:               cfg.Durl,
		DiscoveryProxy:             cfg.Dproxy,
		NewCluster:                 cfg.IsNewCluster(),
		ForceNewCluster:            cfg.ForceNewCluster,
		PeerTLSInfo:                cfg.PeerTLSInfo,
		TickMs:                     cfg.TickMs,
		ElectionTicks:              cfg.ElectionTicks(),
		InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
		AutoCompactionRetention:    autoCompactionRetention,
		AutoCompactionMode:         cfg.AutoCompactionMode,
		QuotaBackendBytes:          cfg.QuotaBackendBytes,
		MaxTxnOps:                  cfg.MaxTxnOps,
		MaxRequestBytes:            cfg.MaxRequestBytes,
		StrictReconfigCheck:        cfg.StrictReconfigCheck,
		ClientCertAuthEnabled:      cfg.ClientTLSInfo.ClientCertAuth,
		AuthToken:                  cfg.AuthToken,
		InitialCorruptCheck:        cfg.ExperimentalInitialCorruptCheck,
		CorruptCheckTime:           cfg.ExperimentalCorruptCheckTime,
		Debug:                      cfg.Debug,
	}

	if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
		return e, err
	}

	// buffer channel so goroutines on closed connections won't wait forever
	e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))

	// newly started member ("memberInitialized==false")
	// does not need corruption check
	if memberInitialized {
		if err = e.Server.CheckInitialHashKV(); err != nil {
			// set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
			// (nothing to close since rafthttp transports have not been started)
			e.Server = nil
			return e, err
		}
	}
	e.Server.Start()

	if err = e.servePeers(); err != nil {
		return e, err
	}
	if err = e.serveClients(); err != nil {
		return e, err
	}
	if err = e.serveMetrics(); err != nil {
		return e, err
	}

	serving = true
	return e, nil
}

// Config returns the current configuration.
func (e *Etcd) Config() Config {
	return e.cfg
}

// Close gracefully shuts down all servers/listeners.
// Client requests will be terminated with request timeout.
// After the timeout, remaining requests are forcibly closed.
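// Shutdown proceeds in order: client servers first (with the request timeout),
// then client and metrics listeners, then the raft server and its rafthttp
// transports, and finally the peer handlers, each given up to one second to
// close idle connections.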
func (e *Etcd) Close() {
	e.closeOnce.Do(func() { close(e.stopc) })

	// close client requests with request timeout
	timeout := 2 * time.Second
	if e.Server != nil {
		timeout = e.Server.Cfg.ReqTimeout()
	}
	for _, sctx := range e.sctxs {
		for ss := range sctx.serversC {
			ctx, cancel := context.WithTimeout(context.Background(), timeout)
			stopServers(ctx, ss)
			cancel()
		}
	}

	for _, sctx := range e.sctxs {
		sctx.cancel()
	}

	for i := range e.Clients {
		if e.Clients[i] != nil {
			e.Clients[i].Close()
		}
	}

	for i := range e.metricsListeners {
		e.metricsListeners[i].Close()
	}

	// close rafthttp transports
	if e.Server != nil {
		e.Server.Stop()
	}

	// close all idle connections in peer handler (wait up to 1 second)
	for i := range e.Peers {
		if e.Peers[i] != nil && e.Peers[i].close != nil {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			e.Peers[i].close(ctx)
			cancel()
		}
	}
}
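
// stopServers shuts down the HTTP and gRPC servers for a single listener. With TLS
// enabled it stops immediately (GracefulStop is not safe there; see the linked
// issues below); otherwise it attempts grpc.Server.GracefulStop and falls back to a
// hard stop when the context expires.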
func stopServers(ctx context.Context, ss *servers) {
	shutdownNow := func() {
		// first, close the http.Server
		ss.http.Shutdown(ctx)
		// then close grpc.Server; cancels all active RPCs
		ss.grpc.Stop()
	}

	// do not grpc.Server.GracefulStop with TLS enabled etcd server
	// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
	// and https://github.com/coreos/etcd/issues/8916
	if ss.secure {
		shutdownNow()
		return
	}

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// close listeners to stop accepting new connections,
		// will block on any existing transports
		ss.grpc.GracefulStop()
	}()

	// wait until all pending RPCs are finished
	select {
	case <-ch:
	case <-ctx.Done():
		// took too long, manually close open transports
		// e.g. watch streams
		shutdownNow()

		// concurrent GracefulStop should be interrupted
		<-ch
	}
}
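
// Err returns a channel that receives errors reported by the peer, client, and
// metrics serve goroutines (see errHandler).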
func (e *Etcd) Err() <-chan error { return e.errc }
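
// startPeerListeners opens a rafthttp listener for every URL in cfg.LPUrls. If any
// listener fails to open, the ones opened so far are closed before returning.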
func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
	if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
		return nil, err
	}
	if err = cfg.PeerSelfCert(); err != nil {
		plog.Fatalf("could not get certs (%v)", err)
	}
	if !cfg.PeerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
	}

	peers = make([]*peerListener, len(cfg.LPUrls))
	defer func() {
		if err == nil {
			return
		}
		for i := range peers {
			if peers[i] != nil && peers[i].close != nil {
				plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
				ctx, cancel := context.WithTimeout(context.Background(), time.Second)
				peers[i].close(ctx)
				cancel()
			}
		}
	}()

	for i, u := range cfg.LPUrls {
		if u.Scheme == "http" {
			if !cfg.PeerTLSInfo.Empty() {
				plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
			}
			if cfg.PeerTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		peers[i] = &peerListener{close: func(context.Context) error { return nil }}
		peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
		if err != nil {
			return nil, err
		}
		// capture this iteration's listener rather than the loop variable;
		// once serving, servePeers overwrites close with a graceful shutdown
		// via 'http.Server.Shutdown'
		pl := peers[i].Listener
		peers[i].close = func(context.Context) error {
			return pl.Close()
		}
		plog.Info("listening for peers on ", u.String())
	}
	return peers, nil
}

// configure peer handlers after rafthttp.Transport started
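// Each peer listener is multiplexed with cmux: HTTP/2 traffic is served by a v3rpc
// gRPC server, and all other requests fall through to the peer HTTP handler.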
func (e *Etcd) servePeers() (err error) {
	ph := etcdhttp.NewPeerHandler(e.Server)
	var peerTLScfg *tls.Config
	if !e.cfg.PeerTLSInfo.Empty() {
		if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
			return err
		}
	}

	for _, p := range e.Peers {
		gs := v3rpc.Server(e.Server, peerTLScfg)
		m := cmux.New(p.Listener)
		go gs.Serve(m.Match(cmux.HTTP2()))
		srv := &http.Server{
			Handler:     grpcHandlerFunc(gs, ph),
			ReadTimeout: 5 * time.Minute,
			ErrorLog:    defaultLog.New(ioutil.Discard, "", 0), // do not log user error
		}
		go srv.Serve(m.Match(cmux.Any()))
		p.serve = func() error { return m.Serve() }
		p.close = func(ctx context.Context) error {
			// gracefully shutdown http.Server
			// close open listeners, idle connections
			// until context cancel or time-out
			stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
			return nil
		}
	}

	// start peer servers in a goroutine
	for _, pl := range e.Peers {
		go func(l *peerListener) {
			e.errHandler(l.serve())
		}(pl)
	}
	return nil
}
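
// startClientListeners creates one serveCtx per URL in cfg.LCUrls. URLs that
// resolve to the same address share a single listener, which may then accept
// both secure and insecure connections.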
func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
	if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
		return nil, err
	}
	if err = cfg.ClientSelfCert(); err != nil {
		plog.Fatalf("could not get certs (%v)", err)
	}
	if cfg.EnablePprof {
		plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
	}

	sctxs = make(map[string]*serveCtx)
	for _, u := range cfg.LCUrls {
		sctx := newServeCtx()

		if u.Scheme == "http" || u.Scheme == "unix" {
			if !cfg.ClientTLSInfo.Empty() {
				plog.Warningf("The scheme of client url %s is HTTP while client key/cert files are presented. Ignored key/cert files.", u.String())
			}
			if cfg.ClientTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
			return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
		}

		proto := "tcp"
		addr := u.Host
		if u.Scheme == "unix" || u.Scheme == "unixs" {
			proto = "unix"
			addr = u.Host + u.Path
		}

		sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
		sctx.insecure = !sctx.secure
		if oldctx := sctxs[addr]; oldctx != nil {
			oldctx.secure = oldctx.secure || sctx.secure
			oldctx.insecure = oldctx.insecure || sctx.insecure
			continue
		}

		if sctx.l, err = net.Listen(proto, addr); err != nil {
			return nil, err
		}
		// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
		// hosts that disable ipv6. So, use the address given by the user.
		sctx.addr = addr

		if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
		}

		if proto == "tcp" {
			if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil {
				return nil, err
			}
		}

		plog.Info("listening for client requests on ", u.Host)
		defer func() {
			if err != nil {
				sctx.l.Close()
				plog.Info("stopping listening for client requests on ", u.Host)
			}
		}()
		for k := range cfg.UserHandlers {
			sctx.userHandlers[k] = cfg.UserHandlers[k]
		}
		sctx.serviceRegister = cfg.ServiceRegister
		if cfg.EnablePprof || cfg.Debug {
			sctx.registerPprof()
		}
		if cfg.Debug {
			sctx.registerTrace()
		}
		sctxs[addr] = sctx
	}
	return sctxs, nil
}
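
// serveClients builds the client-facing HTTP handler (the v2 API when enabled,
// otherwise just the basic endpoints registered by etcdhttp.HandleBasic), wraps it
// with CORS, and starts one serve goroutine per client serveCtx with the configured
// gRPC keepalive options.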
func (e *Etcd) serveClients() (err error) {
	if !e.cfg.ClientTLSInfo.Empty() {
		plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
	}
	if e.cfg.CorsInfo.String() != "" {
		plog.Infof("cors = %s", e.cfg.CorsInfo)
	}

	// Start a client server goroutine for each listen address
	var h http.Handler
	if e.Config().EnableV2 {
		if len(e.Config().ExperimentalEnableV2V3) > 0 {
			srv := v2v3.NewServer(v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
			h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout())
		} else {
			h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
		}
	} else {
		mux := http.NewServeMux()
		etcdhttp.HandleBasic(mux, e.Server)
		h = mux
	}
	h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})

	gopts := []grpc.ServerOption{}
	if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
		gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             e.cfg.GRPCKeepAliveMinTime,
			PermitWithoutStream: false,
		}))
	}
	if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
		e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
		gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    e.cfg.GRPCKeepAliveInterval,
			Timeout: e.cfg.GRPCKeepAliveTimeout,
		}))
	}

	// start client servers in a goroutine
	for _, sctx := range e.sctxs {
		go func(s *serveCtx) {
			e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
		}(sctx)
	}
	return nil
}
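
// serveMetrics starts a standalone metrics/health listener for every URL in
// cfg.ListenMetricsUrls, reusing the client TLS configuration for non-HTTP URLs.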
func (e *Etcd) serveMetrics() (err error) {
	if e.cfg.Metrics == "extensive" {
		grpc_prometheus.EnableHandlingTimeHistogram()
	}

	if len(e.cfg.ListenMetricsUrls) > 0 {
		metricsMux := http.NewServeMux()
		etcdhttp.HandleMetricsHealth(metricsMux, e.Server)

		for _, murl := range e.cfg.ListenMetricsUrls {
			tlsInfo := &e.cfg.ClientTLSInfo
			if murl.Scheme == "http" {
				tlsInfo = nil
			}
			ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
			if err != nil {
				return err
			}
			e.metricsListeners = append(e.metricsListeners, ml)
			go func(u url.URL, ln net.Listener) {
				plog.Info("listening for metrics on ", u.String())
				e.errHandler(http.Serve(ln, metricsMux))
			}(murl, ml)
		}
	}
	return nil
}
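
// errHandler forwards a serve error to e.errc unless the etcd instance is already
// stopping, in which case the error is dropped.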
func (e *Etcd) errHandler(err error) {
	select {
	case <-e.stopc:
		return
	default:
	}
	select {
	case <-e.stopc:
	case e.errc <- err:
	}
}
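
// parseCompactionRetention converts the configured retention string. A bare integer
// is interpreted as a revision count in revision mode, or as a number of hours in
// periodic mode; any other value must parse as a time.Duration (for example, "10m"
// under periodic compaction, an illustrative value rather than a default).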
func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
	h, err := strconv.Atoi(retention)
	if err == nil {
		switch mode {
		case compactor.ModeRevision:
			ret = time.Duration(int64(h))
		case compactor.ModePeriodic:
			ret = time.Duration(int64(h)) * time.Hour
		}
	} else {
		// periodic compaction
		ret, err = time.ParseDuration(retention)
		if err != nil {
			return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
		}
	}
	return ret, nil
}