@@ -27,6 +27,7 @@ import (
2727 "github.com/pingcap/log"
2828 "github.com/tikv/pd/client/errs"
2929 "github.com/tikv/pd/client/grpcutil"
30+ "github.com/tikv/pd/client/timerpool"
3031 "github.com/tikv/pd/client/tsoutil"
3132 "go.uber.org/zap"
3233 "google.golang.org/grpc"
@@ -139,11 +140,24 @@ func (c *tsoClient) updateTSODispatcher() {
139140}
140141
// deadline tracks the timeout for one in-flight batch of TSO requests,
// pairing a pooled timer with the hooks needed to abort or acknowledge
// the batch.
type deadline struct {
	// timer is borrowed from timerpool.GlobalTimerPool (see newTSDeadline);
	// whichever select arm consumes or abandons it must return it to the
	// pool with timerpool.GlobalTimerPool.Put.
	timer *time.Timer
	// done is closed/signaled when the batch completes before the timer
	// fires, so the watcher can skip cancellation.
	done chan struct{}
	// cancel aborts the batch's context when the timer fires first.
	cancel context.CancelFunc
}
146147
// newTSDeadline builds a deadline whose timer is taken from the global
// timer pool rather than allocated per call (as the previous
// time.After-based code did), avoiding one timer allocation per request
// batch. Ownership of the timer transfers to the receiver of the
// returned *deadline, which must eventually hand it back via
// timerpool.GlobalTimerPool.Put.
func newTSDeadline(
	timeout time.Duration,
	done chan struct{},
	cancel context.CancelFunc,
) *deadline {
	// Get returns a timer already reset to fire after `timeout`.
	timer := timerpool.GlobalTimerPool.Get(timeout)
	return &deadline{
		timer:  timer,
		done:   done,
		cancel: cancel,
	}
}
160+
147161func (c * tsoClient ) tsCancelLoop () {
148162 defer c .wg .Done ()
149163
@@ -172,19 +186,21 @@ func (c *tsoClient) tsCancelLoop() {
172186
173187func (c * tsoClient ) watchTSDeadline (ctx context.Context , dcLocation string ) {
174188 if _ , exist := c .tsDeadline .Load (dcLocation ); ! exist {
175- tsDeadlineCh := make (chan deadline , 1 )
189+ tsDeadlineCh := make (chan * deadline , 1 )
176190 c .tsDeadline .Store (dcLocation , tsDeadlineCh )
177- go func (dc string , tsDeadlineCh <- chan deadline ) {
191+ go func (dc string , tsDeadlineCh <- chan * deadline ) {
178192 for {
179193 select {
180194 case d := <- tsDeadlineCh :
181195 select {
182- case <- d .timer :
196+ case <- d .timer . C :
183197 log .Error ("[tso] tso request is canceled due to timeout" , zap .String ("dc-location" , dc ), errs .ZapError (errs .ErrClientGetTSOTimeout ))
184198 d .cancel ()
199+ timerpool .GlobalTimerPool .Put (d .timer )
185200 case <- d .done :
186- continue
201+ timerpool . GlobalTimerPool . Put ( d . timer )
187202 case <- ctx .Done ():
203+ timerpool .GlobalTimerPool .Put (d .timer )
188204 return
189205 }
190206 case <- ctx .Done ():
@@ -234,6 +250,8 @@ func (c *tsoClient) checkAllocator(
234250 }()
235251 cc , u := c .GetTSOAllocatorClientConnByDCLocation (dc )
236252 healthCli := healthpb .NewHealthClient (cc )
253+ ticker := time .NewTicker (time .Second )
254+ defer ticker .Stop ()
237255 for {
238256 // the pd/allocator leader change, we need to re-establish the stream
239257 if u != url {
@@ -259,7 +277,7 @@ func (c *tsoClient) checkAllocator(
259277 select {
260278 case <- dispatcherCtx .Done ():
261279 return
262- case <- time . After ( time . Second ) :
280+ case <- ticker . C :
263281 // To ensure we can get the latest allocator leader
264282 // and once the leader is changed, we can exit this function.
265283 _ , u = c .GetTSOAllocatorClientConnByDCLocation (dc )
@@ -366,6 +384,7 @@ func (c *tsoClient) handleDispatcher(
366384
367385 // Loop through each batch of TSO requests and send them for processing.
368386 streamLoopTimer := time .NewTimer (c .option .timeout )
387+ defer streamLoopTimer .Stop ()
369388tsoBatchLoop:
370389 for {
371390 select {
@@ -389,6 +408,15 @@ tsoBatchLoop:
389408 if maxBatchWaitInterval >= 0 {
390409 tbc .adjustBestBatchSize ()
391410 }
411+ // Stop the timer if it's not stopped.
412+ if ! streamLoopTimer .Stop () {
413+ select {
414+ case <- streamLoopTimer .C : // try to drain from the channel
415+ default :
416+ }
417+ }
418+ // We need be careful here, see more details in the comments of Timer.Reset.
419+ // https://pkg.go.dev/time@master#Timer.Reset
392420 streamLoopTimer .Reset (c .option .timeout )
393421 // Choose a stream to send the TSO gRPC request.
394422 streamChoosingLoop:
@@ -403,16 +431,20 @@ tsoBatchLoop:
403431 if c .updateTSOConnectionCtxs (dispatcherCtx , dc , & connectionCtxs ) {
404432 continue streamChoosingLoop
405433 }
434+ timer := time .NewTimer (retryInterval )
406435 select {
407436 case <- dispatcherCtx .Done ():
437+ timer .Stop ()
408438 return
409439 case <- streamLoopTimer .C :
410440 err = errs .ErrClientCreateTSOStream .FastGenByArgs (errs .RetryTimeoutErr )
411441 log .Error ("[tso] create tso stream error" , zap .String ("dc-location" , dc ), errs .ZapError (err ))
412442 c .svcDiscovery .ScheduleCheckMemberChanged ()
413443 c .finishRequest (tbc .getCollectedRequests (), 0 , 0 , 0 , errors .WithStack (err ))
444+ timer .Stop ()
414445 continue tsoBatchLoop
415- case <- time .After (retryInterval ):
446+ case <- timer .C :
447+ timer .Stop ()
416448 continue streamChoosingLoop
417449 }
418450 }
@@ -429,11 +461,7 @@ tsoBatchLoop:
429461 }
430462 }
431463 done := make (chan struct {})
432- dl := deadline {
433- timer : time .After (c .option .timeout ),
434- done : done ,
435- cancel : cancel ,
436- }
464+ dl := newTSDeadline (c .option .timeout , done , cancel )
437465 tsDeadlineCh , ok := c .tsDeadline .Load (dc )
438466 for ! ok || tsDeadlineCh == nil {
439467 c .scheduleCheckTSDeadline ()
@@ -443,7 +471,7 @@ tsoBatchLoop:
443471 select {
444472 case <- dispatcherCtx .Done ():
445473 return
446- case tsDeadlineCh .(chan deadline ) <- dl :
474+ case tsDeadlineCh .(chan * deadline ) <- dl :
447475 }
448476 opts = extractSpanReference (tbc , opts [:0 ])
449477 err = c .processRequests (stream , dc , tbc , opts )
@@ -558,6 +586,8 @@ func (c *tsoClient) tryConnectToTSO(
558586 }
559587 // retry several times before falling back to the follower when the network problem happens
560588
589+ ticker := time .NewTicker (retryInterval )
590+ defer ticker .Stop ()
561591 for i := 0 ; i < maxRetryTimes ; i ++ {
562592 c .svcDiscovery .ScheduleCheckMemberChanged ()
563593 cc , url = c .GetTSOAllocatorClientConnByDCLocation (dc )
@@ -587,7 +617,7 @@ func (c *tsoClient) tryConnectToTSO(
587617 select {
588618 case <- dispatcherCtx .Done ():
589619 return err
590- case <- time . After ( retryInterval ) :
620+ case <- ticker . C :
591621 }
592622 }
593623
0 commit comments