Skip to content

Commit e2c713b

Browse files
committed
WIP: Only obtain a bearer token once at a time
Currently, on pushes, we can start several concurrent layer pushes; each one will check for a bearer token in tokenCache, find none, and ask the server for one, and then write it into the cache. So, we can hammer the server with 6 basically-concurrent token requests. That's unnecessary, slower than just asking once, and potentially might impact rate limiting heuristics. Instead, serialize writes to a bearerToken so that we only have one request in flight at a time.

This does not apply to pulls, where the first request is for a manifest; that obtains a token, so subsequent concurrent layer pulls will not request a token again.

WIP: Clean up the debugging log entries.

Signed-off-by: Miloslav Trmač <[email protected]>
1 parent 025f73c commit e2c713b

File tree

1 file changed

+75
-20
lines changed

1 file changed

+75
-20
lines changed

docker/docker_client.go

+75-20
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import (
3232
digest "github.com/opencontainers/go-digest"
3333
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
3434
"github.com/sirupsen/logrus"
35+
"golang.org/x/sync/semaphore"
3536
)
3637

3738
const (
@@ -84,8 +85,19 @@ type extensionSignatureList struct {
8485
Signatures []extensionSignature `json:"signatures"`
8586
}
8687

87-
// bearerToken records a cached token we can use to authenticate.
88+
// bearerToken records a cached token we can use to authenticate, or a pending process to obtain one.
89+
//
90+
// The goroutine obtaining the token holds lock to block concurrent token requests, and fills the structure (err and possibly the other fields)
91+
// before releasing the lock.
92+
// Other goroutines obtain lock to block on the token request, if any; and then inspect err to see if the token is usable.
93+
// If it is not, they try to get a new one.
8894
type bearerToken struct {
95+
// lock is held while obtaining the token. Potentially nested inside dockerClient.tokenCacheLock.
96+
// This is a counting semaphore only because we need a cancellable lock operation.
97+
lock *semaphore.Weighted
98+
99+
// The following fields can only be accessed with lock held.
100+
err error // nil if the token was successfully obtained (but may be expired); an error if the next lock holder _must_ obtain a new token.
89101
token string
90102
expirationTime time.Time
91103
}
@@ -115,7 +127,7 @@ type dockerClient struct {
115127
supportsSignatures bool
116128

117129
// Private state for setupRequestAuth (key: string, value: bearerToken)
118-
tokenCacheLock sync.Mutex // Protects tokenCache
130+
tokenCacheLock sync.Mutex // Protects tokenCache.
119131
tokenCache map[string]*bearerToken
120132
// Private state for detectProperties:
121133
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
@@ -674,31 +686,74 @@ func (c *dockerClient) obtainBearerToken(ctx context.Context, challenge challeng
674686
scopes = append(scopes, *extraScope)
675687
}
676688

677-
var token *bearerToken
678-
var inCache bool
679-
func() { // A scope for defer
689+
logrus.Debugf("REMOVE: Checking token cache for key %q", cacheKey)
690+
token, newEntry, err := func() (*bearerToken, bool, error) { // A scope for defer
680691
c.tokenCacheLock.Lock()
681692
defer c.tokenCacheLock.Unlock()
682-
token, inCache = c.tokenCache[cacheKey]
683-
}()
684-
if !inCache || time.Now().After(token.expirationTime) {
685-
token = &bearerToken{}
686-
687-
var err error
688-
if c.auth.IdentityToken != "" {
689-
err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
693+
token, ok := c.tokenCache[cacheKey]
694+
if ok {
695+
return token, false, nil
690696
} else {
691-
err = c.getBearerToken(ctx, token, challenge, scopes)
697+
logrus.Debugf("REMOVE: No token cache for key %q, allocating one…", cacheKey)
698+
token = &bearerToken{
699+
lock: semaphore.NewWeighted(1),
700+
}
701+
// If this is a new *bearerToken, lock the entry before adding it to the cache, so that any other goroutine that finds
702+
// this entry blocks until we obtain the token for the first time, and does not see an empty object
703+
// (and does not try to obtain the token itself when we are going to do so).
704+
if err := token.lock.Acquire(ctx, 1); err != nil {
705+
// We do not block on this Acquire, so we don’t really expect to fail here — but if ctx is canceled,
706+
// there is no point in trying to continue anyway.
707+
return nil, false, err
708+
}
709+
c.tokenCache[cacheKey] = token
710+
return token, true, nil
692711
}
693-
if err != nil {
712+
}()
713+
if err != nil {
714+
return "", err
715+
}
716+
if !newEntry {
717+
// If this is an existing *bearerToken, obtain the lock only after releasing c.tokenCacheLock,
718+
// so that users of other cacheKey values are not blocked for the whole duration of our HTTP roundtrip.
719+
logrus.Debugf("REMOVE: Found existing token cache for key %q, getting lock", cacheKey)
720+
if err := token.lock.Acquire(ctx, 1); err != nil {
694721
return "", err
695722
}
723+
logrus.Debugf("REMOVE: Locked existing token cache for key %q", cacheKey)
724+
}
696725

697-
func() { // A scope for defer
698-
c.tokenCacheLock.Lock()
699-
defer c.tokenCacheLock.Unlock()
700-
c.tokenCache[cacheKey] = token
701-
}()
726+
defer token.lock.Release(1)
727+
728+
// Determine if the bearerToken is usable: if it is not, log the cause and fall through, otherwise return early.
729+
switch {
730+
case newEntry:
731+
logrus.Debugf("REMOVE: New token cache entry for key %q, getting first token", cacheKey)
732+
case token.err != nil:
733+
// If obtaining a token fails for any reason, the request that triggered that will fail;
734+
// other requests will see token.err and try obtaining their own token, one goroutine at a time.
735+
// (Consider that a request can fail because a very short timeout was provided to _that one operation_ using a context.Context;
736+
// that clearly shouldn’t prevent other operations from trying with a longer timeout.)
737+
//
738+
// If we got here while holding token.lock, we are the goroutine responsible for trying again; others are blocked
739+
// on token.lock.
740+
logrus.Debugf("REMOVE: Token cache for key %q records failure %v, getting new token", cacheKey, token.err)
741+
case time.Now().After(token.expirationTime):
742+
logrus.Debugf("REMOVE: Token cache for key %q is expired, getting new token", cacheKey)
743+
744+
default:
745+
return token.token, nil
746+
}
747+
748+
if c.auth.IdentityToken != "" {
749+
err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
750+
} else {
751+
err = c.getBearerToken(ctx, token, challenge, scopes)
752+
}
753+
logrus.Debugf("REMOVE: Obtaining a token for key %q, error %v", cacheKey, err)
754+
token.err = err
755+
if token.err != nil {
756+
return "", token.err
702757
}
703758
return token.token, nil
704759
}

0 commit comments

Comments
 (0)