Skip to content

Commit fba06ab

Browse files
committed
WIP: Only obtain a bearer token once at a time

Currently, on pushes, we can start several concurrent layer pushes; each one will check for a bearer token in tokenCache, find none, ask the server for one, and then write it into the cache. So, we can hammer the server with 6 basically-concurrent token requests. That is unnecessary, slower than asking just once, and potentially might impact rate-limiting heuristics.

Instead, serialize writes to a bearerToken so that we only have one token request in flight at a time.

This does not apply to pulls, where the first request is for a manifest; that request obtains a token, so subsequent concurrent layer pulls will not request a token again.

WIP: Clean up the debugging log entries.

Signed-off-by: Miloslav Trmač <[email protected]>
1 parent 560f6cc commit fba06ab

File tree

1 file changed

+75
-20
lines changed

1 file changed

+75
-20
lines changed

docker/docker_client.go

+75-20
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ import (
3333
digest "github.com/opencontainers/go-digest"
3434
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
3535
"github.com/sirupsen/logrus"
36+
"golang.org/x/sync/semaphore"
3637
)
3738

3839
const (
@@ -85,8 +86,19 @@ type extensionSignatureList struct {
8586
Signatures []extensionSignature `json:"signatures"`
8687
}
8788

88-
// bearerToken records a cached token we can use to authenticate.
89+
// bearerToken records a cached token we can use to authenticate, or a pending process to obtain one.
90+
//
91+
// The goroutine obtaining the token holds lock to block concurrent token requests, and fills the structure (err and possibly the other fields)
92+
// before releasing the lock.
93+
// Other goroutines obtain lock to block on the token request, if any; and then inspect err to see if the token is usable.
94+
// If it is not, they try to get a new one.
8995
type bearerToken struct {
96+
// lock is held while obtaining the token. Potentially nested inside dockerClient.tokenCacheLock.
97+
// This is a counting semaphore only because we need a cancellable lock operation.
98+
lock *semaphore.Weighted
99+
100+
// The following fields can only be accessed with lock held.
101+
err error // nil if the token was successfully obtained (but may be expired); an error if the next lock holder _must_ obtain a new token.
90102
token string
91103
expirationTime time.Time
92104
}
@@ -116,7 +128,7 @@ type dockerClient struct {
116128
supportsSignatures bool
117129

118130
// Private state for setupRequestAuth (key: string, value: bearerToken)
119-
tokenCacheLock sync.Mutex // Protects tokenCache
131+
tokenCacheLock sync.Mutex // Protects tokenCache.
120132
tokenCache map[string]*bearerToken
121133
// Private state for detectProperties:
122134
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
@@ -746,31 +758,74 @@ func (c *dockerClient) obtainBearerToken(ctx context.Context, challenge challeng
746758
scopes = append(scopes, *extraScope)
747759
}
748760

749-
var token *bearerToken
750-
var inCache bool
751-
func() { // A scope for defer
761+
logrus.Debugf("REMOVE: Checking token cache for key %q", cacheKey)
762+
token, newEntry, err := func() (*bearerToken, bool, error) { // A scope for defer
752763
c.tokenCacheLock.Lock()
753764
defer c.tokenCacheLock.Unlock()
754-
token, inCache = c.tokenCache[cacheKey]
755-
}()
756-
if !inCache || time.Now().After(token.expirationTime) {
757-
token = &bearerToken{}
758-
759-
var err error
760-
if c.auth.IdentityToken != "" {
761-
err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
765+
token, ok := c.tokenCache[cacheKey]
766+
if ok {
767+
return token, false, nil
762768
} else {
763-
err = c.getBearerToken(ctx, token, challenge, scopes)
769+
logrus.Debugf("REMOVE: No token cache for key %q, allocating one…", cacheKey)
770+
token = &bearerToken{
771+
lock: semaphore.NewWeighted(1),
772+
}
773+
// If this is a new *bearerToken, lock the entry before adding it to the cache, so that any other goroutine that finds
774+
// this entry blocks until we obtain the token for the first time, and does not see an empty object
775+
// (and does not try to obtain the token itself when we are going to do so).
776+
if err := token.lock.Acquire(ctx, 1); err != nil {
777+
// We do not block on this Acquire, so we don’t really expect to fail here — but if ctx is canceled,
778+
// there is no point in trying to continue anyway.
779+
return nil, false, err
780+
}
781+
c.tokenCache[cacheKey] = token
782+
return token, true, nil
764783
}
765-
if err != nil {
784+
}()
785+
if err != nil {
786+
return "", err
787+
}
788+
if !newEntry {
789+
// If this is an existing *bearerToken, obtain the lock only after releasing c.tokenCacheLock,
790+
// so that users of other cacheKey values are not blocked for the whole duration of our HTTP roundtrip.
791+
logrus.Debugf("REMOVE: Found existing token cache for key %q, getting lock", cacheKey)
792+
if err := token.lock.Acquire(ctx, 1); err != nil {
766793
return "", err
767794
}
795+
logrus.Debugf("REMOVE: Locked existing token cache for key %q", cacheKey)
796+
}
768797

769-
func() { // A scope for defer
770-
c.tokenCacheLock.Lock()
771-
defer c.tokenCacheLock.Unlock()
772-
c.tokenCache[cacheKey] = token
773-
}()
798+
defer token.lock.Release(1)
799+
800+
// Determine if the bearerToken is usable: if it is not, log the cause and fall through, otherwise return early.
801+
switch {
802+
case newEntry:
803+
logrus.Debugf("REMOVE: New token cache entry for key %q, getting first token", cacheKey)
804+
case token.err != nil:
805+
// If obtaining a token fails for any reason, the request that triggered that will fail;
806+
// other requests will see token.err and try obtaining their own token, one goroutine at a time.
807+
// (Consider that a request can fail because a very short timeout was provided to _that one operation_ using a context.Context;
808+
// that clearly shouldn’t prevent other operations from trying with a longer timeout.)
809+
//
810+
// If we got here while holding token.lock, we are the goroutine responsible for trying again; others are blocked
811+
// on token.lock.
812+
logrus.Debugf("REMOVE: Token cache for key %q records failure %v, getting new token", cacheKey, token.err)
813+
case time.Now().After(token.expirationTime):
814+
logrus.Debugf("REMOVE: Token cache for key %q is expired, getting new token", cacheKey)
815+
816+
default:
817+
return token.token, nil
818+
}
819+
820+
if c.auth.IdentityToken != "" {
821+
err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
822+
} else {
823+
err = c.getBearerToken(ctx, token, challenge, scopes)
824+
}
825+
logrus.Debugf("REMOVE: Obtaining a token for key %q, error %v", cacheKey, err)
826+
token.err = err
827+
if token.err != nil {
828+
return "", token.err
774829
}
775830
return token.token, nil
776831
}

0 commit comments

Comments
 (0)