Skip to content

Commit 422bf12

Browse files
committed
WIP: Only obtain a bearer token once at a time
Currently, on pushes, we can start several concurrent layer pushes; each one will check for a bearer token in tokenCache, find none, and ask the server for one, and then write it into the cache. So, we can hammer the server with 6 basically-concurrent token requests. That's unnecessary, slower than just asking once, and potentially might impact rate limiting heuristics. Instead, serialize writes to a bearerToken so that we only have one request in flight at a time. This does not apply to pulls, where the first request is for a manifest; that obtains a token, so subsequent concurrent layer pulls will not request a token again. WIP: Clean up the debugging log entries. Signed-off-by: Miloslav Trmač <[email protected]>
1 parent 8148b3e commit 422bf12

File tree

1 file changed

+75
-20
lines changed

1 file changed

+75
-20
lines changed

docker/docker_client.go

+75-20
Original file line number | Diff line number | Diff line change
@@ -32,6 +32,7 @@ import (
3232
digest "github.com/opencontainers/go-digest"
3333
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
3434
"github.com/sirupsen/logrus"
35+
"golang.org/x/sync/semaphore"
3536
)
3637

3738
const (
@@ -84,8 +85,19 @@ type extensionSignatureList struct {
8485
Signatures []extensionSignature `json:"signatures"`
8586
}
8687

87-
// bearerToken records a cached token we can use to authenticate.
88+
// bearerToken records a cached token we can use to authenticate, or a pending process to obtain one.
89+
//
90+
// The goroutine obtaining the token holds lock to block concurrent token requests, and fills the structure (err and possibly the other fields)
91+
// before releasing the lock.
92+
// Other goroutines obtain lock to block on the token request, if any; and then inspect err to see if the token is usable.
93+
// If it is not, they try to get a new one.
8894
type bearerToken struct {
95+
// lock is held while obtaining the token. Potentially nested inside dockerClient.tokenCacheLock.
96+
// This is a counting semaphore only because we need a cancellable lock operation.
97+
lock *semaphore.Weighted
98+
99+
// The following fields can only be accessed with lock held.
100+
err error // nil if the token was successfully obtained (but may be expired); an error if the next lock holder _must_ obtain a new token.
89101
token string
90102
expirationTime time.Time
91103
}
@@ -115,7 +127,7 @@ type dockerClient struct {
115127
supportsSignatures bool
116128

117129
// Private state for setupRequestAuth (key: string, value: bearerToken)
118-
tokenCacheLock sync.Mutex // Protects tokenCache
130+
tokenCacheLock sync.Mutex // Protects tokenCache.
119131
tokenCache map[string]*bearerToken
120132
// Private state for detectProperties:
121133
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
@@ -741,31 +753,74 @@ func (c *dockerClient) obtainBearerToken(ctx context.Context, challenge challeng
741753
scopes = append(scopes, *extraScope)
742754
}
743755

744-
var token *bearerToken
745-
var inCache bool
746-
func() { // A scope for defer
756+
logrus.Debugf("REMOVE: Checking token cache for key %q", cacheKey)
757+
token, newEntry, err := func() (*bearerToken, bool, error) { // A scope for defer
747758
c.tokenCacheLock.Lock()
748759
defer c.tokenCacheLock.Unlock()
749-
token, inCache = c.tokenCache[cacheKey]
750-
}()
751-
if !inCache || time.Now().After(token.expirationTime) {
752-
token = &bearerToken{}
753-
754-
var err error
755-
if c.auth.IdentityToken != "" {
756-
err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
760+
token, ok := c.tokenCache[cacheKey]
761+
if ok {
762+
return token, false, nil
757763
} else {
758-
err = c.getBearerToken(ctx, token, challenge, scopes)
764+
logrus.Debugf("REMOVE: No token cache for key %q, allocating one…", cacheKey)
765+
token = &bearerToken{
766+
lock: semaphore.NewWeighted(1),
767+
}
768+
// If this is a new *bearerToken, lock the entry before adding it to the cache, so that any other goroutine that finds
769+
// this entry blocks until we obtain the token for the first time, and does not see an empty object
770+
// (and does not try to obtain the token itself when we are going to do so).
771+
if err := token.lock.Acquire(ctx, 1); err != nil {
772+
// We do not block on this Acquire, so we don’t really expect to fail here — but if ctx is canceled,
773+
// there is no point in trying to continue anyway.
774+
return nil, false, err
775+
}
776+
c.tokenCache[cacheKey] = token
777+
return token, true, nil
759778
}
760-
if err != nil {
779+
}()
780+
if err != nil {
781+
return "", err
782+
}
783+
if !newEntry {
784+
// If this is an existing *bearerToken, obtain the lock only after releasing c.tokenCacheLock,
785+
// so that users of other cacheKey values are not blocked for the whole duration of our HTTP roundtrip.
786+
logrus.Debugf("REMOVE: Found existing token cache for key %q, getting lock", cacheKey)
787+
if err := token.lock.Acquire(ctx, 1); err != nil {
761788
return "", err
762789
}
790+
logrus.Debugf("REMOVE: Locked existing token cache for key %q", cacheKey)
791+
}
763792

764-
func() { // A scope for defer
765-
c.tokenCacheLock.Lock()
766-
defer c.tokenCacheLock.Unlock()
767-
c.tokenCache[cacheKey] = token
768-
}()
793+
defer token.lock.Release(1)
794+
795+
// Determine if the bearerToken is usable: if it is not, log the cause and fall through, otherwise return early.
796+
switch {
797+
case newEntry:
798+
logrus.Debugf("REMOVE: New token cache entry for key %q, getting first token", cacheKey)
799+
case token.err != nil:
800+
// If obtaining a token fails for any reason, the request that triggered that will fail;
801+
// other requests will see token.err and try obtaining their own token, one goroutine at a time.
802+
// (Consider that a request can fail because a very short timeout was provided to _that one operation_ using a context.Context;
803+
// that clearly shouldn’t prevent other operations from trying with a longer timeout.)
804+
//
805+
// If we got here while holding token.lock, we are the goroutine responsible for trying again; others are blocked
806+
// on token.lock.
807+
logrus.Debugf("REMOVE: Token cache for key %q records failure %v, getting new token", cacheKey, token.err)
808+
case time.Now().After(token.expirationTime):
809+
logrus.Debugf("REMOVE: Token cache for key %q is expired, getting new token", cacheKey)
810+
811+
default:
812+
return token.token, nil
813+
}
814+
815+
if c.auth.IdentityToken != "" {
816+
err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
817+
} else {
818+
err = c.getBearerToken(ctx, token, challenge, scopes)
819+
}
820+
logrus.Debugf("REMOVE: Obtaining a token for key %q, error %v", cacheKey, err)
821+
token.err = err
822+
if token.err != nil {
823+
return "", token.err
769824
}
770825
return token.token, nil
771826
}

0 commit comments

Comments (0)