Commit 8636ccc
WIP: Only obtain a bearer token once at a time
Currently, on pushes, we can start several concurrent layer pushes; each one will check for a bearer token in tokenCache, find none, ask the server for one, and then write it into the cache. So, we can hammer the server with 6 basically-concurrent token requests. That is unnecessary, slower than asking just once, and might affect the server's rate-limiting heuristics.

Instead, serialize writes to a bearerToken, so that we only have one token request in flight at a time.

This does not apply to pulls, where the first request is for a manifest; that obtains a token, so subsequent concurrent layer pulls will not request a token again.

WIP: Clean up the debugging log entries.

Signed-off-by: Miloslav Trmač <[email protected]>
Parent: 939e326
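The patch serializes these requests with a golang.org/x/sync/semaphore.Weighted of capacity 1 rather than a sync.Mutex, because Weighted.Acquire takes a context.Context and can therefore be abandoned on cancellation, which sync.Mutex.Lock cannot. As background, here is a minimal standalone sketch of that building block; the cancellableMutex wrapper and its names are invented for illustration and are not part of the commit:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

// cancellableMutex is a mutex whose Lock can be abandoned via a context.
// It is a counting semaphore only because we need a cancellable lock
// operation, as the bearerToken.lock comment in the diff below puts it.
type cancellableMutex struct {
	sem *semaphore.Weighted
}

func newCancellableMutex() *cancellableMutex {
	return &cancellableMutex{sem: semaphore.NewWeighted(1)}
}

// Lock blocks until the mutex is acquired, or returns ctx.Err() if ctx is
// canceled first.
func (m *cancellableMutex) Lock(ctx context.Context) error {
	return m.sem.Acquire(ctx, 1)
}

// Unlock releases the mutex.
func (m *cancellableMutex) Unlock() {
	m.sem.Release(1)
}

func main() {
	m := newCancellableMutex()

	// The first caller takes the lock and holds it across a slow operation.
	_ = m.Lock(context.Background())

	// A second caller gives up after 100 ms instead of blocking forever;
	// this is what lets a short per-operation timeout fail only that
	// operation rather than wedging every waiter.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	if err := m.Lock(ctx); err != nil {
		fmt.Println("lock not acquired:", err) // "context deadline exceeded"
	}

	m.Unlock()
}
```

Note that a failed Acquire never holds the semaphore, so a caller that returns on error must not Release it; the diff below relies on exactly that.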

1 file changed: docker/docker_client.go (+75 −20)
--- a/docker/docker_client.go
+++ b/docker/docker_client.go
@@ -34,6 +34,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sync/semaphore"
 )
 
 const (
@@ -86,8 +87,19 @@ type extensionSignatureList struct {
 	Signatures []extensionSignature `json:"signatures"`
 }
 
-// bearerToken records a cached token we can use to authenticate.
+// bearerToken records a cached token we can use to authenticate, or a pending process to obtain one.
+//
+// The goroutine obtaining the token holds lock to block concurrent token requests, and fills the structure (err and possibly the other fields)
+// before releasing the lock.
+// Other goroutines obtain lock to block on the token request, if any; and then inspect err to see if the token is usable.
+// If it is not, they try to get a new one.
 type bearerToken struct {
+	// lock is held while obtaining the token. Potentially nested inside dockerClient.tokenCacheLock.
+	// This is a counting semaphore only because we need a cancellable lock operation.
+	lock *semaphore.Weighted
+
+	// The following fields can only be accessed with lock held.
+	err            error // nil if the token was successfully obtained (but may be expired); an error if the next lock holder _must_ obtain a new token.
 	token          string
 	expirationTime time.Time
 }
@@ -117,7 +129,7 @@ type dockerClient struct {
 	supportsSignatures bool
 
 	// Private state for setupRequestAuth (key: string, value: bearerToken)
-	tokenCacheLock sync.Mutex // Protects tokenCache
+	tokenCacheLock sync.Mutex // Protects tokenCache.
 	tokenCache     map[string]*bearerToken
 	// Private state for detectProperties:
 	detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
@@ -752,31 +764,74 @@ func (c *dockerClient) obtainBearerToken(ctx context.Context, challenge challeng
 		scopes = append(scopes, *extraScope)
 	}
 
-	var token *bearerToken
-	var inCache bool
-	func() { // A scope for defer
+	logrus.Debugf("REMOVE: Checking token cache for key %q", cacheKey)
+	token, newEntry, err := func() (*bearerToken, bool, error) { // A scope for defer
 		c.tokenCacheLock.Lock()
 		defer c.tokenCacheLock.Unlock()
-		token, inCache = c.tokenCache[cacheKey]
-	}()
-	if !inCache || time.Now().After(token.expirationTime) {
-		token = &bearerToken{}
-
-		var err error
-		if c.auth.IdentityToken != "" {
-			err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
+		token, ok := c.tokenCache[cacheKey]
+		if ok {
+			return token, false, nil
 		} else {
-			err = c.getBearerToken(ctx, token, challenge, scopes)
+			logrus.Debugf("REMOVE: No token cache for key %q, allocating one…", cacheKey)
+			token = &bearerToken{
+				lock: semaphore.NewWeighted(1),
+			}
+			// If this is a new *bearerToken, lock the entry before adding it to the cache, so that any other goroutine that finds
+			// this entry blocks until we obtain the token for the first time, and does not see an empty object
+			// (and does not try to obtain the token itself when we are going to do so).
+			if err := token.lock.Acquire(ctx, 1); err != nil {
+				// We do not block on this Acquire, so we don’t really expect to fail here — but if ctx is canceled,
+				// there is no point in trying to continue anyway.
+				return nil, false, err
+			}
+			c.tokenCache[cacheKey] = token
+			return token, true, nil
 		}
-		if err != nil {
+	}()
+	if err != nil {
+		return "", err
+	}
+	if !newEntry {
+		// If this is an existing *bearerToken, obtain the lock only after releasing c.tokenCacheLock,
+		// so that users of other cacheKey values are not blocked for the whole duration of our HTTP roundtrip.
+		logrus.Debugf("REMOVE: Found existing token cache for key %q, getting lock", cacheKey)
+		if err := token.lock.Acquire(ctx, 1); err != nil {
 			return "", err
 		}
+		logrus.Debugf("REMOVE: Locked existing token cache for key %q", cacheKey)
+	}
 
-	func() { // A scope for defer
-		c.tokenCacheLock.Lock()
-		defer c.tokenCacheLock.Unlock()
-		c.tokenCache[cacheKey] = token
-	}()
+	defer token.lock.Release(1)
+
+	// Determine if the bearerToken is usable: if it is not, log the cause and fall through, otherwise return early.
+	switch {
+	case newEntry:
+		logrus.Debugf("REMOVE: New token cache entry for key %q, getting first token", cacheKey)
+	case token.err != nil:
+		// If obtaining a token fails for any reason, the request that triggered that will fail;
+		// other requests will see token.err and try obtaining their own token, one goroutine at a time.
+		// (Consider that a request can fail because a very short timeout was provided to _that one operation_ using a context.Context;
+		// that clearly shouldn’t prevent other operations from trying with a longer timeout.)
+		//
+		// If we got here while holding token.lock, we are the goroutine responsible for trying again; others are blocked
+		// on token.lock.
+		logrus.Debugf("REMOVE: Token cache for key %q records failure %v, getting new token", cacheKey, token.err)
+	case time.Now().After(token.expirationTime):
+		logrus.Debugf("REMOVE: Token cache for key %q is expired, getting new token", cacheKey)
+
+	default:
+		return token.token, nil
+	}
+
+	if c.auth.IdentityToken != "" {
+		err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes)
+	} else {
+		err = c.getBearerToken(ctx, token, challenge, scopes)
	}
+	logrus.Debugf("REMOVE: Obtaining a token for key %q, error %v", cacheKey, err)
+	token.err = err
+	if token.err != nil {
+		return "", token.err
 	}
 	return token.token, nil
 }
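Taken together, the hunks implement a per-key single-flight token fetch: the first goroutine to miss the cache inserts an already-locked placeholder entry, and later goroutines block on that entry's lock instead of issuing requests of their own. Below is a condensed, runnable sketch of that flow; all names here (tokenCache, entry, fetchToken, the fixed 50 ms "server" delay) are invented for illustration, and the expiration check and the OAuth2-vs-token-endpoint split from the real code are omitted:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/semaphore"
)

// entry mirrors the patched bearerToken: a per-key lock plus the fetch result.
type entry struct {
	lock  *semaphore.Weighted // held while fetching
	err   error               // result fields, readable only with lock held
	value string
}

type tokenCache struct {
	mu sync.Mutex // protects m only; never held across a fetch
	m  map[string]*entry
}

var roundTrips atomic.Int32 // counts simulated server requests

// fetchToken stands in for one HTTP token request.
func fetchToken(key string) (string, error) {
	roundTrips.Add(1)
	time.Sleep(50 * time.Millisecond) // simulated network latency
	return "tok-" + key, nil
}

func (c *tokenCache) get(ctx context.Context, key string) (string, error) {
	c.mu.Lock()
	e, ok := c.m[key]
	newEntry := !ok
	if newEntry {
		e = &entry{lock: semaphore.NewWeighted(1)}
		// Lock the fresh entry *before* publishing it, so other goroutines
		// that find it block instead of seeing an empty value or fetching
		// a token of their own.
		if err := e.lock.Acquire(ctx, 1); err != nil {
			c.mu.Unlock()
			return "", err
		}
		c.m[key] = e
	}
	c.mu.Unlock()

	if !newEntry {
		// Block on this key's in-flight fetch only after releasing c.mu,
		// so users of other keys are not stalled for a whole round trip.
		if err := e.lock.Acquire(ctx, 1); err != nil {
			return "", err
		}
	}
	defer e.lock.Release(1)

	if !newEntry && e.err == nil {
		return e.value, nil // someone else already fetched it successfully
	}
	// New entry, or the previous holder failed: fetch (or retry) now.
	e.value, e.err = fetchToken(key)
	return e.value, e.err
}

func main() {
	c := &tokenCache{m: map[string]*entry{}}
	var wg sync.WaitGroup
	for i := 0; i < 6; i++ { // six "concurrent layer pushes"
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _ := c.get(context.Background(), "repo")
			fmt.Println(v)
		}()
	}
	wg.Wait()
	fmt.Println("server round trips:", roundTrips.Load()) // prints 1, not 6
}
```

Running this prints the same token six times and one server round trip; with the pre-patch check-then-fetch pattern, all six goroutines would have missed the cache and fetched, which is exactly the "6 basically-concurrent token requests" the commit message describes.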
