Browse Source

Upgrade golangci to 2.4.0

Signed-off-by: Andy Lo-A-Foe <andy.loafoe@gmail.com>
pull/4154/head
Andy Lo-A-Foe 10 months ago
parent
commit
b76274666a
No known key found for this signature in database
GPG Key ID: C0E4EB79E9E6A23D
  1. 8
      connector/atlassiancrowd/atlassiancrowd_test.go
  2. 101
      server/oauth2.go
  3. 249
      server/rotation.go
  4. 6
      storage/kubernetes/lock.go
  5. 111
      storage/kubernetes/storage.go
  6. 12
      storage/kubernetes/types.go

8
connector/atlassiancrowd/atlassiancrowd_test.go

@ -124,19 +124,19 @@ func TestIdentityFromCrowdUser(t *testing.T) {
// unset
expectEquals(t, i.PreferredUsername, "")
c.Config.PreferredUsernameField = "key"
c.PreferredUsernameField = "key"
i = c.identityFromCrowdUser(user)
expectEquals(t, i.PreferredUsername, "12345")
c.Config.PreferredUsernameField = "name"
c.PreferredUsernameField = "name"
i = c.identityFromCrowdUser(user)
expectEquals(t, i.PreferredUsername, "testuser")
c.Config.PreferredUsernameField = "email"
c.PreferredUsernameField = "email"
i = c.identityFromCrowdUser(user)
expectEquals(t, i.PreferredUsername, "testuser@example.com")
c.Config.PreferredUsernameField = "invalidstring"
c.PreferredUsernameField = "invalidstring"
i = c.identityFromCrowdUser(user)
expectEquals(t, i.PreferredUsername, "")
}

101
server/oauth2.go

@ -3,6 +3,9 @@ package server
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
@ -22,7 +25,6 @@ import (
"github.com/dexidp/dex/connector"
"github.com/dexidp/dex/server/internal"
"github.com/dexidp/dex/server/signer"
"github.com/dexidp/dex/storage"
)
@ -173,6 +175,54 @@ func parseScopes(scopes []string) connector.Scopes {
return s
}
// signatureAlgorithm picks the JOSE signature algorithm to use for a JWT,
// derived from the type of the provided signing key.
func signatureAlgorithm(jwk *jose.JSONWebKey) (alg jose.SignatureAlgorithm, err error) {
	if jwk.Key == nil {
		return alg, errors.New("no signing key")
	}
	switch key := jwk.Key.(type) {
	case *rsa.PrivateKey:
		// OIDC mandates RS256 support, so that is what we always emit for RSA
		// keys. Making this configurable per client (e.g. PS256 or ECDSA
		// variants) is a possible future extension.
		//
		// See https://github.com/dexidp/dex/issues/692
		return jose.RS256, nil
	case *ecdsa.PrivateKey:
		// ECDSA keys aren't actually supported for signing yet, but they're
		// handled here so tests can exercise them. The algorithm is fixed by
		// the curve, so there is exactly one valid answer per key.
		switch key.Params() {
		case elliptic.P256().Params():
			return jose.ES256, nil
		case elliptic.P384().Params():
			return jose.ES384, nil
		case elliptic.P521().Params():
			return jose.ES512, nil
		}
		return alg, errors.New("unsupported ecdsa curve")
	default:
		return alg, fmt.Errorf("unsupported signing key type %T", key)
	}
}
// signPayload signs payload with the given key and algorithm and returns the
// JWS in compact serialization form.
func signPayload(key *jose.JSONWebKey, alg jose.SignatureAlgorithm, payload []byte) (jws string, err error) {
	// Name the local "sgn" rather than "signer" to avoid shadowing the
	// package of the same name elsewhere in this package.
	sgn, err := jose.NewSigner(jose.SigningKey{Key: key, Algorithm: alg}, &jose.SignerOptions{})
	if err != nil {
		return "", fmt.Errorf("new signer: %v", err)
	}
	sig, err := sgn.Sign(payload)
	if err != nil {
		return "", fmt.Errorf("signing payload: %v", err)
	}
	return sig.CompactSerialize()
}
// The hash algorithm for the at_hash is determined by the signing
// algorithm used for the id_token. From the spec:
//
@ -301,6 +351,21 @@ func genSubject(userID string, connID string) (string, error) {
}
func (s *Server) newIDToken(ctx context.Context, clientID string, claims storage.Claims, scopes []string, nonce, accessToken, code, connID string) (idToken string, expiry time.Time, err error) {
keys, err := s.storage.GetKeys(ctx)
if err != nil {
s.logger.ErrorContext(ctx, "failed to get keys", "err", err)
return "", expiry, err
}
signingKey := keys.SigningKey
if signingKey == nil {
return "", expiry, fmt.Errorf("no key to sign payload with")
}
signingAlg, err := signatureAlgorithm(signingKey)
if err != nil {
return "", expiry, err
}
issuedAt := s.now()
expiry = issuedAt.Add(s.idTokensValidFor)
@ -318,13 +383,6 @@ func (s *Server) newIDToken(ctx context.Context, clientID string, claims storage
IssuedAt: issuedAt.Unix(),
}
// Determine signing algorithm from signer
signingAlg, err := s.signer.Algorithm(ctx)
if err != nil {
s.logger.ErrorContext(ctx, "failed to get signing algorithm", "err", err)
return "", expiry, fmt.Errorf("failed to get signing algorithm: %v", err)
}
if accessToken != "" {
atHash, err := accessTokenHash(signingAlg, accessToken)
if err != nil {
@ -344,16 +402,16 @@ func (s *Server) newIDToken(ctx context.Context, clientID string, claims storage
}
for _, scope := range scopes {
switch {
case scope == scopeEmail:
switch scope {
case scopeEmail:
tok.Email = claims.Email
tok.EmailVerified = &claims.EmailVerified
case scope == scopeGroups:
case scopeGroups:
tok.Groups = claims.Groups
case scope == scopeProfile:
case scopeProfile:
tok.Name = claims.Username
tok.PreferredUsername = claims.PreferredUsername
case scope == scopeFederatedID:
case scopeFederatedID:
tok.FederatedIDClaims = &federatedIDClaims{
ConnectorID: connID,
UserID: claims.UserID,
@ -387,7 +445,7 @@ func (s *Server) newIDToken(ctx context.Context, clientID string, claims storage
return "", expiry, fmt.Errorf("could not serialize claims: %v", err)
}
if idToken, err = s.signer.Sign(ctx, payload); err != nil {
if idToken, err = signPayload(signingKey, signingAlg, payload); err != nil {
return "", expiry, fmt.Errorf("failed to sign payload: %v", err)
}
return idToken, expiry, nil
@ -647,12 +705,12 @@ func validateConnectorID(connectors []storage.Connector, connectorID string) boo
return false
}
// signerKeySet implements the oidc.KeySet interface backed by the Dex signer
type signerKeySet struct {
signer signer.Signer
// storageKeySet implements the oidc.KeySet interface backed by Dex storage
type storageKeySet struct {
storage.Storage
}
func (s *signerKeySet) VerifySignature(ctx context.Context, jwt string) (payload []byte, err error) {
func (s *storageKeySet) VerifySignature(ctx context.Context, jwt string) (payload []byte, err error) {
jws, err := jose.ParseSigned(jwt, []jose.SignatureAlgorithm{jose.RS256, jose.RS384, jose.RS512, jose.ES256, jose.ES384, jose.ES512})
if err != nil {
return nil, err
@ -664,11 +722,16 @@ func (s *signerKeySet) VerifySignature(ctx context.Context, jwt string) (payload
break
}
keys, err := s.signer.ValidationKeys(ctx)
skeys, err := s.GetKeys(ctx)
if err != nil {
return nil, err
}
keys := []*jose.JSONWebKey{skeys.SigningKeyPub}
for _, vk := range skeys.VerificationKeys {
keys = append(keys, vk.PublicKey)
}
for _, key := range keys {
if keyID == "" || key.KeyID == keyID {
if payload, err := jws.Verify(key); err == nil {

249
server/rotation.go

@ -0,0 +1,249 @@
package server
import (
"context"
"crypto/rand"
"crypto/rsa"
"encoding/hex"
"errors"
"fmt"
"io"
"log/slog"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/dexidp/dex/storage"
)
var errAlreadyRotated = errors.New("keys already rotated by another server instance")
// rotationStrategy describes a strategy for generating cryptographic keys, how
// often to rotate them, and how long they can validate signatures after rotation.
type rotationStrategy struct {
// Time between rotations.
rotationFrequency time.Duration
// After being rotated how long should the key be kept around for validating
// signatures?
idTokenValidFor time.Duration
// Keys are always RSA keys. Though cryptopasta recommends ECDSA keys, not every
// client may support these (e.g. github.com/coreos/go-oidc/oidc).
key func() (*rsa.PrivateKey, error)
}
// staticRotationStrategy returns a strategy which never rotates keys.
func staticRotationStrategy(key *rsa.PrivateKey) rotationStrategy {
return rotationStrategy{
// Setting these values to 100 years is easier than having a flag indicating no rotation.
rotationFrequency: time.Hour * 8760 * 100,
idTokenValidFor: time.Hour * 8760 * 100,
key: func() (*rsa.PrivateKey, error) { return key, nil },
}
}
// defaultRotationStrategy returns a strategy which rotates keys every provided period,
// holding onto the public parts for some specified amount of time.
func defaultRotationStrategy(rotationFrequency, idTokenValidFor time.Duration) rotationStrategy {
return rotationStrategy{
rotationFrequency: rotationFrequency,
idTokenValidFor: idTokenValidFor,
key: func() (*rsa.PrivateKey, error) {
return rsa.GenerateKey(rand.Reader, 2048)
},
}
}
// keyRotator wraps a storage and applies a rotationStrategy to the signing
// keys it holds. It embeds storage.Storage so rotate() can call GetKeys and
// UpdateKeys directly.
type keyRotator struct {
	storage.Storage

	// strategy controls how new keys are generated, how often rotation
	// happens, and how long demoted keys remain valid for verification.
	strategy rotationStrategy

	// now is the clock used for rotation decisions; injectable for tests.
	now func() time.Time

	logger *slog.Logger
}
// startKeyRotation begins key rotation in a new goroutine, closing once the context is canceled.
//
// The method blocks until after the first attempt to rotate keys has completed. That way
// healthy storages will return from this call with valid keys.
func (s *Server) startKeyRotation(ctx context.Context, strategy rotationStrategy, now func() time.Time) {
	rot := keyRotator{s.storage, strategy, now, s.logger}

	// Rotate once up front so properly configured storages hold valid keys
	// before this method returns. An errAlreadyRotated result is benign: a
	// peer instance beat us to it.
	switch err := rot.rotate(); err {
	case nil:
	case errAlreadyRotated:
		s.logger.Info("key rotation not needed", "err", err)
	default:
		s.logger.Error("failed to rotate keys", "err", err)
	}

	// Re-check every 30 seconds until the context is canceled; rotate() is a
	// no-op while the current key has not yet reached its next-rotation time.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-time.After(time.Second * 30):
				if err := rot.rotate(); err != nil {
					s.logger.Error("failed to rotate keys", "err", err)
				}
			}
		}
	}()
}
// rotate performs a single rotation attempt: if the stored keys are due for
// rotation, it generates a fresh RSA signing key, demotes the old signing key
// to a verification-only key, prunes expired verification keys, and persists
// the result. Returns errAlreadyRotated (via the UpdateKeys updater) when a
// concurrent instance rotated first — NOTE(review): assumes UpdateKeys
// surfaces the updater's error unwrapped; confirm per storage backend.
func (k keyRotator) rotate() error {
	// storage.ErrNotFound is tolerated: keys is then the zero value, whose
	// zero NextRotation is in the past, so a first rotation proceeds.
	keys, err := k.GetKeys(context.Background())
	if err != nil && err != storage.ErrNotFound {
		return fmt.Errorf("get keys: %v", err)
	}
	if k.now().Before(keys.NextRotation) {
		return nil
	}
	k.logger.Info("keys expired, rotating")

	// Generate the key outside of a storage transaction.
	key, err := k.strategy.key()
	if err != nil {
		return fmt.Errorf("generate key: %v", err)
	}
	// 20 random bytes -> 40-char hex key ID. A failing system RNG is
	// unrecoverable, hence the panic.
	b := make([]byte, 20)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		panic(err)
	}
	keyID := hex.EncodeToString(b)
	priv := &jose.JSONWebKey{
		Key:       key,
		KeyID:     keyID,
		Algorithm: "RS256",
		Use:       "sig",
	}
	pub := &jose.JSONWebKey{
		Key:       key.Public(),
		KeyID:     keyID,
		Algorithm: "RS256",
		Use:       "sig",
	}

	var nextRotation time.Time
	// The updater runs inside the storage's update transaction; all decisions
	// below are re-checked against the freshly loaded keys it receives.
	err = k.UpdateKeys(context.Background(), func(keys storage.Keys) (storage.Keys, error) {
		tNow := k.now()

		// if you are running multiple instances of dex, another instance
		// could have already rotated the keys.
		if tNow.Before(keys.NextRotation) {
			return storage.Keys{}, errAlreadyRotated
		}

		expired := func(key storage.VerificationKey) bool {
			return tNow.After(key.Expiry)
		}

		// Remove any verification keys that have expired.
		// (in-place filter: keeps the backing array, truncates to i survivors)
		i := 0
		for _, key := range keys.VerificationKeys {
			if !expired(key) {
				keys.VerificationKeys[i] = key
				i++
			}
		}
		keys.VerificationKeys = keys.VerificationKeys[:i]

		if keys.SigningKeyPub != nil {
			// Move current signing key to a verification only key, throwing
			// away the private part.
			verificationKey := storage.VerificationKey{
				PublicKey: keys.SigningKeyPub,
				// After demoting the signing key, keep the token around for at least
				// the amount of time an ID Token is valid for. This ensures the
				// verification key won't expire until all ID Tokens it's signed
				// expired as well.
				Expiry: tNow.Add(k.strategy.idTokenValidFor),
			}
			keys.VerificationKeys = append(keys.VerificationKeys, verificationKey)
		}

		nextRotation = k.now().Add(k.strategy.rotationFrequency)
		keys.SigningKey = priv
		keys.SigningKeyPub = pub
		keys.NextRotation = nextRotation
		return keys, nil
	})
	if err != nil {
		return err
	}
	k.logger.Info("keys rotated", "next_rotation", nextRotation)
	return nil
}
type RefreshTokenPolicy struct {
rotateRefreshTokens bool // enable rotation
absoluteLifetime time.Duration // interval from token creation to the end of its life
validIfNotUsedFor time.Duration // interval from last token update to the end of its life
reuseInterval time.Duration // interval within which old refresh token is allowed to be reused
now func() time.Time
logger *slog.Logger
}
func NewRefreshTokenPolicy(logger *slog.Logger, rotation bool, validIfNotUsedFor, absoluteLifetime, reuseInterval string) (*RefreshTokenPolicy, error) {
r := RefreshTokenPolicy{now: time.Now, logger: logger}
var err error
if validIfNotUsedFor != "" {
r.validIfNotUsedFor, err = time.ParseDuration(validIfNotUsedFor)
if err != nil {
return nil, fmt.Errorf("invalid config value %q for refresh token valid if not used for: %v", validIfNotUsedFor, err)
}
logger.Info("config refresh tokens", "valid_if_not_used_for", validIfNotUsedFor)
}
if absoluteLifetime != "" {
r.absoluteLifetime, err = time.ParseDuration(absoluteLifetime)
if err != nil {
return nil, fmt.Errorf("invalid config value %q for refresh tokens absolute lifetime: %v", absoluteLifetime, err)
}
logger.Info("config refresh tokens", "absolute_lifetime", absoluteLifetime)
}
if reuseInterval != "" {
r.reuseInterval, err = time.ParseDuration(reuseInterval)
if err != nil {
return nil, fmt.Errorf("invalid config value %q for refresh tokens reuse interval: %v", reuseInterval, err)
}
logger.Info("config refresh tokens", "reuse_interval", reuseInterval)
}
r.rotateRefreshTokens = !rotation
logger.Info("config refresh tokens rotation", "enabled", r.rotateRefreshTokens)
return &r, nil
}
func (r *RefreshTokenPolicy) RotationEnabled() bool {
return r.rotateRefreshTokens
}
func (r *RefreshTokenPolicy) CompletelyExpired(lastUsed time.Time) bool {
if r.absoluteLifetime == 0 {
return false // expiration disabled
}
return r.now().After(lastUsed.Add(r.absoluteLifetime))
}
func (r *RefreshTokenPolicy) ExpiredBecauseUnused(lastUsed time.Time) bool {
if r.validIfNotUsedFor == 0 {
return false // expiration disabled
}
return r.now().After(lastUsed.Add(r.validIfNotUsedFor))
}
func (r *RefreshTokenPolicy) AllowedToReuse(lastUsed time.Time) bool {
if r.reuseInterval == 0 {
return false // expiration disabled
}
return !r.now().After(lastUsed.Add(r.reuseInterval))
}

6
storage/kubernetes/lock.go

@ -58,7 +58,7 @@ func (l *refreshTokenLock) Unlock(id string) {
}
r.Annotations = nil
err = l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r)
err = l.cli.put(resourceRefreshToken, r.Name, r)
if err != nil {
l.cli.logger.Debug("failed to release lock for refresh token", "token_id", id, "err", err)
}
@ -82,7 +82,7 @@ func (l *refreshTokenLock) setLockAnnotation(id string) (bool, error) {
}
r.Annotations = lockData
err := l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r)
err := l.cli.put(resourceRefreshToken, r.Name, r)
if err == nil {
return false, nil
}
@ -108,7 +108,7 @@ func (l *refreshTokenLock) setLockAnnotation(id string) (bool, error) {
// Lock time is out, lets break the lock and take the advantage
r.Annotations = lockData
err = l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r)
err = l.cli.put(resourceRefreshToken, r.Name, r)
if err == nil {
// break lock annotation
return false, nil

111
storage/kubernetes/storage.go

@ -40,11 +40,6 @@ const (
resourceDeviceToken = "devicetokens"
)
const (
crdHandlingEnsure = "ensure"
crdHandlingCheck = "check"
)
var _ storage.Storage = (*client)(nil)
const (
@ -55,16 +50,15 @@ const (
type Config struct {
InCluster bool `json:"inCluster"`
KubeConfigFile string `json:"kubeConfigFile"`
// CRDHandling controls how the storage handles Custom Resource Definitions (CRDs).
// Supported values:
// - "ensure": Attempt to create all missing CRDs. If any CRD creation fails, initialization fails. (default)
// - "check": Fail immediately if any CRDs are missing with message "storage is not initialized, CRDs are not created"
CRDHandling string `json:"crdHandling"`
}
// Open returns a storage using Kubernetes third party resource.
func (c *Config) Open(logger *slog.Logger) (storage.Storage, error) {
return c.open(logger, false)
cli, err := c.open(logger, false)
if err != nil {
return nil, err
}
return cli, nil
}
// open returns a kubernetes client, initializing the third party resources used
@ -73,9 +67,6 @@ func (c *Config) Open(logger *slog.Logger) (storage.Storage, error) {
// waitForResources controls if errors creating the resources cause this method to return
// immediately (used during testing), or if the client will asynchronously retry.
func (c *Config) open(logger *slog.Logger, waitForResources bool) (*client, error) {
if c.CRDHandling == "" {
c.CRDHandling = crdHandlingEnsure
}
if c.InCluster && (c.KubeConfigFile != "") {
return nil, errors.New("cannot specify both 'inCluster' and 'kubeConfigFile'")
}
@ -98,7 +89,7 @@ func (c *Config) open(logger *slog.Logger, waitForResources bool) (*client, erro
return nil, err
}
cli, err := newClient(cluster, user, namespace, logger, c.InCluster, c.CRDHandling)
cli, err := newClient(cluster, user, namespace, logger, c.InCluster)
if err != nil {
return nil, fmt.Errorf("create client: %v", err)
}
@ -152,55 +143,45 @@ func (c *Config) open(logger *slog.Logger, waitForResources bool) (*client, erro
// It logs all errors, returning true if the resources were created successfully.
//
// Creating a custom resource does not mean that they'll be immediately available.
func (cli *client) registerCustomResources() bool {
func (cli *client) registerCustomResources() (ok bool) {
ok = true
definitions := customResourceDefinitions(cli.crdAPIVersion)
length := len(definitions)
// First pass: collect all CRDs that don't exist
var missingCRDs []k8sapi.CustomResourceDefinition
for i := 0; i < length; i++ {
var err error
var resourceName string
for _, r := range definitions {
r := definitions[i]
var i interface{}
cli.logger.Info("checking if custom resource has already been created...", "object", r.ObjectMeta.Name)
cli.logger.Info("checking if custom resource has already been created...", "object", r.Name)
if err := cli.listN(r.Spec.Names.Plural, &i, 1); err == nil {
cli.logger.Info("the custom resource already available, skipping create", "object", r.ObjectMeta.Name)
cli.logger.Info("the custom resource already available, skipping create", "object", r.Name)
continue
} else {
cli.logger.Info("custom resource not found", "object", r.ObjectMeta.Name, "err", err)
missingCRDs = append(missingCRDs, r)
cli.logger.Info("failed to list custom resource, attempting to create", "object", r.Name, "err", err)
}
}
// Second pass: handle missing CRDs based on crdHandling option
if len(missingCRDs) > 0 {
cli.logger.Info("found missing CRDs", "count", len(missingCRDs))
switch cli.crdHandling {
case crdHandlingCheck:
// For "check" mode, fail and report that CRDs are not initialized
cli.logger.Error("storage is not initialized, CRDs are not created", "crdHandling", cli.crdHandling, "missing_count", len(missingCRDs))
return false
case crdHandlingEnsure:
cli.logger.Info("crdHandling is 'ensure', attempting to create missing CRDs")
for _, r := range missingCRDs {
resourceName := r.ObjectMeta.Name
err := cli.postResource(cli.crdAPIVersion, "", "customresourcedefinitions", r)
if err != nil {
if !errors.Is(err, storage.ErrAlreadyExists) {
cli.logger.Error("failed to create custom resource", "object", resourceName, "err", err)
return false
}
cli.logger.Info("custom resource already created", "object", resourceName)
} else {
cli.logger.Info("successfully created custom resource", "object", resourceName)
}
err = cli.postResource(cli.crdAPIVersion, "", "customresourcedefinitions", r)
resourceName = r.Name
if err != nil {
switch err {
case storage.ErrAlreadyExists:
cli.logger.Info("custom resource already created", "object", resourceName)
case storage.ErrNotFound:
cli.logger.Error("custom resources not found, please enable the respective API group")
ok = false
default:
cli.logger.Error("creating custom resource", "object", resourceName, "err", err)
ok = false
}
return true
default:
cli.logger.Error("invalid crdHandling value", "value", cli.crdHandling)
return false
continue
}
cli.logger.Error("create custom resource", "object", resourceName)
}
// All CRDs exist
return true
return ok
}
// waitForCRDs waits for all CRDs to be in a ready state, and is used
@ -436,7 +417,7 @@ func (cli *client) DeleteClient(ctx context.Context, id string) error {
if err != nil {
return err
}
return cli.delete(resourceClient, c.ObjectMeta.Name)
return cli.delete(resourceClient, c.Name)
}
func (cli *client) DeleteRefresh(ctx context.Context, id string) error {
@ -449,7 +430,7 @@ func (cli *client) DeletePassword(ctx context.Context, email string) error {
if err != nil {
return err
}
return cli.delete(resourcePassword, p.ObjectMeta.Name)
return cli.delete(resourcePassword, p.Name)
}
func (cli *client) DeleteOfflineSessions(ctx context.Context, userID string, connID string) error {
@ -458,7 +439,7 @@ func (cli *client) DeleteOfflineSessions(ctx context.Context, userID string, con
if err != nil {
return err
}
return cli.delete(resourceOfflineSessions, o.ObjectMeta.Name)
return cli.delete(resourceOfflineSessions, o.Name)
}
func (cli *client) DeleteConnector(ctx context.Context, id string) error {
@ -488,7 +469,7 @@ func (cli *client) UpdateRefreshToken(ctx context.Context, id string, updater fu
newToken := cli.fromStorageRefreshToken(updated)
newToken.ObjectMeta = r.ObjectMeta
return cli.put(resourceRefreshToken, r.ObjectMeta.Name, newToken)
return cli.put(resourceRefreshToken, r.Name, newToken)
})
}
@ -506,7 +487,7 @@ func (cli *client) UpdateClient(ctx context.Context, id string, updater func(old
newClient := cli.fromStorageClient(updated)
newClient.ObjectMeta = c.ObjectMeta
return cli.put(resourceClient, c.ObjectMeta.Name, newClient)
return cli.put(resourceClient, c.Name, newClient)
}
func (cli *client) UpdatePassword(ctx context.Context, email string, updater func(old storage.Password) (storage.Password, error)) error {
@ -523,7 +504,7 @@ func (cli *client) UpdatePassword(ctx context.Context, email string, updater fun
newPassword := cli.fromStoragePassword(updated)
newPassword.ObjectMeta = p.ObjectMeta
return cli.put(resourcePassword, p.ObjectMeta.Name, newPassword)
return cli.put(resourcePassword, p.Name, newPassword)
}
func (cli *client) UpdateOfflineSessions(ctx context.Context, userID string, connID string, updater func(old storage.OfflineSessions) (storage.OfflineSessions, error)) error {
@ -540,7 +521,7 @@ func (cli *client) UpdateOfflineSessions(ctx context.Context, userID string, con
newOfflineSessions := cli.fromStorageOfflineSessions(updated)
newOfflineSessions.ObjectMeta = o.ObjectMeta
return cli.put(resourceOfflineSessions, o.ObjectMeta.Name, newOfflineSessions)
return cli.put(resourceOfflineSessions, o.Name, newOfflineSessions)
})
}
@ -634,7 +615,7 @@ func (cli *client) GarbageCollect(ctx context.Context, now time.Time) (result st
var delErr error
for _, authRequest := range authRequests.AuthRequests {
if now.After(authRequest.Expiry) {
if err := cli.delete(resourceAuthRequest, authRequest.ObjectMeta.Name); err != nil {
if err := cli.delete(resourceAuthRequest, authRequest.Name); err != nil {
cli.logger.Error("failed to delete auth request", "err", err)
delErr = fmt.Errorf("failed to delete auth request: %v", err)
}
@ -652,7 +633,7 @@ func (cli *client) GarbageCollect(ctx context.Context, now time.Time) (result st
for _, authCode := range authCodes.AuthCodes {
if now.After(authCode.Expiry) {
if err := cli.delete(resourceAuthCode, authCode.ObjectMeta.Name); err != nil {
if err := cli.delete(resourceAuthCode, authCode.Name); err != nil {
cli.logger.Error("failed to delete auth code", "err", err)
delErr = fmt.Errorf("failed to delete auth code: %v", err)
}
@ -667,7 +648,7 @@ func (cli *client) GarbageCollect(ctx context.Context, now time.Time) (result st
for _, deviceRequest := range deviceRequests.DeviceRequests {
if now.After(deviceRequest.Expiry) {
if err := cli.delete(resourceDeviceRequest, deviceRequest.ObjectMeta.Name); err != nil {
if err := cli.delete(resourceDeviceRequest, deviceRequest.Name); err != nil {
cli.logger.Error("failed to delete device request", "err", err)
delErr = fmt.Errorf("failed to delete device request: %v", err)
}
@ -682,7 +663,7 @@ func (cli *client) GarbageCollect(ctx context.Context, now time.Time) (result st
for _, deviceToken := range deviceTokens.DeviceTokens {
if now.After(deviceToken.Expiry) {
if err := cli.delete(resourceDeviceToken, deviceToken.ObjectMeta.Name); err != nil {
if err := cli.delete(resourceDeviceToken, deviceToken.Name); err != nil {
cli.logger.Error("failed to delete device token", "err", err)
delErr = fmt.Errorf("failed to delete device token: %v", err)
}
@ -739,7 +720,7 @@ func (cli *client) UpdateDeviceToken(ctx context.Context, deviceCode string, upd
newToken := cli.fromStorageDeviceToken(updated)
newToken.ObjectMeta = r.ObjectMeta
return cli.put(resourceDeviceToken, r.ObjectMeta.Name, newToken)
return cli.put(resourceDeviceToken, r.Name, newToken)
})
}

12
storage/kubernetes/types.go

@ -369,7 +369,7 @@ type AuthRequestList struct {
func toStorageAuthRequest(req AuthRequest) storage.AuthRequest {
a := storage.AuthRequest{
ID: req.ObjectMeta.Name,
ID: req.Name,
ClientID: req.ClientID,
ResponseTypes: req.ResponseTypes,
Scopes: req.Scopes,
@ -538,7 +538,7 @@ func (cli *client) fromStorageAuthCode(a storage.AuthCode) AuthCode {
func toStorageAuthCode(a AuthCode) storage.AuthCode {
return storage.AuthCode{
ID: a.ObjectMeta.Name,
ID: a.Name,
ClientID: a.ClientID,
RedirectURI: a.RedirectURI,
ConnectorID: a.ConnectorID,
@ -585,7 +585,7 @@ type RefreshList struct {
func toStorageRefreshToken(r RefreshToken) storage.RefreshToken {
return storage.RefreshToken{
ID: r.ObjectMeta.Name,
ID: r.Name,
Token: r.Token,
ObsoleteToken: r.ObsoleteToken,
CreatedAt: r.CreatedAt,
@ -745,7 +745,7 @@ func toStorageConnector(c Connector) storage.Connector {
ID: c.ID,
Type: c.Type,
Name: c.Name,
ResourceVersion: c.ObjectMeta.ResourceVersion,
ResourceVersion: c.ResourceVersion,
Config: c.Config,
}
}
@ -798,7 +798,7 @@ func (cli *client) fromStorageDeviceRequest(a storage.DeviceRequest) DeviceReque
func toStorageDeviceRequest(req DeviceRequest) storage.DeviceRequest {
return storage.DeviceRequest{
UserCode: strings.ToUpper(req.ObjectMeta.Name),
UserCode: strings.ToUpper(req.Name),
DeviceCode: req.DeviceCode,
ClientID: req.ClientID,
ClientSecret: req.ClientSecret,
@ -852,7 +852,7 @@ func (cli *client) fromStorageDeviceToken(t storage.DeviceToken) DeviceToken {
func toStorageDeviceToken(t DeviceToken) storage.DeviceToken {
return storage.DeviceToken{
DeviceCode: t.ObjectMeta.Name,
DeviceCode: t.Name,
Status: t.Status,
Token: t.Token,
Expiry: t.Expiry,

Loading…
Cancel
Save