Browse Source

merge upstream master and fix non-constant format strings in oauth2.go

Signed-off-by: Nik Ogura <nik.ogura@gmail.com>
pull/4527/head
Nik Ogura 12 hours ago
parent
commit
193ab8987d
No known key found for this signature in database
GPG Key ID: 3A7A4AA69B634E2B
  1. 4
      .github/workflows/analysis-scorecard.yaml
  2. 26
      .github/workflows/artifacts.yaml
  3. 2
      .github/workflows/checks.yaml
  4. 36
      .github/workflows/ci.yaml
  5. 4
      Dockerfile
  6. 348
      api/api.pb.go
  7. 2
      api/api.proto
  8. 966
      api/v2/api.pb.go
  9. 14
      api/v2/api.proto
  10. 2
      api/v2/go.mod
  11. 4
      api/v2/go.sum
  12. 65
      cmd/dex/config.go
  13. 7
      cmd/dex/config_test.go
  14. 56
      cmd/dex/excluding_handler.go
  15. 141
      cmd/dex/excluding_handler_test.go
  16. 4
      cmd/dex/logger.go
  17. 36
      cmd/dex/serve.go
  18. 6
      cmd/dex/serve_test.go
  19. 19
      config.yaml.dist
  20. 10
      connector/atlassiancrowd/atlassiancrowd.go
  21. 10
      connector/authproxy/authproxy.go
  22. 12
      connector/authproxy/authproxy_test.go
  23. 4
      connector/github/github.go
  24. 15
      connector/github/github_test.go
  25. 33
      connector/gitlab/gitlab.go
  26. 85
      connector/gitlab/gitlab_test.go
  27. 10
      connector/keystone/keystone.go
  28. 10
      connector/ldap/ldap.go
  29. 12
      connector/linkedin/linkedin.go
  30. 12
      connector/mock/connectortest.go
  31. 10
      connector/oauth/oauth.go
  32. 10
      connector/oauth/oauth_test.go
  33. 5
      connector/oidc/oidc.go
  34. 70
      connector/saml/saml.go
  35. 327
      connector/saml/saml_test.go
  36. 4
      docker-compose.override.yaml.dist
  37. 9
      docker-compose.yaml
  38. 1505
      docs/enhancements/auth-sessions-2026-02-18.md
  39. 732
      docs/enhancements/cel-expressions-2026-02-28.md
  40. 43
      examples/config-dev.yaml
  41. 6
      examples/go.mod
  42. 8
      examples/go.sum
  43. 31
      go.mod
  44. 67
      go.sum
  45. 232
      pkg/cel/cel.go
  46. 280
      pkg/cel/cel_test.go
  47. 105
      pkg/cel/cost.go
  48. 137
      pkg/cel/cost_test.go
  49. 5
      pkg/cel/doc.go
  50. 4
      pkg/cel/library/doc.go
  51. 73
      pkg/cel/library/email.go
  52. 106
      pkg/cel/library/email_test.go
  53. 123
      pkg/cel/library/groups.go
  54. 141
      pkg/cel/library/groups_test.go
  55. 109
      pkg/cel/types.go
  56. 3
      pkg/featureflags/doc.go
  57. 10
      pkg/featureflags/set.go
  58. 2
      pkg/groups/doc.go
  59. 1
      pkg/groups/groups.go
  60. 3
      pkg/httpclient/doc.go
  61. 88
      server/api.go
  62. 133
      server/api_cache_test.go
  63. 99
      server/api_test.go
  64. 4
      server/deviceflowhandlers.go
  65. 38
      server/deviceflowhandlers_test.go
  66. 368
      server/handlers.go
  67. 117
      server/handlers_approval_test.go
  68. 1020
      server/handlers_test.go
  69. 4
      server/introspectionhandler_test.go
  70. 23
      server/oauth2.go
  71. 90
      server/oauth2_test.go
  72. 4
      server/refreshhandlers.go
  73. 50
      server/server.go
  74. 37
      server/server_test.go
  75. 204
      storage/conformance/conformance.go
  76. 121
      storage/conformance/transactions.go
  77. 108
      storage/ent/client/authsession.go
  78. 2
      storage/ent/client/client.go
  79. 2
      storage/ent/client/connector.go
  80. 8
      storage/ent/client/offlinesession.go
  81. 79
      storage/ent/client/types.go
  82. 130
      storage/ent/client/useridentity.go
  83. 8
      storage/ent/client/utils.go
  84. 150
      storage/ent/db/authsession.go
  85. 83
      storage/ent/db/authsession/authsession.go
  86. 355
      storage/ent/db/authsession/where.go
  87. 282
      storage/ent/db/authsession_create.go
  88. 88
      storage/ent/db/authsession_delete.go
  89. 527
      storage/ent/db/authsession_query.go
  90. 330
      storage/ent/db/authsession_update.go
  91. 301
      storage/ent/db/client.go
  92. 18
      storage/ent/db/connector.go
  93. 3
      storage/ent/db/connector/connector.go
  94. 10
      storage/ent/db/connector/where.go
  95. 10
      storage/ent/db/connector_create.go
  96. 59
      storage/ent/db/connector_update.go
  97. 4
      storage/ent/db/ent.go
  98. 24
      storage/ent/db/hook/hook.go
  99. 41
      storage/ent/db/migrate/schema.go
  100. 6814
      storage/ent/db/mutation.go
  101. Some files were not shown because too many files have changed in this diff Show More

4
.github/workflows/analysis-scorecard.yaml

@@ -35,13 +35,13 @@ jobs:
publish_results: true
- name: Upload results as artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: OpenSSF Scorecard results
path: results.sarif
retention-days: 5
- name: Upload results to GitHub Security tab
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v3.29.5
uses: github/codeql-action/upload-sarif@0d579ffd059c29b07949a3cce3983f0780820c98 # v3.29.5
with:
sarif_file: results.sarif

26
.github/workflows/artifacts.yaml

@@ -56,16 +56,16 @@ jobs:
fetch-tags: true
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Set up Syft
uses: anchore/sbom-action/download-syft@28d71544de8eaf1b958d335707167c5f783590ad # v0.22.2
uses: anchore/sbom-action/download-syft@57aae528053a48a3f6235f2d9461b05fbcb7366d # v0.23.1
- name: Install cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
uses: sigstore/cosign-installer@ba7bc0a3fef59531c69a25acd34668d6d3fe6f22 # v4.1.0
- name: Set image name
id: image-name
@@ -73,7 +73,7 @@ jobs:
- name: Gather build metadata
id: meta
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
with:
images: |
${{ steps.image-name.outputs.value }}
@@ -109,7 +109,7 @@ jobs:
if_false: type=oci,dest=image.tar
- name: Login to GitHub Container Registry
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -117,7 +117,7 @@ jobs:
if: inputs.publish
- name: Login to Docker Hub
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
@@ -125,7 +125,7 @@ jobs:
- name: Build and push image
id: build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
with:
context: .
platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x
@@ -197,14 +197,14 @@ jobs:
# TODO: uncomment when the action is working for non ghcr.io pushes. GH Issue: https://github.com/actions/attest-build-provenance/issues/80
# - name: Generate build provenance attestation
# uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0
# uses: actions/attest-build-provenance@a2bbfa25375fe432b6a289bc6b6cd05ecd0c4c32 # v4.1.0
# with:
# subject-name: dexidp/dex
# subject-digest: ${{ steps.build.outputs.digest }}
# push-to-registry: true
- name: Generate build provenance attestation
uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0
uses: actions/attest-build-provenance@a2bbfa25375fe432b6a289bc6b6cd05ecd0c4c32 # v4.1.0
with:
subject-name: ghcr.io/${{ github.repository }}
subject-digest: ${{ steps.build.outputs.digest }}
@@ -233,7 +233,7 @@ jobs:
restore-keys: trivy-cache-
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@e368e328979b113139d6f9068e03accaed98a518 # 0.34.1
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # 0.35.0
with:
input: docker-image
format: sarif
@@ -256,7 +256,7 @@ jobs:
run: cat trivy-results.sarif
- name: Upload Trivy scan results as artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: "[${{ github.job }}] Trivy scan results"
path: trivy-results.sarif
@@ -264,6 +264,6 @@ jobs:
overwrite: true
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v3.29.5
uses: github/codeql-action/upload-sarif@0d579ffd059c29b07949a3cce3983f0780820c98 # v3.29.5
with:
sarif_file: trivy-results.sarif

2
.github/workflows/checks.yaml

@@ -16,7 +16,7 @@ jobs:
steps:
- name: Check minimum labels
uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5
uses: mheap/github-action-required-labels@0ac283b4e65c1fb28ce6079dea5546ceca98ccbe # v5.5
with:
mode: minimum
count: 1

36
.github/workflows/ci.yaml

@@ -48,6 +48,24 @@ jobs:
- 3306
options: --health-cmd "mysql -proot -e \"show databases;\"" --health-interval 10s --health-timeout 5s --health-retries 5
mysql8:
image: mysql:8.0
env:
MYSQL_ROOT_PASSWORD: root
MYSQL_DATABASE: dex
ports:
- 3306
options: --health-cmd "mysql -proot -e \"show databases;\"" --health-interval 10s --health-timeout 5s --health-retries 5
mysql8-ent:
image: mysql:8.0
env:
MYSQL_ROOT_PASSWORD: root
MYSQL_DATABASE: dex
ports:
- 3306
options: --health-cmd "mysql -proot -e \"show databases;\"" --health-interval 10s --health-timeout 5s --health-retries 5
etcd:
image: gcr.io/etcd-development/etcd:v3.5.0
ports:
@@ -87,7 +105,7 @@ jobs:
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: "1.25"
@@ -124,6 +142,18 @@ jobs:
DEX_MYSQL_ENT_HOST: 127.0.0.1
DEX_MYSQL_ENT_PORT: ${{ job.services.mysql-ent.ports[3306] }}
DEX_MYSQL8_DATABASE: dex
DEX_MYSQL8_USER: root
DEX_MYSQL8_PASSWORD: root
DEX_MYSQL8_HOST: 127.0.0.1
DEX_MYSQL8_PORT: ${{ job.services.mysql8.ports[3306] }}
DEX_MYSQL8_ENT_DATABASE: dex
DEX_MYSQL8_ENT_USER: root
DEX_MYSQL8_ENT_PASSWORD: root
DEX_MYSQL8_ENT_HOST: 127.0.0.1
DEX_MYSQL8_ENT_PORT: ${{ job.services.mysql8-ent.ports[3306] }}
DEX_POSTGRES_DATABASE: postgres
DEX_POSTGRES_USER: postgres
DEX_POSTGRES_PASSWORD: postgres
@@ -163,7 +193,7 @@ jobs:
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: "1.25"
@@ -198,4 +228,4 @@ jobs:
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Dependency Review
uses: actions/dependency-review-action@05fe4576374b728f0c523d6a13d64c25081e0803 # v4.8.3
uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0

4
Dockerfile

@@ -2,7 +2,7 @@ ARG BASE_IMAGE=alpine
FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.9.0@sha256:c64defb9ed5a91eacb37f96ccc3d4cd72521c4bd18d5442905b95e2226b0e707 AS xx
FROM --platform=$BUILDPLATFORM golang:1.26.0-alpine3.22@sha256:169d3991a4f795124a88b33c73549955a3d856e26e8504b5530c30bd245f9f1b AS builder
FROM --platform=$BUILDPLATFORM golang:1.26.1-alpine3.22@sha256:07e91d24f6330432729082bb580983181809e0a48f0f38ecde26868d4568c6ac AS builder
COPY --from=xx / /
@@ -59,7 +59,7 @@ FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f
FROM alpine AS user-setup
RUN addgroup -g 1001 -S dex && adduser -u 1001 -S -G dex -D -H -s /sbin/nologin dex
FROM gcr.io/distroless/static-debian13:nonroot@sha256:01e550fdb7ab79ee7be5ff440a563a58f1fd000ad9e0c532e65c3d23f917f1c5 AS distroless
FROM gcr.io/distroless/static-debian13:nonroot@sha256:e3f945647ffb95b5839c07038d64f9811adf17308b9121d8a2b87b6a22a80a39 AS distroless
FROM $BASE_IMAGE

348
api/api.pb.go

@@ -23,16 +23,17 @@ const (
// Client represents an OAuth2 client.
type Client struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"`
TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"`
Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"`
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"`
TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"`
Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"`
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"`
AllowedConnectors []string `protobuf:"bytes,8,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Client) Reset() {
@@ -114,6 +115,13 @@ func (x *Client) GetLogoUrl() string {
return ""
}
func (x *Client) GetAllowedConnectors() []string {
if x != nil {
return x.AllowedConnectors
}
return nil
}
// CreateClientReq is a request to make a client.
type CreateClientReq struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -305,14 +313,15 @@ func (x *DeleteClientResp) GetNotFound() bool {
// UpdateClientReq is a request to update an existing client.
type UpdateClientReq struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"`
TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"`
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"`
TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"`
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"`
AllowedConnectors []string `protobuf:"bytes,6,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UpdateClientReq) Reset() {
@@ -380,6 +389,13 @@ func (x *UpdateClientReq) GetLogoUrl() string {
return ""
}
func (x *UpdateClientReq) GetAllowedConnectors() []string {
if x != nil {
return x.AllowedConnectors
}
return nil
}
// UpdateClientResp returns the response from updating a client.
type UpdateClientResp struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -1326,7 +1342,7 @@ var File_api_api_proto protoreflect.FileDescriptor
var file_api_api_proto_rawDesc = string([]byte{
0x0a, 0x0d, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x03, 0x61, 0x70, 0x69, 0x22, 0xc1, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12,
0x03, 0x61, 0x70, 0x69, 0x22, 0xf0, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12,
0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12,
0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72,
@@ -1338,155 +1354,161 @@ var file_api_api_proto_rawDesc = string([]byte{
0x08, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a,
0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x22, 0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f,
0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c,
0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x22, 0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x02, 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66,
0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46,
0x6f, 0x75, 0x6e, 0x64, 0x22, 0x9a, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69,
0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a,
0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03,
0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65,
0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75,
0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72,
0x6c, 0x22, 0x2f, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75,
0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75,
0x6e, 0x64, 0x22, 0x69, 0x0a, 0x08, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x14,
0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
0x6d, 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72,
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x3e, 0x0a,
0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52,
0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77,
0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3b, 0x0a,
0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52,
0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f,
0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x08,
0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x6e,
0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69,
0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22,
0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52,
0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65,
0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72,
0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x67, 0x0a, 0x11, 0x55, 0x70,
0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12,
0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73,
0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x48, 0x61, 0x73, 0x68,
0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x6e,
0x61, 0x6d, 0x65, 0x22, 0x31, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73,
0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74,
0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f,
0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x29, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65,
0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69,
0x6c, 0x22, 0x31, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77,
0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69,
0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22,
0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52,
0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f,
0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f,
0x75, 0x6e, 0x64, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72,
0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c,
0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a, 0x0d,
0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72,
0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72,
0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c,
0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e,
0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c,
0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22,
0x2f, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52,
0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64,
0x22, 0x69, 0x0a, 0x08, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05,
0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61,
0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x11, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71,
0x12, 0x29, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3b, 0x0a, 0x12, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73,
0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69,
0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61,
0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x67, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a,
0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d,
0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21,
0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d,
0x65, 0x22, 0x31, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77,
0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66,
0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46,
0x6f, 0x75, 0x6e, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x22, 0x3f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2b, 0x0a, 0x09, 0x70,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d,
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x09, 0x70,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x22, 0x37, 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a,
0x03, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22,
0x7a, 0x0a, 0x0f, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12,
0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20,
0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b,
0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
0x03, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0e, 0x4c,
0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a,
0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x66,
0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54,
0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73,
0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65,
0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64,
0x22, 0x30, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73,
0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75,
0x6f, 0x75, 0x6e, 0x64, 0x22, 0x29, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61,
0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22,
0x31, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75,
0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75,
0x6e, 0x64, 0x22, 0x45, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a,
0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, 0x65, 0x72,
0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12,
0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e,
0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x32, 0xc7, 0x05, 0x0a, 0x03, 0x44, 0x65, 0x78,
0x12, 0x3d, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65,
0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12,
0x3d, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12,
0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d,
0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14,
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a,
0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12,
0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72,
0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70,
0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61,
0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65,
0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x0d,
0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x14, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64,
0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a,
0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69,
0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12,
0x3a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x13,
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52,
0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61,
0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a,
0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12,
0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65,
0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70,
0x22, 0x00, 0x42, 0x2f, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73,
0x2e, 0x64, 0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 0x5a, 0x19, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f,
0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6e, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f,
0x72, 0x64, 0x52, 0x65, 0x71, 0x22, 0x3f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73,
0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2b, 0x0a, 0x09, 0x70, 0x61, 0x73,
0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61,
0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x09, 0x70, 0x61, 0x73,
0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x52, 0x65, 0x71, 0x22, 0x37, 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61,
0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22, 0x7a, 0x0a,
0x0f, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66,
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a,
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09,
0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52,
0x08, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0e, 0x4c, 0x69, 0x73,
0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75,
0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73,
0x65, 0x72, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72,
0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x66, 0x72, 0x65,
0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b,
0x65, 0x6e, 0x52, 0x65, 0x66, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f,
0x6b, 0x65, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65,
0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72,
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49,
0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x30,
0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52,
0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64,
0x22, 0x45, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f,
0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66,
0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a,
0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74,
0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f,
0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x32, 0xc7, 0x05, 0x0a, 0x03, 0x44, 0x65, 0x78, 0x12, 0x3d,
0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14,
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a,
0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x61,
0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52,
0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f,
0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f,
0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69,
0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52,
0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a,
0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x0d, 0x4c, 0x69,
0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65,
0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73,
0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x47, 0x65,
0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70, 0x69, 0x2e,
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3a, 0x0a,
0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x13, 0x2e, 0x61,
0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65,
0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72,
0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52, 0x65, 0x76,
0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61, 0x70, 0x69,
0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65,
0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65,
0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x56,
0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f,
0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69,
0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
0x42, 0x2f, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73, 0x2e, 0x64,
0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 0x5a, 0x19, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, 0x61, 0x70,
0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (

2
api/api.proto

@ -14,6 +14,7 @@ message Client {
bool public = 5;
string name = 6;
string logo_url = 7;
repeated string allowed_connectors = 8;
}
// CreateClientReq is a request to make a client.
@ -45,6 +46,7 @@ message UpdateClientReq {
repeated string trusted_peers = 3;
string name = 4;
string logo_url = 5;
repeated string allowed_connectors = 6;
}
// UpdateClientResp returns the response from updating a client.

966
api/v2/api.pb.go

File diff suppressed because it is too large Load Diff

14
api/v2/api.proto

@ -14,6 +14,7 @@ message Client {
bool public = 5;
string name = 6;
string logo_url = 7;
repeated string allowed_connectors = 8;
}
// ClientInfo represents an OAuth2 client without sensitive information.
@ -24,6 +25,7 @@ message ClientInfo {
bool public = 4;
string name = 5;
string logo_url = 6;
repeated string allowed_connectors = 7;
}
// GetClientReq is a request to retrieve client details.
@ -66,6 +68,7 @@ message UpdateClientReq {
repeated string trusted_peers = 3;
string name = 4;
string logo_url = 5;
repeated string allowed_connectors = 6;
}
// UpdateClientResp returns the response from updating a client.
@ -140,6 +143,7 @@ message Connector {
string type = 2;
string name = 3;
bytes config = 4;
repeated string grant_types = 5;
}
// CreateConnectorReq is a request to make a connector.
@ -152,6 +156,12 @@ message CreateConnectorResp {
bool already_exists = 1;
}
// GrantTypes wraps a list of grant types to distinguish between
// "not specified" (no update) and "empty list" (unrestricted).
message GrantTypes {
repeated string grant_types = 1;
}
// UpdateConnectorReq is a request to modify an existing connector.
message UpdateConnectorReq {
// The id used to lookup the connector. This field cannot be modified
@ -159,6 +169,10 @@ message UpdateConnectorReq {
string new_type = 2;
string new_name = 3;
bytes new_config = 4;
// If set, updates the connector's allowed grant types.
// An empty grant_types list means unrestricted (all grant types allowed).
// If not set (null), grant types are not modified.
GrantTypes new_grant_types = 5;
}
// UpdateConnectorResp returns the response from modifying an existing connector.

2
api/v2/go.mod

@ -3,7 +3,7 @@ module github.com/dexidp/dex/api/v2
go 1.24.0
require (
google.golang.org/grpc v1.79.1
google.golang.org/grpc v1.79.2
google.golang.org/protobuf v1.36.11
)

4
api/v2/go.sum

@ -32,7 +32,7 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU=
google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=

65
cmd/dex/config.go

@ -1,6 +1,7 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
@ -23,6 +24,15 @@ import (
"github.com/dexidp/dex/storage/sql"
)
func configUnmarshaller(b []byte, v interface{}) error {
if !featureflags.ConfigDisallowUnknownFields.Enabled() {
return json.Unmarshal(b, v)
}
dec := json.NewDecoder(bytes.NewReader(b))
dec.DisallowUnknownFields()
return dec.Decode(v)
}
// Config is the config format for the main application.
type Config struct {
Issuer string `json:"issuer"`
@ -89,6 +99,7 @@ func (c Config) Validate() error {
checkErrors = append(checkErrors, check.errMsg)
}
}
if len(checkErrors) != 0 {
return fmt.Errorf("invalid Config:\n\t-\t%s", strings.Join(checkErrors, "\n\t-\t"))
}
@ -109,7 +120,7 @@ func (p *password) UnmarshalJSON(b []byte) error {
HashFromEnv string `json:"hashFromEnv"`
Groups []string `json:"groups"`
}
if err := json.Unmarshal(b, &data); err != nil {
if err := configUnmarshaller(b, &data); err != nil {
return err
}
*p = password(storage.Password{
@ -161,6 +172,16 @@ type OAuth2 struct {
AlwaysShowLoginScreen bool `json:"alwaysShowLoginScreen"`
// This is the connector that can be used for password grant
PasswordConnector string `json:"passwordConnector"`
// PKCE configuration
PKCE PKCE `json:"pkce"`
}
// PKCE holds the PKCE (Proof Key for Code Exchange) configuration.
type PKCE struct {
// If true, PKCE is required for all authorization code flows.
Enforce bool `json:"enforce"`
// Supported code challenge methods. Defaults to ["S256", "plain"].
CodeChallengeMethodsSupported []string `json:"codeChallengeMethodsSupported"`
}
// Web is the config format for the HTTP server.
@ -333,7 +354,7 @@ func (s *Storage) UnmarshalJSON(b []byte) error {
Type string `json:"type"`
Config json.RawMessage `json:"config"`
}
if err := json.Unmarshal(b, &store); err != nil {
if err := configUnmarshaller(b, &store); err != nil {
return fmt.Errorf("parse storage: %v", err)
}
f, ok := storages[store.Type]
@ -346,7 +367,7 @@ func (s *Storage) UnmarshalJSON(b []byte) error {
data := []byte(store.Config)
if featureflags.ExpandEnv.Enabled() {
var rawMap map[string]interface{}
if err := json.Unmarshal(store.Config, &rawMap); err != nil {
if err := configUnmarshaller(store.Config, &rawMap); err != nil {
return fmt.Errorf("unmarshal config for env expansion: %v", err)
}
@ -363,7 +384,7 @@ func (s *Storage) UnmarshalJSON(b []byte) error {
data = expandedData
}
if err := json.Unmarshal(data, storageConfig); err != nil {
if err := configUnmarshaller(data, storageConfig); err != nil {
return fmt.Errorf("parse storage config: %v", err)
}
}
@ -449,7 +470,8 @@ type Connector struct {
Name string `json:"name"`
ID string `json:"id"`
Config server.ConnectorConfig `json:"config"`
Config server.ConnectorConfig `json:"config"`
GrantTypes []string `json:"grantTypes"`
}
// UnmarshalJSON allows Connector to implement the unmarshaler interface to
@ -460,9 +482,10 @@ func (c *Connector) UnmarshalJSON(b []byte) error {
Name string `json:"name"`
ID string `json:"id"`
Config json.RawMessage `json:"config"`
Config json.RawMessage `json:"config"`
GrantTypes []string `json:"grantTypes"`
}
if err := json.Unmarshal(b, &conn); err != nil {
if err := configUnmarshaller(b, &conn); err != nil {
return fmt.Errorf("parse connector: %v", err)
}
f, ok := server.ConnectorsConfig[conn.Type]
@ -475,7 +498,7 @@ func (c *Connector) UnmarshalJSON(b []byte) error {
data := []byte(conn.Config)
if featureflags.ExpandEnv.Enabled() {
var rawMap map[string]interface{}
if err := json.Unmarshal(conn.Config, &rawMap); err != nil {
if err := configUnmarshaller(conn.Config, &rawMap); err != nil {
return fmt.Errorf("unmarshal config for env expansion: %v", err)
}
@ -492,16 +515,17 @@ func (c *Connector) UnmarshalJSON(b []byte) error {
data = expandedData
}
if err := json.Unmarshal(data, connConfig); err != nil {
if err := configUnmarshaller(data, connConfig); err != nil {
return fmt.Errorf("parse connector config: %v", err)
}
}
*c = Connector{
Type: conn.Type,
Name: conn.Name,
ID: conn.ID,
Config: connConfig,
Type: conn.Type,
Name: conn.Name,
ID: conn.ID,
Config: connConfig,
GrantTypes: conn.GrantTypes,
}
return nil
}
@ -514,10 +538,11 @@ func ToStorageConnector(c Connector) (storage.Connector, error) {
}
return storage.Connector{
ID: c.ID,
Type: c.Type,
Name: c.Name,
Config: data,
ID: c.ID,
Type: c.Type,
Name: c.Name,
Config: data,
GrantTypes: c.GrantTypes,
}, nil
}
@ -546,6 +571,12 @@ type Logger struct {
// Format specifies the format to be used for logging.
Format string `json:"format"`
// ExcludeFields specifies log attribute keys that should be dropped from all
// log output. This is useful for suppressing PII fields like email, username,
// preferred_username, or groups in environments subject to GDPR or similar
// data-handling constraints.
ExcludeFields []string `json:"excludeFields"`
}
type RefreshToken struct {

7
cmd/dex/config_test.go

@ -107,6 +107,9 @@ connectors:
- type: mockCallback
id: mock
name: Example
grantTypes:
- authorization_code
- "urn:ietf:params:oauth:grant-type:token-exchange"
- type: oidc
id: google
name: Google
@ -202,6 +205,10 @@ additionalFeatures: [
ID: "mock",
Name: "Example",
Config: &mock.CallbackConfig{},
GrantTypes: []string{
"authorization_code",
"urn:ietf:params:oauth:grant-type:token-exchange",
},
},
{
Type: "oidc",

56
cmd/dex/excluding_handler.go

@ -0,0 +1,56 @@
package main
import (
"context"
"log/slog"
)
// excludingHandler is an slog.Handler wrapper that drops log attributes
// whose keys match a configured set. This allows PII fields like email,
// username, or groups to be redacted at the logger level rather than
// requiring per-callsite suppression logic.
type excludingHandler struct {
inner slog.Handler
exclude map[string]bool
}
func newExcludingHandler(inner slog.Handler, fields []string) slog.Handler {
if len(fields) == 0 {
return inner
}
m := make(map[string]bool, len(fields))
for _, f := range fields {
m[f] = true
}
return &excludingHandler{inner: inner, exclude: m}
}
func (h *excludingHandler) Enabled(ctx context.Context, level slog.Level) bool {
return h.inner.Enabled(ctx, level)
}
func (h *excludingHandler) Handle(ctx context.Context, record slog.Record) error {
// Rebuild the record without excluded attributes.
filtered := slog.NewRecord(record.Time, record.Level, record.Message, record.PC)
record.Attrs(func(a slog.Attr) bool {
if !h.exclude[a.Key] {
filtered.AddAttrs(a)
}
return true
})
return h.inner.Handle(ctx, filtered)
}
func (h *excludingHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
var kept []slog.Attr
for _, a := range attrs {
if !h.exclude[a.Key] {
kept = append(kept, a)
}
}
return &excludingHandler{inner: h.inner.WithAttrs(kept), exclude: h.exclude}
}
func (h *excludingHandler) WithGroup(name string) slog.Handler {
return &excludingHandler{inner: h.inner.WithGroup(name), exclude: h.exclude}
}

141
cmd/dex/excluding_handler_test.go

@ -0,0 +1,141 @@
package main
import (
"bytes"
"context"
"encoding/json"
"log/slog"
"testing"
)
func TestExcludingHandler(t *testing.T) {
tests := []struct {
name string
exclude []string
logAttrs []slog.Attr
wantKeys []string
absentKeys []string
}{
{
name: "no exclusions",
exclude: nil,
logAttrs: []slog.Attr{
slog.String("email", "user@example.com"),
slog.String("connector_id", "github"),
},
wantKeys: []string{"email", "connector_id"},
},
{
name: "exclude email",
exclude: []string{"email"},
logAttrs: []slog.Attr{
slog.String("email", "user@example.com"),
slog.String("connector_id", "github"),
},
wantKeys: []string{"connector_id"},
absentKeys: []string{"email"},
},
{
name: "exclude multiple fields",
exclude: []string{"email", "username", "groups"},
logAttrs: []slog.Attr{
slog.String("email", "user@example.com"),
slog.String("username", "johndoe"),
slog.String("connector_id", "github"),
slog.Any("groups", []string{"admin"}),
},
wantKeys: []string{"connector_id"},
absentKeys: []string{"email", "username", "groups"},
},
{
name: "exclude non-existent field is harmless",
exclude: []string{"nonexistent"},
logAttrs: []slog.Attr{
slog.String("email", "user@example.com"),
},
wantKeys: []string{"email"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var buf bytes.Buffer
inner := slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})
handler := newExcludingHandler(inner, tt.exclude)
logger := slog.New(handler)
attrs := make([]any, 0, len(tt.logAttrs)*2)
for _, a := range tt.logAttrs {
attrs = append(attrs, a)
}
logger.Info("test message", attrs...)
var result map[string]any
if err := json.Unmarshal(buf.Bytes(), &result); err != nil {
t.Fatalf("failed to parse log output: %v", err)
}
for _, key := range tt.wantKeys {
if _, ok := result[key]; !ok {
t.Errorf("expected key %q in log output", key)
}
}
for _, key := range tt.absentKeys {
if _, ok := result[key]; ok {
t.Errorf("expected key %q to be absent from log output", key)
}
}
})
}
}
func TestExcludingHandlerWithAttrs(t *testing.T) {
var buf bytes.Buffer
inner := slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})
handler := newExcludingHandler(inner, []string{"email"})
logger := slog.New(handler)
// Pre-bind an excluded attr via With
child := logger.With("email", "user@example.com", "connector_id", "github")
child.Info("login successful")
var result map[string]any
if err := json.Unmarshal(buf.Bytes(), &result); err != nil {
t.Fatalf("failed to parse log output: %v", err)
}
if _, ok := result["email"]; ok {
t.Error("expected email to be excluded from WithAttrs output")
}
if _, ok := result["connector_id"]; !ok {
t.Error("expected connector_id to be present")
}
}
func TestExcludingHandlerEnabled(t *testing.T) {
inner := slog.NewJSONHandler(&bytes.Buffer{}, &slog.HandlerOptions{Level: slog.LevelWarn})
handler := newExcludingHandler(inner, []string{"email"})
if handler.Enabled(context.Background(), slog.LevelInfo) {
t.Error("expected Info to be disabled when handler level is Warn")
}
if !handler.Enabled(context.Background(), slog.LevelWarn) {
t.Error("expected Warn to be enabled")
}
}
func TestExcludingHandlerNilFields(t *testing.T) {
var buf bytes.Buffer
inner := slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})
// With nil/empty fields, should return the inner handler directly
handler := newExcludingHandler(inner, nil)
if _, ok := handler.(*excludingHandler); ok {
t.Error("expected nil fields to return inner handler directly, not wrap it")
}
handler = newExcludingHandler(inner, []string{})
if _, ok := handler.(*excludingHandler); ok {
t.Error("expected empty fields to return inner handler directly, not wrap it")
}
}

4
cmd/dex/logger.go

@ -12,7 +12,7 @@ import (
var logFormats = []string{"json", "text"}
func newLogger(level slog.Level, format string) (*slog.Logger, error) {
func newLogger(level slog.Level, format string, excludeFields []string) (*slog.Logger, error) {
var handler slog.Handler
switch strings.ToLower(format) {
case "", "text":
@ -27,6 +27,8 @@ func newLogger(level slog.Level, format string) (*slog.Logger, error) {
return nil, fmt.Errorf("log format is not one of the supported values (%s): %s", strings.Join(logFormats, ", "), format)
}
handler = newExcludingHandler(handler, excludeFields)
return slog.New(newRequestContextHandler(handler)), nil
}

36
cmd/dex/serve.go

@ -97,13 +97,19 @@ func runServe(options serveOptions) error {
}
var c Config
if err := yaml.Unmarshal(configData, &c); err != nil {
jsonConfigData, err := yaml.YAMLToJSON(configData)
if err != nil {
return fmt.Errorf("error parse config file %s: %v", configFile, err)
}
if err := configUnmarshaller(jsonConfigData, &c); err != nil {
return fmt.Errorf("error unmarshalling config file %s: %v", configFile, err)
}
applyConfigOverrides(options, &c)
logger, err := newLogger(c.Logger.Level, c.Logger.Format)
logger, err := newLogger(c.Logger.Level, c.Logger.Format, c.Logger.ExcludeFields)
if err != nil {
return fmt.Errorf("invalid config: %v", err)
}
@ -249,6 +255,11 @@ func runServe(options serveOptions) error {
if c.Config == nil {
return fmt.Errorf("invalid config: no config field for connector %q", c.ID)
}
for _, gt := range c.GrantTypes {
if !server.ConnectorGrantTypes[gt] {
return fmt.Errorf("invalid config: unknown grant type %q for connector %q", gt, c.ID)
}
}
logger.Info("config connector", "connector_id", c.ID)
// convert to a storage connector object
@ -351,11 +362,15 @@ func runServe(options serveOptions) error {
}
serverConfig := server.Config{
AllowedGrantTypes: c.OAuth2.GrantTypes,
SupportedResponseTypes: c.OAuth2.ResponseTypes,
SkipApprovalScreen: c.OAuth2.SkipApprovalScreen,
AlwaysShowLoginScreen: c.OAuth2.AlwaysShowLoginScreen,
PasswordConnector: c.OAuth2.PasswordConnector,
AllowedGrantTypes: c.OAuth2.GrantTypes,
SupportedResponseTypes: c.OAuth2.ResponseTypes,
SkipApprovalScreen: c.OAuth2.SkipApprovalScreen,
AlwaysShowLoginScreen: c.OAuth2.AlwaysShowLoginScreen,
PasswordConnector: c.OAuth2.PasswordConnector,
PKCE: server.PKCEConfig{
Enforce: c.OAuth2.PKCE.Enforce,
CodeChallengeMethodsSupported: c.OAuth2.PKCE.CodeChallengeMethodsSupported,
},
Headers: c.Web.Headers.ToHTTPHeader(),
AllowedOrigins: c.Web.AllowedOrigins,
AllowedHeaders: c.Web.AllowedHeaders,
@ -382,7 +397,7 @@ func runServe(options serveOptions) error {
if c.Expiry.DeviceRequests != "" {
deviceRequests, err := time.ParseDuration(c.Expiry.DeviceRequests)
if err != nil {
return fmt.Errorf("invalid config value %q for device request expiry: %v", c.Expiry.AuthRequests, err)
return fmt.Errorf("invalid config value %q for device request expiry: %v", c.Expiry.DeviceRequests, err)
}
logger.Info("config device requests", "valid_for", deviceRequests)
serverConfig.DeviceRequestsValidFor = deviceRequests
@ -556,7 +571,7 @@ func runServe(options serveOptions) error {
grpcListener, err := net.Listen("tcp", c.GRPC.Addr)
if err != nil {
return fmt.Errorf("listening (grcp) on %s: %w", c.GRPC.Addr, err)
return fmt.Errorf("listening (grpc) on %s: %w", c.GRPC.Addr, err)
}
grpcSrv := grpc.NewServer(grpcOptions...)
@ -616,6 +631,9 @@ func applyConfigOverrides(options serveOptions, config *Config) {
"urn:ietf:params:oauth:grant-type:device_code",
"urn:ietf:params:oauth:grant-type:token-exchange",
}
if featureflags.ClientCredentialGrantEnabledByDefault.Enabled() {
config.OAuth2.GrantTypes = append(config.OAuth2.GrantTypes, "client_credentials")
}
}
}

6
cmd/dex/serve_test.go

@ -9,19 +9,19 @@ import (
func TestNewLogger(t *testing.T) {
t.Run("JSON", func(t *testing.T) {
logger, err := newLogger(slog.LevelInfo, "json")
logger, err := newLogger(slog.LevelInfo, "json", nil)
require.NoError(t, err)
require.NotEqual(t, (*slog.Logger)(nil), logger)
})
t.Run("Text", func(t *testing.T) {
logger, err := newLogger(slog.LevelError, "text")
logger, err := newLogger(slog.LevelError, "text", nil)
require.NoError(t, err)
require.NotEqual(t, (*slog.Logger)(nil), logger)
})
t.Run("Unknown", func(t *testing.T) {
logger, err := newLogger(slog.LevelError, "gofmt")
logger, err := newLogger(slog.LevelError, "gofmt", nil)
require.Error(t, err)
require.Equal(t, "log format is not one of the supported values (json, text): gofmt", err.Error())
require.Equal(t, (*slog.Logger)(nil), logger)

19
config.yaml.dist

@ -72,6 +72,8 @@ web:
# logger:
# level: "debug"
# format: "text" # can also be "json"
# # Drop these attribute keys from all log output (useful for GDPR/PII suppression).
# # excludeFields: [email, username, preferred_username, groups]
# gRPC API configuration
# Uncomment this block to enable the gRPC API.
@ -109,6 +111,13 @@ web:
#
# # Uncomment to use a specific connector for password grants
# passwordConnector: local
#
# # PKCE (Proof Key for Code Exchange) configuration
# pkce:
# # If true, PKCE is required for all authorization code flows (OAuth 2.1).
# enforce: false
# # Supported code challenge methods. Defaults to ["S256", "plain"].
# codeChallengeMethodsSupported: ["S256", "plain"]
# Static clients registered in Dex by default.
#
@ -134,6 +143,16 @@ web:
# - /device/callback
# name: 'Static Client for Device Flow'
# public: true
#
# # Example of a client restricted to specific connectors
# - id: restricted-client
# secret: restricted-client-secret
# redirectURIs:
# - 'https://app.example.com/callback'
# name: 'Restricted Client'
# allowedConnectors:
# - github
# - google
# Connectors are used to authenticate users against upstream identity providers.
#

10
connector/atlassiancrowd/atlassiancrowd.go

@ -87,16 +87,16 @@ func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, erro
return &crowdConnector{Config: *c, logger: logger.With(slog.Group("connector", "type", "atlassiancrowd", "id", id))}, nil
}
type crowdConnector struct {
Config
logger *slog.Logger
}
var (
_ connector.PasswordConnector = (*crowdConnector)(nil)
_ connector.RefreshConnector = (*crowdConnector)(nil)
)
type crowdConnector struct {
Config
logger *slog.Logger
}
type refreshData struct {
Username string `json:"username"`
}

10
connector/authproxy/authproxy.go

@ -68,6 +68,8 @@ func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, erro
}, nil
}
var _ connector.CallbackConnector = (*callback)(nil)
// Callback is a connector which returns an identity with the HTTP header
// X-Remote-User as verified email.
type callback struct {
@ -83,20 +85,20 @@ type callback struct {
}
// LoginURL returns the URL to redirect the user to login with.
func (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) {
func (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, []byte, error) {
u, err := url.Parse(callbackURL)
if err != nil {
return "", fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err)
return "", nil, fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err)
}
u.Path += m.pathSuffix
v := u.Query()
v.Set("state", state)
u.RawQuery = v.Encode()
return u.String(), nil
return u.String(), nil, nil
}
// HandleCallback parses the request and returns the user's identity
func (m *callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) {
func (m *callback) HandleCallback(s connector.Scopes, _ []byte, r *http.Request) (connector.Identity, error) {
remoteUser := r.Header.Get(m.userHeader)
if remoteUser == "" {
return connector.Identity{}, fmt.Errorf("required HTTP header %s is not set", m.userHeader)

12
connector/authproxy/authproxy_test.go

@ -36,7 +36,7 @@ func TestUser(t *testing.T) {
"X-Remote-User": {testUsername},
}
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req)
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req)
expectNil(t, err)
// If not specified, the userID and email should fall back to the remote user
@ -62,7 +62,7 @@ func TestExtraHeaders(t *testing.T) {
"X-Remote-User-Email": {testEmail},
}
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req)
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req)
expectNil(t, err)
expectEquals(t, ident.UserID, testUserID)
@ -85,7 +85,7 @@ func TestSingleGroup(t *testing.T) {
"X-Remote-Group": {testGroup1},
}
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req)
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req)
expectNil(t, err)
expectEquals(t, ident.UserID, testEmail)
@ -106,7 +106,7 @@ func TestMultipleGroup(t *testing.T) {
"X-Remote-Group": {testGroup1 + ", " + testGroup2 + ", " + testGroup3 + ", " + testGroup4},
}
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req)
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req)
expectNil(t, err)
expectEquals(t, ident.UserID, testEmail)
@ -132,7 +132,7 @@ func TestMultipleGroupWithCustomSeparator(t *testing.T) {
"X-Remote-Group": {testGroup1 + ";" + testGroup2 + ";" + testGroup3 + ";" + testGroup4},
}
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req)
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req)
expectNil(t, err)
expectEquals(t, ident.UserID, testEmail)
@ -158,7 +158,7 @@ func TestStaticGroup(t *testing.T) {
"X-Remote-Group": {testGroup1 + ", " + testGroup2 + ", " + testGroup3 + ", " + testGroup4},
}
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req)
ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req)
expectNil(t, err)
expectEquals(t, ident.UserID, testEmail)

4
connector/github/github.go

@ -28,6 +28,8 @@ const (
// GitHub requires this scope to access '/user/teams' and '/orgs' API endpoints
// which are used when a client includes the 'groups' scope.
scopeOrgs = "read:org"
// githubAPIVersion pins the GitHub REST API version used in requests.
githubAPIVersion = "2022-11-28"
)
// Pagination URL patterns
@ -462,6 +464,7 @@ func get(ctx context.Context, client *http.Client, apiURL string, v interface{})
return "", fmt.Errorf("github: new req: %v", err)
}
req = req.WithContext(ctx)
req.Header.Set("X-GitHub-Api-Version", githubAPIVersion)
resp, err := client.Do(req)
if err != nil {
return "", fmt.Errorf("github: get URL %v", err)
@ -659,6 +662,7 @@ func (c *githubConnector) userInOrg(ctx context.Context, client *http.Client, us
return false, fmt.Errorf("github: new req: %v", err)
}
req = req.WithContext(ctx)
req.Header.Set("X-GitHub-Api-Version", githubAPIVersion)
resp, err := client.Do(req)
if err != nil {
return false, fmt.Errorf("github: get teams: %v", err)

15
connector/github/github_test.go

@ -485,6 +485,21 @@ func Test_Open_PreferredDomainConfig(t *testing.T) {
}
}
// TestGetSendsAPIVersionHeader checks that every request issued through the
// get helper carries the pinned X-GitHub-Api-Version header.
func TestGetSendsAPIVersionHeader(t *testing.T) {
	var receivedVersion string
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		receivedVersion = r.Header.Get("X-GitHub-Api-Version")
		w.Header().Add("Content-Type", "application/json")
		json.NewEncoder(w).Encode([]org{})
	})
	srv := httptest.NewTLSServer(handler)
	defer srv.Close()

	var orgs []org
	_, err := get(context.Background(), newClient(), srv.URL+"/user/orgs", &orgs)
	expectNil(t, err)
	expectEquals(t, receivedVersion, githubAPIVersion)
}
func newTestServer(responses map[string]testResponse) *httptest.Server {
var s *httptest.Server
s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

33
connector/gitlab/gitlab.go

@ -87,8 +87,9 @@ type connectorData struct {
}
var (
_ connector.CallbackConnector = (*gitlabConnector)(nil)
_ connector.RefreshConnector = (*gitlabConnector)(nil)
_ connector.CallbackConnector = (*gitlabConnector)(nil)
_ connector.RefreshConnector = (*gitlabConnector)(nil)
_ connector.TokenIdentityConnector = (*gitlabConnector)(nil)
)
type gitlabConnector struct {
@ -243,6 +244,34 @@ func (c *gitlabConnector) Refresh(ctx context.Context, s connector.Scopes, ident
}
}
// TokenIdentity is used for token exchange, verifying a GitLab access token
// and returning the associated user identity. This enables direct authentication
// with Dex using an existing GitLab token without going through the OAuth flow.
//
// Note: The connector decides whether to fetch groups based on its configuration
// (groups filter, getGroupsPermission), not on the scopes from the token exchange
// request. The server will then decide whether to include groups in the final
// token based on the requested scopes. This matches the behavior of other
// connectors (e.g., OIDC).
func (c *gitlabConnector) TokenIdentity(ctx context.Context, _, subjectToken string) (connector.Identity, error) {
	// Route all lookups through the connector's HTTP client when one is configured.
	if c.httpClient != nil {
		ctx = context.WithValue(ctx, oauth2.HTTPClient, c.httpClient)
	}

	tok := &oauth2.Token{
		AccessToken: subjectToken,
		TokenType:   "Bearer", // GitLab tokens are typically Bearer tokens even if the type is not explicitly provided.
	}

	// Scopes are not provided in token exchange, so groups are requested every
	// time here and returned only if the connector configuration calls for them.
	return c.identity(ctx, connector.Scopes{Groups: true}, tok)
}
// groupsRequired reports whether group information must be fetched: either the
// client requested the groups scope, or the connector is configured with a
// groups filter.
func (c *gitlabConnector) groupsRequired(groupScope bool) bool {
	if groupScope {
		return true
	}
	return len(c.groups) > 0
}

85
connector/gitlab/gitlab_test.go

@ -485,3 +485,88 @@ func expectEquals(t *testing.T, a interface{}, b interface{}) {
t.Errorf("Expected %+v to equal %+v", a, b)
}
}
// TestTokenIdentity exercises the token-exchange path of the GitLab connector
// against a fake API server.
//
// Note: These tests verify that the connector returns groups based on its
// configuration. The actual inclusion of groups in the final Dex token depends
// on the 'groups' scope in the token exchange request, which is handled by the
// Dex server, not the connector.
func TestTokenIdentity(t *testing.T) {
	cases := []struct {
		name                string
		userInfo            userInfo
		groups              []string
		getGroupsPermission bool
		useLoginAsID        bool
		expectUserID        string
		expectGroups        []string
	}{
		{
			name:         "without groups config",
			expectUserID: "12345678",
			expectGroups: nil,
		},
		{
			name: "with groups filter",
			userInfo: userInfo{
				Groups: []string{"team-1", "team-2"},
			},
			groups:       []string{"team-1"},
			expectUserID: "12345678",
			expectGroups: []string{"team-1"},
		},
		{
			name: "with groups permission",
			userInfo: userInfo{
				Groups:               []string{"ops", "dev"},
				OwnerPermission:      []string{"ops"},
				DeveloperPermission:  []string{"dev"},
				MaintainerPermission: []string{},
			},
			getGroupsPermission: true,
			expectUserID:        "12345678",
			expectGroups:        []string{"ops", "dev", "ops:owner", "dev:developer"},
		},
		{
			name:         "with useLoginAsID",
			useLoginAsID: true,
			expectUserID: "joebloggs",
			expectGroups: nil,
		},
	}

	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			srv := newTestServer(map[string]interface{}{
				"/api/v4/user": gitlabUser{
					Email:    "some@email.com",
					ID:       12345678,
					Name:     "Joe Bloggs",
					Username: "joebloggs",
				},
				"/oauth/userinfo": tt.userInfo,
			})
			defer srv.Close()

			conn := gitlabConnector{
				baseURL:             srv.URL,
				httpClient:          newClient(),
				groups:              tt.groups,
				getGroupsPermission: tt.getGroupsPermission,
				useLoginAsID:        tt.useLoginAsID,
			}

			ident, err := conn.TokenIdentity(context.Background(), "urn:ietf:params:oauth:token-type:access_token", "test-access-token")
			expectNil(t, err)
			expectEquals(t, ident.UserID, tt.expectUserID)
			expectEquals(t, ident.Username, "Joe Bloggs")
			expectEquals(t, ident.PreferredUsername, "joebloggs")
			expectEquals(t, ident.Email, "some@email.com")
			expectEquals(t, ident.EmailVerified, true)
			expectEquals(t, ident.Groups, tt.expectGroups)
		})
	}
}

10
connector/keystone/keystone.go

@ -15,6 +15,11 @@ import (
"github.com/dexidp/dex/connector"
)
var (
_ connector.PasswordConnector = (*conn)(nil)
_ connector.RefreshConnector = (*conn)(nil)
)
type conn struct {
Domain domainKeystone
Host string
@ -103,11 +108,6 @@ type userResponse struct {
} `json:"user"`
}
var (
_ connector.PasswordConnector = &conn{}
_ connector.RefreshConnector = &conn{}
)
// Open returns an authentication strategy using Keystone.
func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, error) {
_, err := uuid.Parse(c.Domain)

10
connector/ldap/ldap.go

@ -301,6 +301,11 @@ func (c *Config) openConnector(logger *slog.Logger) (*ldapConnector, error) {
return &ldapConnector{*c, userSearchScope, groupSearchScope, tlsConfig, logger}, nil
}
var (
_ connector.PasswordConnector = (*ldapConnector)(nil)
_ connector.RefreshConnector = (*ldapConnector)(nil)
)
type ldapConnector struct {
Config
@ -312,11 +317,6 @@ type ldapConnector struct {
logger *slog.Logger
}
var (
_ connector.PasswordConnector = (*ldapConnector)(nil)
_ connector.RefreshConnector = (*ldapConnector)(nil)
)
// do initializes a connection to the LDAP directory and passes it to the
// provided function. It then performs appropriate teardown or reuse before
// returning.

12
connector/linkedin/linkedin.go

@ -49,18 +49,16 @@ type connectorData struct {
AccessToken string `json:"accessToken"`
}
type linkedInConnector struct {
oauth2Config *oauth2.Config
logger *slog.Logger
}
// LinkedIn doesn't provide refresh tokens, so refresh tokens issued by Dex
// will expire in 60 days (default LinkedIn token lifetime).
var (
_ connector.CallbackConnector = (*linkedInConnector)(nil)
_ connector.RefreshConnector = (*linkedInConnector)(nil)
)
type linkedInConnector struct {
oauth2Config *oauth2.Config
logger *slog.Logger
}
// LoginURL returns an access token request URL
func (c *linkedInConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, []byte, error) {
if c.oauth2Config.RedirectURL != callbackURL {

12
connector/mock/connectortest.go

@ -29,10 +29,9 @@ func NewCallbackConnector(logger *slog.Logger) connector.Connector {
}
var (
_ connector.CallbackConnector = &Callback{}
_ connector.PasswordConnector = passwordConnector{}
_ connector.RefreshConnector = passwordConnector{}
_ connector.CallbackConnector = &Callback{}
_ connector.RefreshConnector = &Callback{}
_ connector.TokenIdentityConnector = &Callback{}
)
// Callback is a connector that requires no user interaction and always returns the same identity.
@ -97,6 +96,11 @@ func (c *PasswordConfig) Open(id string, logger *slog.Logger) (connector.Connect
return &passwordConnector{c.Username, c.Password, logger}, nil
}
var (
_ connector.PasswordConnector = passwordConnector{}
_ connector.RefreshConnector = passwordConnector{}
)
type passwordConnector struct {
username string
password string

10
connector/oauth/oauth.go

@ -16,6 +16,8 @@ import (
"github.com/dexidp/dex/pkg/httpclient"
)
var _ connector.CallbackConnector = (*oauthConnector)(nil)
type oauthConnector struct {
clientID string
clientSecret string
@ -116,9 +118,9 @@ func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, erro
return oauthConn, err
}
func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) {
func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, []byte, error) {
if c.redirectURI != callbackURL {
return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI)
return "", nil, fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI)
}
oauth2Config := &oauth2.Config{
@ -129,10 +131,10 @@ func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state st
Scopes: c.scopes,
}
return oauth2Config.AuthCodeURL(state), nil
return oauth2Config.AuthCodeURL(state), nil, nil
}
func (c *oauthConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) {
func (c *oauthConnector) HandleCallback(s connector.Scopes, _ []byte, r *http.Request) (identity connector.Identity, err error) {
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
return identity, errors.New(q.Get("error_description"))

10
connector/oauth/oauth_test.go

@ -50,7 +50,7 @@ func TestLoginURL(t *testing.T) {
conn := newConnector(t, testServer.URL)
loginURL, err := conn.LoginURL(connector.Scopes{}, conn.redirectURI, "some-state")
loginURL, _, err := conn.LoginURL(connector.Scopes{}, conn.redirectURI, "some-state")
assert.Equal(t, err, nil)
expectedURL, err := url.Parse(testServer.URL + "/authorize")
@ -86,7 +86,7 @@ func TestHandleCallBackForGroupsInUserInfo(t *testing.T) {
conn := newConnector(t, testServer.URL)
req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallBackForGroupsInUserInfo")
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req)
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req)
assert.Equal(t, err, nil)
sort.Strings(identity.Groups)
@ -122,7 +122,7 @@ func TestHandleCallBackForGroupMapsInUserInfo(t *testing.T) {
conn := newConnector(t, testServer.URL)
req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallBackForGroupMapsInUserInfo")
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req)
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req)
assert.Equal(t, err, nil)
sort.Strings(identity.Groups)
@ -156,7 +156,7 @@ func TestHandleCallBackForGroupsInToken(t *testing.T) {
conn := newConnector(t, testServer.URL)
req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallBackForGroupsInToken")
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req)
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req)
assert.Equal(t, err, nil)
assert.Equal(t, len(identity.Groups), 1)
@ -186,7 +186,7 @@ func TestHandleCallbackForNumericUserID(t *testing.T) {
conn := newConnector(t, testServer.URL)
req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallbackForNumericUserID")
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req)
identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req)
assert.Equal(t, err, nil)
assert.Equal(t, identity.UserID, "1000")

5
connector/oidc/oidc.go

@ -379,8 +379,9 @@ func (c *Config) Open(id string, logger *slog.Logger) (conn connector.Connector,
}
var (
_ connector.CallbackConnector = (*oidcConnector)(nil)
_ connector.RefreshConnector = (*oidcConnector)(nil)
_ connector.CallbackConnector = (*oidcConnector)(nil)
_ connector.RefreshConnector = (*oidcConnector)(nil)
_ connector.TokenIdentityConnector = (*oidcConnector)(nil)
)
type oidcConnector struct {

70
connector/saml/saml.go

@ -3,8 +3,10 @@ package saml
import (
"bytes"
"context"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"encoding/xml"
"fmt"
@ -230,6 +232,11 @@ func (c *Config) openConnector(logger *slog.Logger) (*provider, error) {
return p, nil
}
// Compile-time assertions that provider implements the connector interfaces
// it is expected to satisfy.
var (
	_ connector.SAMLConnector    = (*provider)(nil)
	_ connector.RefreshConnector = (*provider)(nil)
)
type provider struct {
entityIssuer string
ssoIssuer string
@ -255,6 +262,36 @@ type provider struct {
logger *slog.Logger
}
// cachedIdentity stores the identity from SAML assertion for refresh token support.
// Since SAML has no native refresh mechanism, we cache the identity obtained during
// the initial authentication and return it on subsequent refresh requests.
// The struct is serialized to JSON and carried in the identity's ConnectorData.
type cachedIdentity struct {
	UserID            string   `json:"userId"`
	Username          string   `json:"username"`
	PreferredUsername string   `json:"preferredUsername"`
	Email             string   `json:"email"`
	EmailVerified     bool     `json:"emailVerified"`
	Groups            []string `json:"groups,omitempty"` // omitted from the JSON when empty
}
// marshalCachedIdentity serializes the identity into ConnectorData for refresh token support.
func marshalCachedIdentity(ident connector.Identity) (connector.Identity, error) {
ci := cachedIdentity{
UserID: ident.UserID,
Username: ident.Username,
PreferredUsername: ident.PreferredUsername,
Email: ident.Email,
EmailVerified: ident.EmailVerified,
Groups: ident.Groups,
}
connectorData, err := json.Marshal(ci)
if err != nil {
return ident, fmt.Errorf("saml: failed to marshal cached identity: %v", err)
}
ident.ConnectorData = connectorData
return ident, nil
}
func (p *provider) POSTData(s connector.Scopes, id string) (action, value string, err error) {
r := &authnRequest{
ProtocolBinding: bindingPOST,
@ -405,7 +442,7 @@ func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo str
if len(p.allowedGroups) == 0 && (!s.Groups || p.groupsAttr == "") {
// Groups not requested or not configured. We're done.
return ident, nil
return marshalCachedIdentity(ident)
}
if len(p.allowedGroups) > 0 && (!s.Groups || p.groupsAttr == "") {
@ -431,7 +468,7 @@ func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo str
if len(p.allowedGroups) == 0 {
// No allowed groups set, just return the ident
return ident, nil
return marshalCachedIdentity(ident)
}
// Look for membership in one of the allowed groups
@ -447,6 +484,35 @@ func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo str
}
// Otherwise, we're good
return marshalCachedIdentity(ident)
}
// Refresh implements connector.RefreshConnector.
// Since SAML has no native refresh mechanism, this method returns the cached
// identity from the initial SAML assertion stored in ConnectorData.
func (p *provider) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) {
	if len(ident.ConnectorData) == 0 {
		return ident, fmt.Errorf("saml: no connector data available for refresh")
	}

	var cached cachedIdentity
	if err := json.Unmarshal(ident.ConnectorData, &cached); err != nil {
		return ident, fmt.Errorf("saml: failed to unmarshal cached identity: %v", err)
	}

	ident.UserID = cached.UserID
	ident.Username = cached.Username
	ident.PreferredUsername = cached.PreferredUsername
	ident.Email = cached.Email
	ident.EmailVerified = cached.EmailVerified

	// Groups are only surfaced when the client requested the groups scope.
	ident.Groups = nil
	if s.Groups {
		ident.Groups = cached.Groups
	}

	return ident, nil
}

327
connector/saml/saml_test.go

@ -1,8 +1,10 @@
package saml
import (
"context"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"log/slog"
@ -448,6 +450,24 @@ func (r responseTest) run(t *testing.T) {
}
sort.Strings(ident.Groups)
sort.Strings(r.wantIdent.Groups)
// Verify ConnectorData contains valid cached identity, then clear it
// for the main identity comparison (ConnectorData is an implementation
// detail of refresh token support).
if len(ident.ConnectorData) > 0 {
var ci cachedIdentity
if err := json.Unmarshal(ident.ConnectorData, &ci); err != nil {
t.Fatalf("failed to unmarshal ConnectorData: %v", err)
}
if ci.UserID != ident.UserID {
t.Errorf("cached identity UserID mismatch: got %q, want %q", ci.UserID, ident.UserID)
}
if ci.Email != ident.Email {
t.Errorf("cached identity Email mismatch: got %q, want %q", ci.Email, ident.Email)
}
}
ident.ConnectorData = nil
if diff := pretty.Compare(ident, r.wantIdent); diff != "" {
t.Error(diff)
}
@ -589,3 +609,310 @@ func TestVerifySignedMessageAndSignedAssertion(t *testing.T) {
func TestVerifyUnsignedMessageAndUnsignedAssertion(t *testing.T) {
runVerify(t, "testdata/idp-cert.pem", "testdata/idp-resp.xml", false)
}
// TestSAMLRefresh exercises the SAML connector's refresh support, which replays
// the identity cached in ConnectorData (SAML itself has no refresh protocol).
//
// NOTE(review): all subtests share one connector instance, and several of them
// overwrite conn.now — the subtests are intentionally sequential and stateful.
func TestSAMLRefresh(t *testing.T) {
	// Create a provider using the same pattern as existing tests.
	c := Config{
		CA:           "testdata/ca.crt",
		UsernameAttr: "Name",
		EmailAttr:    "email",
		GroupsAttr:   "groups",
		RedirectURI:  "http://127.0.0.1:5556/dex/callback",
		SSOURL:       "http://foo.bar/",
	}
	conn, err := c.openConnector(slog.New(slog.DiscardHandler))
	if err != nil {
		t.Fatal(err)
	}

	t.Run("SuccessfulRefresh", func(t *testing.T) {
		// Hand-built cached identity stands in for a prior SAML assertion.
		ci := cachedIdentity{
			UserID:            "test-user-id",
			Username:          "testuser",
			PreferredUsername: "testuser",
			Email:             "test@example.com",
			EmailVerified:     true,
			Groups:            []string{"group1", "group2"},
		}
		connectorData, err := json.Marshal(ci)
		if err != nil {
			t.Fatal(err)
		}
		ident := connector.Identity{
			UserID:        "old-id",
			Username:      "old-name",
			ConnectorData: connectorData,
		}
		refreshed, err := conn.Refresh(context.Background(), connector.Scopes{Groups: true}, ident)
		if err != nil {
			t.Fatalf("Refresh failed: %v", err)
		}
		if refreshed.UserID != "test-user-id" {
			t.Errorf("expected UserID %q, got %q", "test-user-id", refreshed.UserID)
		}
		if refreshed.Username != "testuser" {
			t.Errorf("expected Username %q, got %q", "testuser", refreshed.Username)
		}
		if refreshed.PreferredUsername != "testuser" {
			t.Errorf("expected PreferredUsername %q, got %q", "testuser", refreshed.PreferredUsername)
		}
		if refreshed.Email != "test@example.com" {
			t.Errorf("expected Email %q, got %q", "test@example.com", refreshed.Email)
		}
		if !refreshed.EmailVerified {
			t.Error("expected EmailVerified to be true")
		}
		if len(refreshed.Groups) != 2 || refreshed.Groups[0] != "group1" || refreshed.Groups[1] != "group2" {
			t.Errorf("expected groups [group1, group2], got %v", refreshed.Groups)
		}
		// ConnectorData should be preserved through refresh
		if len(refreshed.ConnectorData) == 0 {
			t.Error("expected ConnectorData to be preserved")
		}
	})

	t.Run("RefreshPreservesConnectorData", func(t *testing.T) {
		ci := cachedIdentity{
			UserID:        "user-123",
			Username:      "alice",
			Email:         "alice@example.com",
			EmailVerified: true,
		}
		connectorData, err := json.Marshal(ci)
		if err != nil {
			t.Fatal(err)
		}
		ident := connector.Identity{
			UserID:        "old-id",
			ConnectorData: connectorData,
		}
		refreshed, err := conn.Refresh(context.Background(), connector.Scopes{}, ident)
		if err != nil {
			t.Fatalf("Refresh failed: %v", err)
		}
		// Verify the refreshed identity can be refreshed again (round-trip)
		var roundTrip cachedIdentity
		if err := json.Unmarshal(refreshed.ConnectorData, &roundTrip); err != nil {
			t.Fatalf("failed to unmarshal ConnectorData after refresh: %v", err)
		}
		if roundTrip.UserID != "user-123" {
			t.Errorf("round-trip UserID mismatch: got %q, want %q", roundTrip.UserID, "user-123")
		}
	})

	t.Run("EmptyConnectorData", func(t *testing.T) {
		// Refresh must fail when no cached identity is available.
		ident := connector.Identity{
			UserID:        "test-id",
			ConnectorData: nil,
		}
		_, err := conn.Refresh(context.Background(), connector.Scopes{}, ident)
		if err == nil {
			t.Error("expected error for empty ConnectorData")
		}
	})

	t.Run("InvalidJSON", func(t *testing.T) {
		// Corrupt ConnectorData must surface an unmarshal error, not panic.
		ident := connector.Identity{
			UserID:        "test-id",
			ConnectorData: []byte("not-json"),
		}
		_, err := conn.Refresh(context.Background(), connector.Scopes{}, ident)
		if err == nil {
			t.Error("expected error for invalid JSON")
		}
	})

	t.Run("HandlePOSTThenRefresh", func(t *testing.T) {
		// Full integration: HandlePOST → get ConnectorData → Refresh → verify identity
		now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z")
		if err != nil {
			t.Fatal(err)
		}
		conn.now = func() time.Time { return now }
		resp, err := os.ReadFile("testdata/good-resp.xml")
		if err != nil {
			t.Fatal(err)
		}
		samlResp := base64.StdEncoding.EncodeToString(resp)
		scopes := connector.Scopes{
			OfflineAccess: true,
			Groups:        true,
		}
		ident, err := conn.HandlePOST(scopes, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m")
		if err != nil {
			t.Fatalf("HandlePOST failed: %v", err)
		}
		if len(ident.ConnectorData) == 0 {
			t.Fatal("expected ConnectorData to be set after HandlePOST")
		}
		// Now refresh using the ConnectorData from HandlePOST
		refreshed, err := conn.Refresh(context.Background(), scopes, ident)
		if err != nil {
			t.Fatalf("Refresh failed: %v", err)
		}
		if refreshed.UserID != ident.UserID {
			t.Errorf("UserID mismatch: got %q, want %q", refreshed.UserID, ident.UserID)
		}
		if refreshed.Username != ident.Username {
			t.Errorf("Username mismatch: got %q, want %q", refreshed.Username, ident.Username)
		}
		if refreshed.Email != ident.Email {
			t.Errorf("Email mismatch: got %q, want %q", refreshed.Email, ident.Email)
		}
		if refreshed.EmailVerified != ident.EmailVerified {
			t.Errorf("EmailVerified mismatch: got %v, want %v", refreshed.EmailVerified, ident.EmailVerified)
		}
		sort.Strings(refreshed.Groups)
		sort.Strings(ident.Groups)
		if len(refreshed.Groups) != len(ident.Groups) {
			t.Errorf("Groups length mismatch: got %d, want %d", len(refreshed.Groups), len(ident.Groups))
		}
		for i := range ident.Groups {
			if i < len(refreshed.Groups) && refreshed.Groups[i] != ident.Groups[i] {
				t.Errorf("Groups[%d] mismatch: got %q, want %q", i, refreshed.Groups[i], ident.Groups[i])
			}
		}
	})

	t.Run("HandlePOSTThenDoubleRefresh", func(t *testing.T) {
		// Verify that refresh tokens can be chained: HandlePOST → Refresh → Refresh
		now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z")
		if err != nil {
			t.Fatal(err)
		}
		conn.now = func() time.Time { return now }
		resp, err := os.ReadFile("testdata/good-resp.xml")
		if err != nil {
			t.Fatal(err)
		}
		samlResp := base64.StdEncoding.EncodeToString(resp)
		scopes := connector.Scopes{OfflineAccess: true, Groups: true}
		ident, err := conn.HandlePOST(scopes, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m")
		if err != nil {
			t.Fatalf("HandlePOST failed: %v", err)
		}
		// First refresh
		refreshed1, err := conn.Refresh(context.Background(), scopes, ident)
		if err != nil {
			t.Fatalf("first Refresh failed: %v", err)
		}
		if len(refreshed1.ConnectorData) == 0 {
			t.Fatal("expected ConnectorData after first refresh")
		}
		// Second refresh using output of first refresh
		refreshed2, err := conn.Refresh(context.Background(), scopes, refreshed1)
		if err != nil {
			t.Fatalf("second Refresh failed: %v", err)
		}
		// All fields should match original
		if refreshed2.UserID != ident.UserID {
			t.Errorf("UserID mismatch after double refresh: got %q, want %q", refreshed2.UserID, ident.UserID)
		}
		if refreshed2.Email != ident.Email {
			t.Errorf("Email mismatch after double refresh: got %q, want %q", refreshed2.Email, ident.Email)
		}
		if refreshed2.Username != ident.Username {
			t.Errorf("Username mismatch after double refresh: got %q, want %q", refreshed2.Username, ident.Username)
		}
	})

	t.Run("HandlePOSTWithAssertionSignedThenRefresh", func(t *testing.T) {
		// Test with assertion-signed.xml (signature on assertion, not response)
		now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z")
		if err != nil {
			t.Fatal(err)
		}
		conn.now = func() time.Time { return now }
		resp, err := os.ReadFile("testdata/assertion-signed.xml")
		if err != nil {
			t.Fatal(err)
		}
		samlResp := base64.StdEncoding.EncodeToString(resp)
		scopes := connector.Scopes{OfflineAccess: true, Groups: true}
		ident, err := conn.HandlePOST(scopes, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m")
		if err != nil {
			t.Fatalf("HandlePOST with assertion-signed failed: %v", err)
		}
		if len(ident.ConnectorData) == 0 {
			t.Fatal("expected ConnectorData after HandlePOST with assertion-signed")
		}
		refreshed, err := conn.Refresh(context.Background(), scopes, ident)
		if err != nil {
			t.Fatalf("Refresh after assertion-signed HandlePOST failed: %v", err)
		}
		if refreshed.Email != ident.Email {
			t.Errorf("Email mismatch: got %q, want %q", refreshed.Email, ident.Email)
		}
		if refreshed.Username != ident.Username {
			t.Errorf("Username mismatch: got %q, want %q", refreshed.Username, ident.Username)
		}
	})

	t.Run("HandlePOSTRefreshWithoutGroupsScope", func(t *testing.T) {
		// Verify that groups are NOT returned when groups scope is not requested during refresh
		now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z")
		if err != nil {
			t.Fatal(err)
		}
		conn.now = func() time.Time { return now }
		resp, err := os.ReadFile("testdata/good-resp.xml")
		if err != nil {
			t.Fatal(err)
		}
		samlResp := base64.StdEncoding.EncodeToString(resp)
		// Initial auth WITH groups
		scopesWithGroups := connector.Scopes{OfflineAccess: true, Groups: true}
		ident, err := conn.HandlePOST(scopesWithGroups, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m")
		if err != nil {
			t.Fatalf("HandlePOST failed: %v", err)
		}
		if len(ident.Groups) == 0 {
			t.Fatal("expected groups in initial identity")
		}
		// Refresh WITHOUT groups scope
		scopesNoGroups := connector.Scopes{OfflineAccess: true, Groups: false}
		refreshed, err := conn.Refresh(context.Background(), scopesNoGroups, ident)
		if err != nil {
			t.Fatalf("Refresh failed: %v", err)
		}
		if len(refreshed.Groups) != 0 {
			t.Errorf("expected no groups when groups scope not requested, got %v", refreshed.Groups)
		}
		// Refresh WITH groups scope — groups should be back
		refreshedWithGroups, err := conn.Refresh(context.Background(), scopesWithGroups, ident)
		if err != nil {
			t.Fatalf("Refresh with groups failed: %v", err)
		}
		if len(refreshedWithGroups.Groups) == 0 {
			t.Error("expected groups when groups scope is requested")
		}
	})
}

4
docker-compose.override.yaml.dist

@ -5,6 +5,10 @@ services:
ports:
- "127.0.0.1:3306:3306"
mysql8:
ports:
- "127.0.0.1:3307:3306"
postgres:
ports:
- "127.0.0.1:5432:5432"

9
docker-compose.yaml

@ -17,6 +17,15 @@ services:
MYSQL_PASSWORD: mysql
MYSQL_ROOT_PASSWORD: root
mysql8:
image: mysql:8.0
command: --default-authentication-plugin=mysql_native_password
environment:
MYSQL_DATABASE: dex
MYSQL_USER: mysql
MYSQL_PASSWORD: mysql
MYSQL_ROOT_PASSWORD: root
postgres:
image: postgres:10.15
environment:

1505
docs/enhancements/auth-sessions-2026-02-18.md

File diff suppressed because it is too large Load Diff

732
docs/enhancements/cel-expressions-2026-02-28.md

@ -0,0 +1,732 @@
# Dex Enhancement Proposal (DEP) - 2026-02-28 - CEL (Common Expression Language) Integration
## Table of Contents
- [Summary](#summary)
- [Context](#context)
- [Motivation](#motivation)
- [Goals/Pain](#goalspain)
- [Non-Goals](#non-goals)
- [Proposal](#proposal)
- [User Experience](#user-experience)
- [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints)
- [Phase 1: pkg/cel - Core CEL Library](#phase-1-pkgcel---core-cel-library)
- [Phase 2: Authentication Policies](#phase-2-authentication-policies)
- [Phase 3: Token Policies](#phase-3-token-policies)
- [Phase 4: OIDC Connector Claim Mapping](#phase-4-oidc-connector-claim-mapping)
- [Policy Application Flow](#policy-application-flow)
- [Risks and Mitigations](#risks-and-mitigations)
- [Alternatives](#alternatives)
- [Future Improvements](#future-improvements)
## Summary
This DEP proposes integrating [CEL (Common Expression Language)][cel-spec] into Dex as a first-class
expression engine for policy evaluation, claim mapping, and token customization. A new reusable
`pkg/cel` package will provide a safe, sandboxed CEL environment with Kubernetes-grade compatibility
guarantees, cost budgets, and a curated set of extension libraries. Subsequent phases will leverage
this package to implement authentication policies, token policies, advanced claim mapping in
connectors, and per-client/global access rules — replacing the need for ad-hoc configuration fields
and external policy engines.
[cel-spec]: https://github.com/google/cel-spec
## Context
- [#1583 Add allowedGroups option for clients config][#1583] — a long-standing request for a
configuration option to allow a client to specify a list of allowed groups.
- [#1635 Connector Middleware][#1635] — long-standing request for a policy/middleware layer between
connectors and the server for claim transformations and access control.
- [#1052 Allow restricting connectors per client][#1052] — frequently requested feature to restrict
which connectors are available to specific OAuth2 clients.
- [#2178 Custom claims in ID tokens][#2178] — requests for including additional payload in issued tokens.
- [#2812 Token Exchange DEP][dep-token-exchange] — mentions CEL/Rego as future improvement for
policy-based assertions on exchanged tokens.
- The OIDC connector already has a growing set of ad-hoc claim mutation options
(`ClaimMapping`, `ClaimMutations.NewGroupFromClaims`, `FilterGroupClaims`, `ModifyGroupNames`)
that would benefit from a unified expression language.
- Previous community discussions explored OPA/Rego and JMESPath, but CEL offers a better fit
(see [Alternatives](#alternatives)).
[#1583]: https://github.com/dexidp/dex/pull/1583
[#1635]: https://github.com/dexidp/dex/issues/1635
[#1052]: https://github.com/dexidp/dex/issues/1052
[#2178]: https://github.com/dexidp/dex/issues/2178
[dep-token-exchange]: /docs/enhancements/token-exchange-2023-02-03-%232812.md
## Motivation
### Goals/Pain
1. **Complex query/filter capabilities** — Dex needs a way to express complex validations and
mutations in multiple places (authentication flow, token issuance, claim mapping). Today each
feature requires new Go code, new config fields, and a new release cycle. CEL allows operators
to express these rules declaratively without code changes.
2. **Authentication policies** — Operators want to control _who_ can log in based on rich
conditions: restrict specific connectors to specific clients, require group membership for
certain clients, deny login based on email domain, enforce MFA claims, etc. Currently there is
no unified mechanism; users rely on downstream applications or external proxies.
3. **Token policies** — Operators want to customize issued tokens: add extra claims to ID tokens,
restrict scopes per client, modify `aud` claims, include upstream connector metadata, etc.
Today this requires forking Dex or using a reverse proxy.
4. **Claim mapping in OIDC connector** — The OIDC connector has accumulated multiple ad-hoc config
options for claim mapping and group mutations (`ClaimMapping`, `NewGroupFromClaims`,
`FilterGroupClaims`, `ModifyGroupNames`). A single CEL expression field would replace all of
these with a more powerful and composable approach.
5. **Per-client and global policies** — One of the most frequent requests is allowing different
connectors for different clients and restricting group-based access per client. CEL policies at
the global and per-client level address this cleanly.
6. **CNCF ecosystem alignment** — CEL has massive adoption across the CNCF ecosystem:
| Project | CEL Usage | Evidence |
|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
| **Kubernetes** | ValidatingAdmissionPolicy, CRD validation rules (`x-kubernetes-validations`), AuthorizationPolicy, field selectors, CEL-based match conditions in webhooks | [KEP-3488][k8s-cel-kep], [CRD Validation Rules][k8s-crd-cel], [AuthorizationPolicy KEP-3221][k8s-authz-cel] |
| **Kyverno** | CEL expressions in validation/mutation policies (v1.12+), preconditions | [Kyverno CEL docs][kyverno-cel] |
| **OPA Gatekeeper** | Partially added support for CEL in constraint templates | [Gatekeeper CEL][gatekeeper-cel] |
| **Istio** | AuthorizationPolicy conditions, request routing, telemetry | [Istio CEL docs][istio-cel] |
| **Envoy / Envoy Gateway** | RBAC filter, ext_authz, rate limiting, route matching, access logging | [Envoy CEL docs][envoy-cel] |
| **Tekton** | Pipeline when expressions, CEL custom tasks | [Tekton CEL Interceptor][tekton-cel] |
| **Knative** | Trigger filters using CEL expressions | [Knative CEL filters][knative-cel] |
| **Google Cloud** | IAM Conditions, Cloud Deploy, Security Command Center | [Google IAM CEL][gcp-cel] |
| **Cert-Manager** | CertificateRequestPolicy approval using CEL | [cert-manager approver-policy CEL][cert-manager-cel] |
| **Cilium** | Hubble CEL filter logic | [Cilium CEL docs][cilium-cel] |
| **Crossplane** | Composition functions with CEL-based patch transforms | [Crossplane CEL transforms][crossplane-cel] |
| **Kube-OVN** | Network policy extensions using CEL | [Kube-OVN CEL][kube-ovn-cel] |
[k8s-cel-kep]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3488-cel-admission-control
[k8s-crd-cel]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules
[k8s-authz-cel]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/3221-structured-authorization-configuration
[kyverno-cel]: https://kyverno.io/docs/writing-policies/cel/
[gatekeeper-cel]: https://open-policy-agent.github.io/gatekeeper/website/docs/validating-admission-policy/#policy-updates-to-add-vap-cel
[istio-cel]: https://istio.io/latest/docs/reference/config/security/conditions/
[envoy-cel]: https://www.envoyproxy.io/docs/envoy/latest/xds/type/v3/cel.proto
[tekton-cel]: https://tekton.dev/docs/triggers/cel_expressions/
[knative-cel]: https://github.com/knative/eventing/blob/main/docs/broker/filtering.md#add-cel-expression-filter
[gcp-cel]: https://cloud.google.com/iam/docs/conditions-overview
[cert-manager-cel]: https://cert-manager.io/docs/policy/approval/approver-policy/#validations
[cilium-cel]: https://docs.cilium.io/en/stable/_api/v1/flow/README/#flowfilter-experimental
[crossplane-cel]: https://github.com/crossplane-contrib/function-cel-filter
[kube-ovn-cel]: https://kubeovn.github.io/docs/stable/en/advance/cel-expression/
By choosing CEL, Dex operators who already use Kubernetes or other CNCF tools can reuse their
existing knowledge of the expression language.
### Non-Goals
- **Full policy engine** — This DEP does not aim to replace dedicated external policy engines
(OPA, Kyverno). CEL in Dex is scoped to identity and token operations.
- **Breaking changes to existing configuration** — All existing config fields (`ClaimMapping`,
`ClaimMutations`, etc.) will continue to work. CEL expressions are additive/opt-in.
- **Authorization (beyond Dex scope)** — Dex is an identity provider; downstream authorization
decisions remain the responsibility of relying parties. CEL policies in Dex are limited to
authentication and token issuance concerns.
- **Multi-phase CEL in a single DEP** — Only Phase 1 (`pkg/cel` package) is targeted for
immediate implementation. Phases 2-4 are included here for design context and will have their
own implementation PRs.
- **Multi-step logic** — CEL in Dex is scoped to single-expression evaluation. Each expression
is a standalone, stateless computation with no intermediate variables, chaining, or
multi-step transformations. If a use case requires sequential logic or conditionally chained
expressions, it belongs outside Dex (e.g. in an external policy engine or middleware).
This boundary protects the design from scope creep that pushes CEL beyond what it's good at.
## Proposal
### User Experience
#### Authentication Policy (Phase 2)
Operators can define global and per-client authentication policies in the Dex config:
```yaml
# Global authentication policy — each expression evaluates to bool.
# If true — the request is denied. Evaluated in order; first match wins.
authPolicy:
- expression: "!identity.email.endsWith('@example.com')"
message: "'Login restricted to example.com domain'"
- expression: "!identity.email_verified"
message: "'Email must be verified'"
staticClients:
- id: admin-app
name: Admin Application
secret: ...
redirectURIs: [...]
# Per-client policy — same structure as global
authPolicy:
- expression: "!(request.connector_id in ['okta', 'ldap'])"
message: "'This application requires Okta or LDAP login'"
- expression: "!('admin' in identity.groups)"
message: "'Admin group membership required'"
```
#### Token Policy (Phase 3)
Operators can add extra claims or mutate token contents:
```yaml
tokenPolicy:
# Global mutations applied to all ID tokens
claims:
# Add a custom claim based on group membership
- key: "'role'"
value: "identity.groups.exists(g, g == 'admin') ? 'admin' : 'user'"
# Include connector ID as a claim
- key: "'idp'"
value: "request.connector_id"
# Add department from upstream claims (only if present)
- key: "'department'"
value: "identity.extra['department']"
condition: "'department' in identity.extra"
staticClients:
- id: internal-api
name: Internal API
secret: ...
redirectURIs: [...]
tokenPolicy:
claims:
- key: "'custom-claim.company.com/team'"
value: "identity.extra['team'].orValue('engineering')"
# Only add on-call claim for ops group members
- key: "'on_call'"
value: "true"
condition: "identity.groups.exists(g, g == 'ops')"
# Restrict scopes
filter:
expression: "request.scopes.all(s, s in ['openid', 'email', 'profile'])"
message: "'Unsupported scope requested'"
```
#### OIDC Connector Claim Mapping (Phase 4)
Replace ad-hoc claim mapping with CEL:
```yaml
connectors:
- type: oidc
id: corporate-idp
name: Corporate IdP
config:
issuer: https://idp.example.com
clientID: dex-client
clientSecret: ...
# CEL-based claim mapping — replaces claimMapping and claimModifications
claimMappingExpressions:
username: "claims.preferred_username.orValue(claims.email)"
email: "claims.email"
groups: >
claims.groups
.filter(g, g.startsWith('dex:'))
.map(g, g.trimPrefix('dex:'))
emailVerified: "claims.email_verified.orValue(true)"
# Extra claims to pass through to token policies
extra:
department: "claims.department.orValue('unknown')"
cost_center: "claims.cost_center.orValue('')"
```
### Implementation Details/Notes/Constraints
### Phase 1: `pkg/cel` — Core CEL Library
This is the foundation that all subsequent phases build upon. The package provides a safe,
reusable CEL environment with Kubernetes-grade guarantees.
#### Package Structure
```
pkg/
cel/
cel.go # Core Environment, compilation, evaluation
types.go # CEL type declarations (Identity, Request, etc.)
cost.go # Cost estimation and budgeting
doc.go # Package documentation
library/
email.go # Email-related CEL functions
groups.go # Group-related CEL functions
```
#### Dependencies
```
github.com/google/cel-go v0.27.0
```
The `cel-go` library is the canonical Go implementation maintained by Google, used by Kubernetes
and all major CNCF projects. It follows semantic versioning and provides strong backward
compatibility guarantees.
#### Core API Design
**Public types:**
```go
// CompilationResult holds a compiled CEL program ready for evaluation.
type CompilationResult struct {
Program cel.Program
OutputType *cel.Type
Expression string
}
// Compiler compiles CEL expressions against a specific environment.
type Compiler struct { /* ... */ }
// CompilerOption configures a Compiler.
type CompilerOption func(*compilerConfig)
```
**Compilation pipeline:**
Each `Compile*` call performs these steps sequentially:
1. Reject expressions exceeding `MaxExpressionLength` (10,240 chars).
2. Compile and type-check the expression via `cel-go`.
3. Validate output type matches the expected type (for typed variants).
4. Estimate cost using `defaultCostEstimator` with size hints — reject if estimated max cost
exceeds the cost budget.
5. Create an optimized `cel.Program` with runtime cost limit.
Presence tests (`has(field)`, `'key' in map`) have zero cost, matching Kubernetes CEL behavior.
#### Variable Declarations
Variables are declared via `VariableDeclaration{Name, Type}` and registered with `NewCompiler`.
Helper constructors provide pre-defined variable sets:
**`IdentityVariables()`** — the `identity` variable (from `connector.Identity`),
typed as `cel.ObjectType`:
| Field | CEL Type | Source |
|-------|----------|--------|
| `identity.user_id` | `string` | `connector.Identity.UserID` |
| `identity.username` | `string` | `connector.Identity.Username` |
| `identity.preferred_username` | `string` | `connector.Identity.PreferredUsername` |
| `identity.email` | `string` | `connector.Identity.Email` |
| `identity.email_verified` | `bool` | `connector.Identity.EmailVerified` |
| `identity.groups` | `list(string)` | `connector.Identity.Groups` |
**`RequestVariables()`** — the `request` variable (from `RequestContext`),
typed as `cel.ObjectType`:
| Field | CEL Type |
|-------|----------|
| `request.client_id` | `string` |
| `request.connector_id` | `string` |
| `request.scopes` | `list(string)` |
| `request.redirect_uri` | `string` |
**`ClaimsVariable()`** — the `claims` variable for raw upstream claims as `map(string, dyn)`.
**Typing strategy:**
`identity` and `request` use `cel.ObjectType` with explicitly declared fields. This gives
compile-time type checking: a typo like `identity.emial` is rejected at config load time
rather than silently evaluating to null in production — critical for an auth system where a
misconfigured policy could lock users out.
`claims` remains `map(string, dyn)` because its shape is genuinely unknown — it carries
arbitrary upstream IdP data.
#### Compatibility Guarantees
Following the Kubernetes CEL compatibility model
([KEP-3488: CEL for Admission Control][kep-3488], [Kubernetes CEL Migration Guide][k8s-cel-compat]):
1. **Environment versioning** — The CEL environment is versioned. When new functions or variables
are added, they are introduced under a new environment version. Existing expressions compiled
against an older version continue to work.
```go
// EnvironmentVersion represents the version of the CEL environment.
// New variables, functions, or libraries are introduced in new versions.
type EnvironmentVersion uint32
const (
// EnvironmentV1 is the initial CEL environment.
EnvironmentV1 EnvironmentVersion = 1
)
// WithVersion sets the target environment version for the compiler.
func WithVersion(v EnvironmentVersion) CompilerOption
```
This is directly modeled on `k8s.io/apiserver/pkg/cel/environment`.
2. **Library stability** — Custom functions in the `pkg/cel/library` subpackage follow these rules:
- Functions MUST NOT be removed once released.
- Function signatures MUST NOT change once released.
- New functions MUST be added under a new `EnvironmentVersion`.
- If a function needs to be replaced, the old one is deprecated but kept forever.
3. **Type stability** — CEL types (`Identity`, `Request`, `Claims`) follow the same rules:
- Fields MUST NOT be removed.
- Field types MUST NOT change.
- New fields are added in a new `EnvironmentVersion`.
4. **Semantic versioning of `cel-go`** — The `cel-go` dependency follows semver. Dex pins to a
minor version range and updates are tested for behavioral changes. This is exactly the approach
Kubernetes takes: `k8s.io/apiextensions-apiserver` pins `cel-go` and gates new features behind
environment versions.
5. **Feature gates** — New CEL-powered features are gated behind Dex feature flags (using the
existing `pkg/featureflags` mechanism) during their alpha phase.
[kep-3488]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3488-cel-admission-control
[k8s-cel-compat]: https://kubernetes.io/docs/reference/using-api/cel/
#### Cost Estimation and Budgets
Like Kubernetes, Dex CEL expressions must be bounded to prevent denial-of-service.
**Constants:**
| Constant | Value | Description |
|----------|-------|-------------|
| `DefaultCostBudget` | `10_000_000` | Max cost units per evaluation (aligned with Kubernetes) |
| `MaxExpressionLength` | `10_240` | Max expression string length in characters |
| `DefaultStringMaxLength` | `256` | Estimated max string size for cost estimation |
| `DefaultListMaxLength` | `100` | Estimated max list size for cost estimation |
**How it works:**
A `defaultCostEstimator` (implementing `checker.CostEstimator`) provides size hints for known
variables (`identity`, `request`, `claims`) so the `cel-go` cost estimator doesn't assume
unbounded sizes. It also provides call cost estimates for custom Dex functions
(`dex.emailDomain`, `dex.emailLocalPart`, `dex.groupMatches`, `dex.groupFilter`).
Expressions are validated at three levels:
1. **Length check** — reject expressions exceeding `MaxExpressionLength`.
2. **Compile-time cost estimation** — reject expressions whose estimated max cost exceeds
the cost budget.
3. **Runtime cost limit** — abort evaluation if actual cost exceeds the budget.
#### Extension Libraries
The `pkg/cel` environment includes these cel-go standard extensions (same set as Kubernetes):
| Library | Description | Examples |
|---------|-------------|---------|
| `ext.Strings()` | Extended string functions | `"hello".upperAscii()`, `"foo:bar".split(':')`, `s.trim()`, `s.replace('a','b')` |
| `ext.Encoders()` | Base64 encoding/decoding | `base64.encode(bytes)`, `base64.decode(str)` |
| `ext.Lists()` | Extended list functions | `list.slice(1, 3)`, `list.flatten()` |
| `ext.Sets()` | Set operations on lists | `sets.contains(a, b)`, `sets.intersects(a, b)`, `sets.equivalent(a, b)` |
| `ext.Math()` | Math functions | `math.greatest(a, b)`, `math.least(a, b)` |
Plus custom Dex libraries in the `pkg/cel/library` subpackage, each implementing the
`cel.Library` interface:
**`library.Email`** — email-related helpers:
| Function | Signature | Description |
|----------|-----------|-------------|
| `dex.emailDomain` | `(string) -> string` | Returns the domain portion of an email address. `dex.emailDomain("user@example.com") == "example.com"` |
| `dex.emailLocalPart` | `(string) -> string` | Returns the local part of an email address. `dex.emailLocalPart("user@example.com") == "user"` |
**`library.Groups`** — group-related helpers:
| Function | Signature | Description |
|----------|-----------|-------------|
| `dex.groupMatches` | `(list(string), string) -> list(string)` | Returns groups matching a glob pattern. `dex.groupMatches(identity.groups, "team:*")` |
| `dex.groupFilter` | `(list(string), list(string)) -> list(string)` | Returns only groups present in the allowed list. `dex.groupFilter(identity.groups, ["admin", "ops"])` |
#### Example: Compile and Evaluate
```go
// 1. Create a compiler with identity and request variables
compiler, _ := cel.NewCompiler(
append(cel.IdentityVariables(), cel.RequestVariables()...),
)
// 2. Compile a policy expression (type-checked, cost-estimated)
prog, _ := compiler.CompileBool(
`identity.email.endsWith('@example.com') && 'admin' in identity.groups`,
)
// 3. Evaluate against real data
result, _ := cel.EvalBool(ctx, prog, map[string]any{
"identity": cel.IdentityFromConnector(connectorIdentity),
"request": cel.RequestFromContext(cel.RequestContext{...}),
})
// result == true
```
### Phase 2: Authentication Policies
**Config Model:**
```go
// AuthPolicy is a list of deny expressions evaluated after a user
// authenticates with a connector. Each expression evaluates to bool.
// If true — the request is denied. Evaluated in order; first match wins.
type AuthPolicy []PolicyExpression
// PolicyExpression is a CEL expression with an optional human-readable message.
type PolicyExpression struct {
// Expression is a CEL expression that evaluates to bool.
Expression string `json:"expression"`
// Message is a CEL expression that evaluates to string (displayed to the user on deny).
// If empty, a generic message is shown.
Message string `json:"message,omitempty"`
}
```
**Evaluation point:** After `connector.CallbackConnector.HandleCallback()` or
`connector.PasswordConnector.Login()` returns an identity, and before the auth request is
finalized. Implemented in `server/handlers.go` at `handleConnectorCallback`.
**Available CEL variables:** `identity` (from connector), `request` (client_id, connector_id,
scopes, redirect_uri).
**Compilation:** All policy expressions are compiled once at config load time (in
`cmd/dex/serve.go`) and stored in the `Server` struct. This ensures:
- Syntax/type errors are caught at startup, not at runtime.
- No compilation overhead per request.
- Cost estimation can warn operators about expensive expressions at startup.
**Evaluation flow:**
```
User authenticates via connector
v
connector.HandleCallback() returns Identity
v
Evaluate global authPolicy (in order)
- For each expression: evaluate → bool
- If true → deny with message, HTTP 403
v
Evaluate per-client authPolicy (in order)
- Same logic as global
v
Continue normal flow (approval screen or redirect)
```
### Phase 3: Token Policies
**Config Model:**
```go
// TokenPolicy defines policies for token issuance.
type TokenPolicy struct {
// Claims adds or overrides claims in the issued ID token.
Claims []ClaimExpression `json:"claims,omitempty"`
// Filter validates the token request. If expression evaluates to false,
// the request is denied.
Filter *PolicyExpression `json:"filter,omitempty"`
}
type ClaimExpression struct {
// Key is a CEL expression evaluating to string — the claim name.
Key string `json:"key"`
// Value is a CEL expression evaluating to dyn — the claim value.
Value string `json:"value"`
// Condition is an optional CEL expression evaluating to bool.
// When set, the claim is only included in the token if the condition
// evaluates to true. If omitted, the claim is always included.
Condition string `json:"condition,omitempty"`
}
```
**Evaluation point:** In `server/oauth2.go` during ID token construction, after standard
claims are built but before JWT signing.
**Available CEL variables:** `identity`, `request`, `existing_claims` (the standard claims already
computed as `map(string, dyn)`).
**Claim merge order:**
1. Standard Dex claims (sub, iss, aud, email, groups, etc.)
2. Global `tokenPolicy.claims` evaluated and merged
3. Per-client `tokenPolicy.claims` evaluated and merged (overrides global)
**Reserved (forbidden) claim names:**
Certain claim names are reserved and MUST NOT be set or overridden by CEL token policy
expressions. Attempting to use a reserved claim key will result in a config validation error at
startup. This prevents operators from accidentally breaking the OIDC/OAuth2 contract or
undermining Dex's security guarantees.
```go
// ReservedClaimNames is the set of claim names that CEL token policy
// expressions are forbidden from setting. These are core OIDC/OAuth2 claims
// managed exclusively by Dex.
var ReservedClaimNames = map[string]struct{}{
"iss": {}, // Issuer — always set by Dex to its own issuer URL
"sub": {}, // Subject — derived from connector identity, must not be spoofed
"aud": {}, // Audience — determined by the OAuth2 client, not policy
"exp": {}, // Expiration — controlled by Dex token TTL configuration
"iat": {}, // Issued At — set by Dex at signing time
"nbf": {}, // Not Before — set by Dex at signing time
"jti": {}, // JWT ID — generated by Dex for token revocation/uniqueness
"auth_time": {}, // Authentication Time — set by Dex from the auth session
"nonce": {}, // Nonce — echoed from the client's authorization request
"at_hash": {}, // Access Token Hash — computed by Dex from the access token
"c_hash": {}, // Code Hash — computed by Dex from the authorization code
}
```
The reserved list is enforced in two places:
1. **Config load time** — When compiling token policy `ClaimExpression` entries, Dex statically
evaluates the `Key` expression (which must be a string literal or constant-foldable) and rejects
it if the result is in `ReservedClaimNames`.
2. **Runtime (defense in depth)** — Before merging evaluated claims into the ID token, Dex checks
each key against `ReservedClaimNames` and logs a warning + skips the claim if it matches. This
guards against dynamic key expressions that couldn't be statically checked.
### Phase 4: OIDC Connector Claim Mapping
**Config Model:**
In `connector/oidc/oidc.go`:
```go
type Config struct {
// ... existing fields ...
// ClaimMappingExpressions provides CEL-based claim mapping.
// When set, these take precedence over ClaimMapping and ClaimMutations.
ClaimMappingExpressions *ClaimMappingExpression `json:"claimMappingExpressions,omitempty"`
}
type ClaimMappingExpression struct {
// Username is a CEL expression evaluating to string.
// Available variable: 'claims' (map of upstream claims).
Username string `json:"username,omitempty"`
// Email is a CEL expression evaluating to string.
Email string `json:"email,omitempty"`
// Groups is a CEL expression evaluating to list(string).
Groups string `json:"groups,omitempty"`
// EmailVerified is a CEL expression evaluating to bool.
EmailVerified string `json:"emailVerified,omitempty"`
// Extra is a map of claim names to CEL expressions evaluating to dyn.
// These are carried through to token policies.
Extra map[string]string `json:"extra,omitempty"`
}
```
**Available CEL variable:** `claims` — `map(string, dyn)` containing all raw upstream claims from
the ID token and/or UserInfo endpoint.
This replaces the need for `ClaimMapping`, `NewGroupFromClaims`, `FilterGroupClaims`, and
`ModifyGroupNames` with a single, more powerful mechanism.
**Backward compatibility:** When `claimMappingExpressions` is nil, the existing `ClaimMapping` and
`ClaimMutations` logic is used unchanged. When `claimMappingExpressions` is set, a startup warning is
logged if legacy mapping fields are also configured.
### Policy Application Flow
The following diagram shows the order in which CEL policies are applied.
Each step is optional — if not configured, it is skipped.
```
Connector Authentication
│ upstream claims → connector.Identity
v
Authentication Policies
│ Global authPolicy
│ Per-client authPolicy
v
Token Issuance
│ Global tokenPolicy.filter
│ Per-client tokenPolicy.filter
│ Global tokenPolicy.claims
│ Per-client tokenPolicy.claims
│ Sign JWT
v
Token Response
```
| Step | Policy | Scope | Action on match |
|------|--------|-------|-----------------|
| 2 | `authPolicy` (global) | Global | Expression → `true` = DENY login |
| 3 | `authPolicy` (per-client) | Per-client | Expression → `true` = DENY login |
| 4 | `tokenPolicy.filter` (global) | Global | Expression → `false` = DENY token |
| 5 | `tokenPolicy.filter` (per-client) | Per-client | Expression → `false` = DENY token |
| 6 | `tokenPolicy.claims` (global) | Global | Adds/overrides claims (with optional condition) |
| 7 | `tokenPolicy.claims` (per-client) | Per-client | Adds/overrides claims (overrides global) |
### Risks and Mitigations
| Risk | Mitigation |
|------|------------|
| **CEL expression complexity / DoS** | Cost budgets with configurable limits (default aligned with Kubernetes). Expressions are validated at config load time. Runtime evaluation is aborted if cost exceeds budget. |
| **Learning curve for operators** | CEL has excellent documentation, playground ([cel.dev](https://cel.dev)), and massive CNCF adoption. Dex docs will include a dedicated CEL guide with examples. Most operators already know CEL from Kubernetes. |
| **`cel-go` dependency size** | `cel-go` adds ~5MB to binary. This is acceptable for the functionality provided. Kubernetes, Istio, Envoy all accept this trade-off. |
| **Breaking changes in `cel-go`** | Pin to semver minor range. Environment versioning ensures existing expressions continue to work across upgrades. |
| **Security: CEL expression injection** | CEL expressions are defined by operators in the server config, not by end users. No CEL expression is ever constructed from user input at runtime. |
| **Config migration** | Old config fields (`ClaimMapping`, `ClaimMutations`) continue to work. CEL expressions are opt-in. If both are specified, CEL takes precedence with a config-time warning. |
| **Error messages exposing internals** | CEL deny `message` expressions are controlled by the operator. Default messages are generic. Evaluation errors are logged server-side, not exposed to end users. |
| **Performance** | Expressions are compiled once at startup. Evaluation is sub-millisecond for typical identity operations. Cost budgets prevent pathological cases. Benchmarks will be included in `pkg/cel` tests. |
### Alternatives
#### OPA/Rego
OPA was previously considered ([#1635], token exchange DEP). While powerful, it has significant
drawbacks for Dex:
- **Separate daemon** — OPA typically runs as a sidecar or daemon; adds operational complexity.
Even the embedded Go library (`github.com/open-policy-agent/opa/rego`) is significantly
heavier than `cel-go`.
- **Rego learning curve** — Rego is a Datalog-derived language unfamiliar to most developers.
CEL syntax is closer to C/Java/Go and is immediately readable.
- **Overkill** — Dex needs simple expression evaluation, not a full policy engine with data
loading, bundles, and partial evaluation.
- **No inline expressions** — Rego policies are typically separate files, not inline config
expressions. This makes the config harder to understand and deploy.
- **Smaller CNCF footprint for embedding** — While OPA is a graduated CNCF project, CEL has
broader adoption as an _embedded_ language (Kubernetes, Istio, Envoy, Kyverno, etc.).
#### JMESPath
JMESPath was proposed for claim mapping. Drawbacks:
- **Query-only** — JMESPath is a JSON query language. It cannot express boolean conditions,
mutations, or string operations naturally.
- **Limited type system** — No type checking at compile time. Errors are only caught at runtime.
- **Small ecosystem** — Limited adoption compared to CEL. No CNCF projects use JMESPath for
policy evaluation.
- **No cost estimation** — No way to bound execution time.
#### Hardcoded Go Logic
The current approach: each feature requires new Go structs, config fields, and code. This is
unsustainable:
- `ClaimMapping`, `NewGroupFromClaims`, `FilterGroupClaims`, `ModifyGroupNames` are each separate
features that could be one CEL expression.
- Every new policy need requires a Dex code change and release.
- Combinatorial explosion of config options.
#### No Change
Without CEL or an equivalent:
- Operators continue to request per-client connector restrictions, custom claims, claim
transformations, and access policies — issues remain open indefinitely.
- Dex accumulates more ad-hoc config fields, increasing maintenance burden.
- Complex use cases require external reverse proxies, forking Dex, or middleware.
## Future Improvements
- **CEL in other connectors** — Extend CEL claim mapping beyond OIDC to LDAP (attribute mapping),
SAML (assertion mapping), and other connectors with complex attribute mapping needs.
- **Policy testing framework** — Unit test framework for operators to validate their CEL
expressions against fixture data before deployment.
- **Connector selection via CEL** — Replace the static connector-per-client mapping with a CEL
expression that dynamically determines which connectors to show based on request attributes.

43
examples/config-dev.yaml

@ -68,6 +68,7 @@ web:
# issuer: dex
# logoURL: theme/logo.png
# dir: web/
# Allowed values: light, dark
# theme: light
# Configuration for telemetry
@ -101,26 +102,33 @@ telemetry:
# Default values shown below
# oauth2:
# grantTypes determines the allowed set of authorization flows.
# # grantTypes determines the allowed set of authorization flows.
# grantTypes:
# - "authorization_code"
# - "client_credentials"
# - "refresh_token"
# - "implicit"
# - "password"
# - "urn:ietf:params:oauth:grant-type:device_code"
# - "urn:ietf:params:oauth:grant-type:token-exchange"
# responseTypes determines the allowed response contents of a successful authorization flow.
# use ["code", "token", "id_token"] to enable implicit flow for web-only clients.
# # responseTypes determines the allowed response contents of a successful authorization flow.
# # use ["code", "token", "id_token"] to enable implicit flow for web-only clients.
# responseTypes: [ "code" ] # also allowed are "token" and "id_token"
# By default, Dex will ask for approval to share data with application
# (approval for sharing data from connected IdP to Dex is separate process on IdP)
# # By default, Dex will ask for approval to share data with application
# # (approval for sharing data from connected IdP to Dex is separate process on IdP)
# skipApprovalScreen: false
# If only one authentication method is enabled, the default behavior is to
# go directly to it. For connected IdPs, this redirects the browser away
# from application to upstream provider such as the Google login page
# # If only one authentication method is enabled, the default behavior is to
# # go directly to it. For connected IdPs, this redirects the browser away
# # from application to upstream provider such as the Google login page
# alwaysShowLoginScreen: false
# Uncomment the passwordConnector to use a specific connector for password grants
# # Uncomment the passwordConnector to use a specific connector for password grants
# passwordConnector: local
# # PKCE (Proof Key for Code Exchange) configuration
# pkce:
# # If true, PKCE is required for all authorization code flows (OAuth 2.1).
# enforce: false
# # Supported code challenge methods. Defaults to ["S256", "plain"].
# codeChallengeMethodsSupported: ["S256", "plain"]
# Instead of reading from an external storage, use this list of clients.
#
@ -132,6 +140,10 @@ staticClients:
- '/dex/device/callback'
name: 'Example App'
secret: ZXhhbXBsZS1hcHAtc2VjcmV0
# Optional: restrict which connectors this client can use for authentication.
# If omitted or empty, all connectors are allowed.
# allowedConnectors:
# - mock
# Example using environment variables
# Set DEX_CLIENT_ID and DEX_SECURE_CLIENT_SECRET before starting Dex
@ -146,10 +158,23 @@ staticClients:
# - /device/callback
# name: 'Static Client for Device Flow'
# public: true
connectors:
- type: mockCallback
id: mock
name: Example
# grantTypes restricts which grant types can use this connector.
# If not specified, all grant types are allowed.
# Supported values:
# - "authorization_code"
# - "implicit"
# - "refresh_token"
# - "password"
# - "urn:ietf:params:oauth:grant-type:device_code"
# - "urn:ietf:params:oauth:grant-type:token-exchange"
# grantTypes:
# - "authorization_code"
# - "refresh_token"
# - type: google
# id: google
# name: Google

6
examples/go.mod

@ -1,13 +1,13 @@
module github.com/dexidp/dex/examples
go 1.24.0
go 1.25.0
require (
github.com/coreos/go-oidc/v3 v3.17.0
github.com/dexidp/dex/api/v2 v2.4.0
github.com/spf13/cobra v1.10.2
golang.org/x/oauth2 v0.35.0
google.golang.org/grpc v1.79.1
golang.org/x/oauth2 v0.36.0
google.golang.org/grpc v1.79.2
)
require (

8
examples/go.sum

@ -39,8 +39,8 @@ go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4Etq
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
@ -49,8 +49,8 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU=
google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

31
go.mod

@ -17,6 +17,7 @@ require (
github.com/go-ldap/ldap/v3 v3.4.12
github.com/go-sql-driver/mysql v1.9.3
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/google/cel-go v0.27.0
github.com/google/uuid v1.6.0
github.com/gorilla/handlers v1.5.2
github.com/gorilla/mux v1.8.1
@ -34,17 +35,18 @@ require (
github.com/stretchr/testify v1.11.1
go.etcd.io/etcd/client/pkg/v3 v3.6.8
go.etcd.io/etcd/client/v3 v3.6.8
golang.org/x/crypto v0.48.0
golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741
golang.org/x/net v0.50.0
golang.org/x/oauth2 v0.35.0
google.golang.org/api v0.269.0
google.golang.org/grpc v1.79.1
golang.org/x/crypto v0.49.0
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948
golang.org/x/net v0.52.0
golang.org/x/oauth2 v0.36.0
google.golang.org/api v0.271.0
google.golang.org/grpc v1.79.2
google.golang.org/protobuf v1.36.11
)
require (
ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect
cel.dev/expr v0.25.1 // indirect
cloud.google.com/go/auth v0.18.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
dario.cat/mergo v1.0.1 // indirect
@ -53,6 +55,7 @@ require (
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmatcuk/doublestar v1.3.4 // indirect
@ -71,7 +74,7 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.12 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@ -112,15 +115,15 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/text v0.34.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.41.0 // indirect
golang.org/x/mod v0.33.0 // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/text v0.35.0 // indirect
golang.org/x/time v0.15.0 // indirect
golang.org/x/tools v0.42.0 // indirect
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

67
go.sum

@ -1,5 +1,7 @@
ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 h1:E0wvcUXTkgyN4wy4LGtNzMNGMytJN8afmIWXJVMi4cc=
ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM=
cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
@ -30,6 +32,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE=
@ -91,14 +95,16 @@ github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63Y
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo=
github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.12 h1:Fg+zsqzYEs1ZnvmcztTYxhgCBsx3eEhEwQ1W/lHq/sQ=
github.com/googleapis/enterprise-certificate-proxy v0.3.12/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg=
github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8=
github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg=
github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc=
github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
@ -266,50 +272,51 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741 h1:fGZugkZk2UgYBxtpKmvub51Yno1LJDeEsRp2xGD+0gY=
golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U=
golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
@ -320,16 +327,16 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.269.0 h1:qDrTOxKUQ/P0MveH6a7vZ+DNHxJQjtGm/uvdbdGXCQg=
google.golang.org/api v0.269.0/go.mod h1:N8Wpcu23Tlccl0zSHEkcAZQKDLdquxK+l9r2LkwAauE=
google.golang.org/api v0.271.0 h1:cIPN4qcUc61jlh7oXu6pwOQqbJW2GqYh5PS6rB2C/JY=
google.golang.org/api v0.271.0/go.mod h1:CGT29bhwkbF+i11qkRUJb2KMKqcJ1hdFceEIRd9u64Q=
google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM=
google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM=
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d h1:t/LOSXPJ9R0B6fnZNyALBRfZBH0Uy0gT+uR+SJ6syqQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU=
google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

232
pkg/cel/cel.go

@ -0,0 +1,232 @@
package cel
import (
"context"
"fmt"
"reflect"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/ext"
"github.com/dexidp/dex/pkg/cel/library"
)
// EnvironmentVersion represents the version of the CEL environment.
// New variables, functions, or libraries are introduced in new versions,
// so a compiled expression can be pinned to the environment it was
// validated against (see WithVersion).
type EnvironmentVersion uint32

const (
	// EnvironmentV1 is the initial CEL environment.
	EnvironmentV1 EnvironmentVersion = 1
)
// CompilationResult holds a compiled CEL program ready for evaluation.
type CompilationResult struct {
	// Program is the planned, cost-limited CEL program; evaluate it with
	// Eval/EvalBool/EvalString.
	Program cel.Program
	// OutputType is the checked output type of the expression.
	OutputType *cel.Type
	// Expression is the original source text, kept for error reporting.
	Expression string
	// ast is the checked AST the program was planned from.
	ast *cel.Ast
}
// CompilerOption configures a Compiler.
type CompilerOption func(*compilerConfig)

// compilerConfig holds the tunable settings applied by CompilerOptions.
type compilerConfig struct {
	// costBudget bounds both the compile-time cost estimate and the
	// runtime cost limit of compiled programs.
	costBudget uint64
	// version selects the target CEL environment version.
	version EnvironmentVersion
}
// defaultCompilerConfig returns the configuration used when no options are
// supplied: the default cost budget and the initial environment version.
func defaultCompilerConfig() *compilerConfig {
	cfg := new(compilerConfig)
	cfg.costBudget = DefaultCostBudget
	cfg.version = EnvironmentV1
	return cfg
}
// WithCostBudget sets a custom cost budget for expression evaluation.
func WithCostBudget(budget uint64) CompilerOption {
	return func(cfg *compilerConfig) { cfg.costBudget = budget }
}
// WithVersion sets the target environment version for the compiler.
// Defaults to the latest version. Specifying an older version ensures
// that only functions/types available at that version are used.
func WithVersion(v EnvironmentVersion) CompilerOption {
	return func(cfg *compilerConfig) { cfg.version = v }
}
// Compiler compiles CEL expressions against a specific environment.
type Compiler struct {
	// env is the configured CEL environment (variables, extensions,
	// native types, cost estimator options).
	env *cel.Env
	// cfg carries the options the compiler was created with.
	cfg *compilerConfig
}
// NewCompiler creates a new CEL compiler with the specified variable
// declarations and options.
//
// All custom Dex libraries are automatically included.
// The environment is configured with cost limits and safe defaults.
//
// It returns an error if the CEL environment cannot be constructed
// (e.g. conflicting variable declarations).
func NewCompiler(variables []VariableDeclaration, opts ...CompilerOption) (*Compiler, error) {
	cfg := defaultCompilerConfig()
	for _, opt := range opts {
		opt(cfg)
	}

	// Pre-size for the 10 fixed options appended below plus one per
	// variable. (The previous capacity of 8 undercounted the fixed
	// options and forced an immediate reallocation.)
	envOpts := make([]cel.EnvOption, 0, 10+len(variables))
	envOpts = append(envOpts,
		cel.DefaultUTCTimeZone(true),

		// Standard extension libraries (same set as Kubernetes)
		ext.Strings(),
		ext.Encoders(),
		ext.Lists(),
		ext.Sets(),
		ext.Math(),

		// Native Go types for typed variable access.
		// This gives compile-time field checking: identity.emial → error at config load.
		ext.NativeTypes(
			ext.ParseStructTags(true),
			reflect.TypeOf(IdentityVal{}),
			reflect.TypeOf(RequestVal{}),
		),

		// Custom Dex libraries
		cel.Lib(&library.Email{}),
		cel.Lib(&library.Groups{}),

		// Presence tests like has(field) and 'key' in map are O(1) hash
		// lookups on map(string, dyn) variables, so they should not count
		// toward the cost budget. Without this, expressions with multiple
		// 'in' checks (e.g. "'admin' in identity.groups") would accumulate
		// inflated cost estimates. This matches Kubernetes CEL behavior
		// where presence tests are free for CRD validation rules.
		cel.CostEstimatorOptions(
			checker.PresenceTestHasCost(false),
		),
	)

	for _, v := range variables {
		envOpts = append(envOpts, cel.Variable(v.Name, v.Type))
	}

	env, err := cel.NewEnv(envOpts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create CEL environment: %w", err)
	}

	return &Compiler{env: env, cfg: cfg}, nil
}
// CompileBool compiles a CEL expression that must evaluate to bool.
// It is a convenience wrapper around compile with cel.BoolType as the
// required output type; any other checked output type is rejected.
func (c *Compiler) CompileBool(expression string) (*CompilationResult, error) {
	return c.compile(expression, cel.BoolType)
}
// CompileString compiles a CEL expression that must evaluate to string.
// Any other checked output type is rejected at compile time.
func (c *Compiler) CompileString(expression string) (*CompilationResult, error) {
	return c.compile(expression, cel.StringType)
}
// CompileStringList compiles a CEL expression that must evaluate to list(string).
// Any other checked output type is rejected at compile time.
func (c *Compiler) CompileStringList(expression string) (*CompilationResult, error) {
	return c.compile(expression, cel.ListType(cel.StringType))
}
// Compile compiles a CEL expression with any output type.
// Passing nil as the expected type skips the output-type check in compile.
func (c *Compiler) Compile(expression string) (*CompilationResult, error) {
	return c.compile(expression, nil)
}
// compile parses, type-checks, and plans a CEL expression.
//
// It enforces the maximum source length, optionally checks the expression's
// output type against expectedType (skipped when nil), rejects expressions
// whose estimated worst-case cost exceeds the configured budget, and finally
// builds an optimized, runtime-cost-limited program.
func (c *Compiler) compile(expression string, expectedType *cel.Type) (*CompilationResult, error) {
	if len(expression) > MaxExpressionLength {
		return nil, fmt.Errorf("expression exceeds maximum length of %d characters", MaxExpressionLength)
	}

	checked, issues := c.env.Compile(expression)
	if issues != nil && issues.Err() != nil {
		return nil, fmt.Errorf("CEL compilation failed: %w", issues.Err())
	}

	if expectedType != nil && !checked.OutputType().IsEquivalentType(expectedType) {
		return nil, fmt.Errorf("expected expression output type %s, got %s", expectedType, checked.OutputType())
	}

	// Estimate cost at compile time and reject expressions that are too expensive.
	estimate, err := c.env.EstimateCost(checked, &defaultCostEstimator{})
	if err != nil {
		return nil, fmt.Errorf("CEL cost estimation failed: %w", err)
	}
	if budget := c.cfg.costBudget; estimate.Max > budget {
		return nil, fmt.Errorf("CEL expression estimated cost %d exceeds budget %d", estimate.Max, budget)
	}

	program, err := c.env.Program(checked, cel.EvalOptions(cel.OptOptimize), cel.CostLimit(c.cfg.costBudget))
	if err != nil {
		return nil, fmt.Errorf("CEL program creation failed: %w", err)
	}

	result := &CompilationResult{
		Program:    program,
		OutputType: checked.OutputType(),
		Expression: expression,
		ast:        checked,
	}
	return result, nil
}
// Eval evaluates a compiled program against the given variables.
// Evaluation honors ctx cancellation via ContextEval.
func Eval(ctx context.Context, result *CompilationResult, variables map[string]any) (ref.Val, error) {
	val, _, err := result.Program.ContextEval(ctx, variables)
	if err != nil {
		return nil, fmt.Errorf("CEL evaluation failed: %w", err)
	}
	return val, nil
}
// EvalBool is a convenience function that evaluates and asserts bool output.
func EvalBool(ctx context.Context, result *CompilationResult, variables map[string]any) (bool, error) {
	out, err := Eval(ctx, result, variables)
	if err != nil {
		return false, err
	}
	switch v := out.Value().(type) {
	case bool:
		return v, nil
	default:
		return false, fmt.Errorf("expected bool result, got %T", v)
	}
}
// EvalString is a convenience function that evaluates and asserts string output.
func EvalString(ctx context.Context, result *CompilationResult, variables map[string]any) (string, error) {
	out, err := Eval(ctx, result, variables)
	if err != nil {
		return "", err
	}
	switch v := out.Value().(type) {
	case string:
		return v, nil
	default:
		return "", fmt.Errorf("expected string result, got %T", v)
	}
}

280
pkg/cel/cel_test.go

@ -0,0 +1,280 @@
package cel_test
import (
"context"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dexidp/dex/connector"
dexcel "github.com/dexidp/dex/pkg/cel"
)
// TestCompileBool verifies that CompileBool accepts expressions with a bool
// output type and rejects every other output type.
func TestCompileBool(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	cases := map[string]struct {
		expr    string
		wantErr bool
	}{
		"true literal":         {expr: "true"},
		"comparison":           {expr: "1 == 1"},
		"string type mismatch": {expr: "'hello'", wantErr: true},
		"int type mismatch":    {expr: "42", wantErr: true},
	}

	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			got, err := compiler.CompileBool(tt.expr)
			if tt.wantErr {
				assert.Error(t, err)
				assert.Nil(t, got)
				return
			}
			assert.NoError(t, err)
			assert.NotNil(t, got)
		})
	}
}
// TestCompileString verifies that CompileString accepts string-typed
// expressions and rejects other output types.
func TestCompileString(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	cases := map[string]struct {
		expr    string
		wantErr bool
	}{
		"string literal":       {expr: "'hello'"},
		"string concatenation": {expr: "'hello' + ' ' + 'world'"},
		"bool type mismatch":   {expr: "true", wantErr: true},
	}

	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			got, err := compiler.CompileString(tt.expr)
			if tt.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.NotNil(t, got)
		})
	}
}
// TestCompileStringList verifies that CompileStringList accepts list(string)
// expressions and rejects non-list output types.
func TestCompileStringList(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	list, err := compiler.CompileStringList("['a', 'b', 'c']")
	assert.NoError(t, err)
	assert.NotNil(t, list)

	_, err = compiler.CompileStringList("'not a list'")
	assert.Error(t, err)
}
// TestCompile verifies that Compile is type-agnostic: bool, string, and int
// outputs all compile successfully.
func TestCompile(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	for _, expr := range []string{"true", "'hello'", "42"} {
		got, err := compiler.Compile(expr)
		assert.NoError(t, err)
		assert.NotNil(t, got)
	}
}
// TestCompileErrors verifies that malformed or unresolvable expressions are
// rejected at compile time.
func TestCompileErrors(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	exprs := map[string]string{
		"syntax error":       "1 +",
		"undefined variable": "undefined_var",
		"undefined function": "undefinedFunc()",
	}

	for name, expr := range exprs {
		t.Run(name, func(t *testing.T) {
			_, err := compiler.Compile(expr)
			assert.Error(t, err)
		})
	}
}
// TestCompileRejectsUnknownFields verifies that native-type variable
// declarations give compile-time field and type checking.
func TestCompileRejectsUnknownFields(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	// A misspelled field name must be caught at compile time.
	_, err = compiler.CompileBool("identity.emial == 'test@example.com'")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "compilation failed")

	// Comparing a string field to an int must be caught at compile time.
	_, err = compiler.CompileBool("identity.email == 123")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "compilation failed")

	// A correctly spelled, correctly typed comparison compiles cleanly.
	_, err = compiler.CompileBool("identity.email == 'test@example.com'")
	assert.NoError(t, err)
}
// TestMaxExpressionLength verifies that expressions longer than
// MaxExpressionLength are rejected before parsing.
func TestMaxExpressionLength(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	oversized := "'" + strings.Repeat("a", dexcel.MaxExpressionLength) + "'"
	_, err = compiler.Compile(oversized)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "maximum length")
}
// TestEvalBool evaluates boolean expressions against identity values and
// checks the returned result.
func TestEvalBool(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	cases := map[string]struct {
		expr     string
		identity dexcel.IdentityVal
		want     bool
	}{
		"email endsWith": {
			expr:     "identity.email.endsWith('@example.com')",
			identity: dexcel.IdentityVal{Email: "user@example.com"},
			want:     true,
		},
		"email endsWith false": {
			expr:     "identity.email.endsWith('@example.com')",
			identity: dexcel.IdentityVal{Email: "user@other.com"},
			want:     false,
		},
		"email_verified": {
			expr:     "identity.email_verified == true",
			identity: dexcel.IdentityVal{EmailVerified: true},
			want:     true,
		},
		"group membership": {
			expr:     "identity.groups.exists(g, g == 'admin')",
			identity: dexcel.IdentityVal{Groups: []string{"admin", "dev"}},
			want:     true,
		},
	}

	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			program, err := compiler.CompileBool(tt.expr)
			require.NoError(t, err)

			got, err := dexcel.EvalBool(context.Background(), program, map[string]any{
				"identity": tt.identity,
			})
			require.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}
// TestEvalString evaluates a string-typed expression and checks the result.
func TestEvalString(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	// With ObjectType, identity.email is typed as string, so CompileString works.
	program, err := compiler.CompileString("identity.email")
	require.NoError(t, err)

	got, err := dexcel.EvalString(context.Background(), program, map[string]any{
		"identity": dexcel.IdentityVal{Email: "user@example.com"},
	})
	require.NoError(t, err)
	assert.Equal(t, "user@example.com", got)
}
// TestEvalWithIdentityAndRequest evaluates a policy that reads both the
// identity and request variables in a single expression.
func TestEvalWithIdentityAndRequest(t *testing.T) {
	compiler, err := dexcel.NewCompiler(
		append(dexcel.IdentityVariables(), dexcel.RequestVariables()...),
	)
	require.NoError(t, err)

	compiled, err := compiler.CompileBool(
		`identity.email.endsWith('@example.com') && 'admin' in identity.groups && request.connector_id == 'okta'`,
	)
	require.NoError(t, err)

	activation := map[string]any{
		"identity": dexcel.IdentityFromConnector(connector.Identity{
			UserID:   "123",
			Username: "john",
			Email:    "john@example.com",
			Groups:   []string{"admin", "dev"},
		}),
		"request": dexcel.RequestFromContext(dexcel.RequestContext{
			ClientID:    "my-app",
			ConnectorID: "okta",
			Scopes:      []string{"openid", "email"},
		}),
	}
	got, err := dexcel.EvalBool(context.Background(), compiled, activation)
	require.NoError(t, err)
	assert.True(t, got)
}
// TestNewCompilerWithVariables checks that the untyped claims map can be
// compiled and evaluated.
func TestNewCompilerWithVariables(t *testing.T) {
	// The claims variable remains map(string, dyn): upstream claims are untyped.
	compiler, err := dexcel.NewCompiler(dexcel.ClaimsVariable())
	require.NoError(t, err)

	// Map access yields dyn, so Compile (not CompileString) is required.
	compiled, err := compiler.Compile("claims.email")
	require.NoError(t, err)

	activation := map[string]any{
		"claims": map[string]any{"email": "test@example.com"},
	}
	got, err := dexcel.EvalString(context.Background(), compiled, activation)
	require.NoError(t, err)
	assert.Equal(t, "test@example.com", got)
}

105
pkg/cel/cost.go

@ -0,0 +1,105 @@
package cel
import (
"fmt"
"github.com/google/cel-go/checker"
)
// Tunables for CEL expression cost control. Grouped into one block so the
// limits that govern expression safety can be read together.
const (
	// DefaultCostBudget is the default cost budget for a single expression
	// evaluation. Aligned with Kubernetes defaults: enough for typical identity
	// operations but prevents runaway expressions.
	DefaultCostBudget uint64 = 10_000_000

	// MaxExpressionLength is the maximum length of a CEL expression string.
	MaxExpressionLength = 10_240

	// DefaultStringMaxLength is the estimated max length of string values
	// (emails, usernames, group names, etc.) used for compile-time cost estimation.
	DefaultStringMaxLength = 256

	// DefaultListMaxLength is the estimated max length of list values
	// (groups, scopes) used for compile-time cost estimation.
	DefaultListMaxLength = 100
)
// CostEstimate holds the estimated cost range for a compiled expression.
type CostEstimate struct {
	// Min is the lower bound of the statically estimated evaluation cost.
	Min uint64
	// Max is the upper bound of the statically estimated evaluation cost.
	Max uint64
}
// EstimateCost returns the statically computed cost range for a compiled
// expression. The estimate is derived from the AST at compile time; the
// expression is never evaluated.
func (c *Compiler) EstimateCost(result *CompilationResult) (CostEstimate, error) {
	est, err := c.env.EstimateCost(result.ast, &defaultCostEstimator{})
	if err != nil {
		return CostEstimate{}, fmt.Errorf("CEL cost estimation failed: %w", err)
	}
	return CostEstimate{
		Min: est.Min,
		Max: est.Max,
	}, nil
}
// defaultCostEstimator provides size hints for compile-time cost estimation.
// Without these hints, the CEL cost estimator assumes unbounded sizes for
// variables, leading to wildly overestimated max costs.
// It implements the checker.CostEstimator interface.
type defaultCostEstimator struct{}
// EstimateSize returns size hints for the identity, request, and claims
// variables so the cost estimator does not assume lists/strings can be
// infinitely large. It returns nil when no hint applies, letting the
// estimator fall back to its defaults.
//
// Fix: the original called element.Path() twice and nil-checked the result
// separately from the length check; len(nil slice) == 0, so a single call
// and a single guard are equivalent and clearer.
func (defaultCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
	path := element.Path()
	if len(path) == 0 {
		// Covers both a nil and an empty path.
		return nil
	}
	switch path[0] {
	case "identity", "request", "claims":
		// Nested field access (e.g. identity.email, identity.groups)
		if len(path) >= 2 {
			switch path[1] {
			case "groups", "scopes":
				// list(string) fields
				return &checker.SizeEstimate{Min: 0, Max: DefaultListMaxLength}
			case "email_verified":
				// bool field — size is always 1
				return &checker.SizeEstimate{Min: 1, Max: 1}
			default:
				// string fields (email, username, user_id, client_id, etc.)
				return &checker.SizeEstimate{Min: 0, Max: DefaultStringMaxLength}
			}
		}
		// The map itself: number of keys
		return &checker.SizeEstimate{Min: 0, Max: 20}
	}
	return nil
}
// EstimateCallCost returns cost hints for the dex.* extension functions.
// Returning nil defers to the estimator's default cost model for all other
// functions.
func (defaultCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
	var cost checker.CostEstimate
	switch function {
	case "dex.emailDomain", "dex.emailLocalPart":
		// Simple string split — O(n) where n is string length, bounded.
		cost = checker.CostEstimate{Min: 1, Max: 2}
	case "dex.groupMatches":
		// Iterates over the groups list, matching each entry against a pattern.
		cost = checker.CostEstimate{Min: 1, Max: DefaultListMaxLength}
	case "dex.groupFilter":
		// Builds a set from the allowed list, then iterates the groups.
		cost = checker.CostEstimate{Min: 1, Max: 2 * DefaultListMaxLength}
	default:
		return nil
	}
	return &checker.CallEstimate{CostEstimate: cost}
}

137
pkg/cel/cost_test.go

@ -0,0 +1,137 @@
package cel_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dexcel "github.com/dexidp/dex/pkg/cel"
)
// TestEstimateCost checks that static cost estimates are internally
// consistent and fall within the default budget for typical expressions.
func TestEstimateCost(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	cases := map[string]string{
		"simple bool":       "true",
		"string comparison": "identity.email == 'test@example.com'",
		"group membership":  "identity.groups.exists(g, g == 'admin')",
	}
	for name, expr := range cases {
		t.Run(name, func(t *testing.T) {
			compiled, err := compiler.Compile(expr)
			require.NoError(t, err)

			est, err := compiler.EstimateCost(compiled)
			require.NoError(t, err)
			assert.True(t, est.Max >= est.Min, "max cost should be >= min cost")
			assert.True(t, est.Max <= dexcel.DefaultCostBudget,
				"estimated max cost %d should be within default budget %d", est.Max, dexcel.DefaultCostBudget)
		})
	}
}
// TestCompileTimeCostAcceptsSimpleExpressions verifies that realistic policy
// expressions compile within the default cost budget.
func TestCompileTimeCostAcceptsSimpleExpressions(t *testing.T) {
	compiler, err := dexcel.NewCompiler(
		append(dexcel.IdentityVariables(), dexcel.RequestVariables()...),
	)
	require.NoError(t, err)

	cases := map[string]string{
		"literal":         "true",
		"email endsWith":  "identity.email.endsWith('@example.com')",
		"group check":     "'admin' in identity.groups",
		"emailDomain":     `dex.emailDomain(identity.email)`,
		"groupMatches":    `dex.groupMatches(identity.groups, "team:*")`,
		"groupFilter":     `dex.groupFilter(identity.groups, ["admin", "dev"])`,
		"combined policy": `identity.email.endsWith('@example.com') && 'admin' in identity.groups`,
		"complex policy": `identity.email.endsWith('@example.com') &&
			identity.groups.exists(g, g == 'admin') &&
			request.connector_id == 'okta' &&
			request.scopes.exists(s, s == 'openid')`,
		"filter+map chain": `identity.groups
			.filter(g, g.startsWith('team:'))
			.map(g, g.replace('team:', ''))
			.size() > 0`,
	}
	for name, expr := range cases {
		t.Run(name, func(t *testing.T) {
			_, err := compiler.Compile(expr)
			assert.NoError(t, err, "expression should compile within default budget")
		})
	}
}
// TestCompileTimeCostRejection verifies that expressions whose estimated
// cost exceeds a configured budget are rejected at compile time.
func TestCompileTimeCostRejection(t *testing.T) {
	vars := append(dexcel.IdentityVariables(), dexcel.RequestVariables()...)

	cases := map[string]struct {
		budget uint64
		expr   string
	}{
		"simple exists exceeds tiny budget": {
			budget: 1,
			expr:   "identity.groups.exists(g, g == 'admin')",
		},
		"endsWith exceeds tiny budget": {
			budget: 2,
			expr:   "identity.email.endsWith('@example.com')",
		},
		"nested comprehension over groups exceeds moderate budget": {
			// Two nested iterations over groups: O(n^2) where n=100 → ~280K
			budget: 10_000,
			expr: `identity.groups.exists(g1,
				identity.groups.exists(g2,
					g1 != g2 && g1.startsWith(g2)
				)
			)`,
		},
		"cross-variable comprehension exceeds moderate budget": {
			// filter groups then check each against scopes: O(n*m) → ~162K
			budget: 10_000,
			expr: `identity.groups
				.filter(g, g.startsWith('team:'))
				.exists(g, request.scopes.exists(s, s == g))`,
		},
		"chained filter+map+filter+map exceeds small budget": {
			budget: 1000,
			expr: `identity.groups
				.filter(g, g.startsWith('team:'))
				.map(g, g.replace('team:', ''))
				.filter(g, g.size() > 3)
				.map(g, g.upperAscii())
				.size() > 0`,
		},
		"many independent exists exceeds small budget": {
			budget: 5000,
			expr: `identity.groups.exists(g, g.contains('a')) &&
				identity.groups.exists(g, g.contains('b')) &&
				identity.groups.exists(g, g.contains('c')) &&
				identity.groups.exists(g, g.contains('d')) &&
				identity.groups.exists(g, g.contains('e'))`,
		},
	}
	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			compiler, err := dexcel.NewCompiler(vars, dexcel.WithCostBudget(tt.budget))
			require.NoError(t, err)

			_, err = compiler.Compile(tt.expr)
			assert.Error(t, err)
			assert.Contains(t, err.Error(), "estimated cost")
			assert.Contains(t, err.Error(), "exceeds budget")
		})
	}
}

5
pkg/cel/doc.go

@ -0,0 +1,5 @@
// Package cel provides a safe, sandboxed CEL (Common Expression Language)
// environment for policy evaluation, claim mapping, and token customization
// in Dex. It includes cost budgets, Kubernetes-grade compatibility guarantees,
// and a curated set of extension libraries.
package cel

4
pkg/cel/library/doc.go

@ -0,0 +1,4 @@
// Package library provides custom CEL function libraries for Dex.
// Each library implements the cel.Library interface and can be registered
// in a CEL environment.
package library

73
pkg/cel/library/email.go

@ -0,0 +1,73 @@
package library
import (
"strings"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
)
// Email provides email-related CEL functions.
// It implements the cel.Library interface so it can be registered in a
// CEL environment via its CompileOptions and ProgramOptions.
//
// Functions (V1):
//
//	dex.emailDomain(email: string) -> string
//	  Returns the domain portion of an email address.
//	  Example: dex.emailDomain("user@example.com") == "example.com"
//
//	dex.emailLocalPart(email: string) -> string
//	  Returns the local part of an email address.
//	  Example: dex.emailLocalPart("user@example.com") == "user"
type Email struct{}
// CompileOptions declares the dex.emailDomain and dex.emailLocalPart
// functions, each as a single string→string overload.
func (Email) CompileOptions() []cel.EnvOption {
	domainFn := cel.Function("dex.emailDomain",
		cel.Overload("dex_email_domain_string",
			[]*cel.Type{cel.StringType},
			cel.StringType,
			cel.UnaryBinding(emailDomainImpl),
		),
	)
	localPartFn := cel.Function("dex.emailLocalPart",
		cel.Overload("dex_email_local_part_string",
			[]*cel.Type{cel.StringType},
			cel.StringType,
			cel.UnaryBinding(emailLocalPartImpl),
		),
	)
	return []cel.EnvOption{domainFn, localPartFn}
}
// ProgramOptions returns no program-time options; the email library is
// fully declared at compile time.
func (Email) ProgramOptions() []cel.ProgramOption {
	return nil
}
// emailDomainImpl implements dex.emailDomain: it returns everything after
// the first '@' in the argument, or "" when no '@' is present.
func emailDomainImpl(arg ref.Val) ref.Val {
	email, ok := arg.Value().(string)
	if !ok {
		return types.NewErr("dex.emailDomain: expected string argument")
	}
	if _, domain, found := strings.Cut(email, "@"); found {
		return types.String(domain)
	}
	return types.String("")
}
// emailLocalPartImpl implements dex.emailLocalPart: it returns everything
// before the first '@' in the argument, or the whole string when no '@'
// is present.
func emailLocalPartImpl(arg ref.Val) ref.Val {
	email, ok := arg.Value().(string)
	if !ok {
		return types.NewErr("dex.emailLocalPart: expected string argument")
	}
	if local, _, found := strings.Cut(email, "@"); found {
		return types.String(local)
	}
	return types.String(email)
}

106
pkg/cel/library/email_test.go

@ -0,0 +1,106 @@
package library_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dexcel "github.com/dexidp/dex/pkg/cel"
)
// TestEmailDomain covers dex.emailDomain, including the no-'@' and
// multiple-'@' edge cases (the split happens at the first '@').
func TestEmailDomain(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	cases := map[string]struct {
		expr string
		want string
	}{
		"standard email": {
			expr: `dex.emailDomain("user@example.com")`,
			want: "example.com",
		},
		"subdomain": {
			expr: `dex.emailDomain("admin@sub.domain.org")`,
			want: "sub.domain.org",
		},
		"no at sign": {
			expr: `dex.emailDomain("nodomain")`,
			want: "",
		},
		"empty string": {
			expr: `dex.emailDomain("")`,
			want: "",
		},
		"multiple at signs": {
			expr: `dex.emailDomain("user@name@example.com")`,
			want: "name@example.com",
		},
	}
	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			compiled, err := compiler.CompileString(tt.expr)
			require.NoError(t, err)

			got, err := dexcel.EvalString(context.Background(), compiled, map[string]any{})
			require.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}
// TestEmailLocalPart covers dex.emailLocalPart, including the no-'@' case
// where the whole input is returned unchanged.
func TestEmailLocalPart(t *testing.T) {
	compiler, err := dexcel.NewCompiler(nil)
	require.NoError(t, err)

	cases := map[string]struct {
		expr string
		want string
	}{
		"standard email": {
			expr: `dex.emailLocalPart("user@example.com")`,
			want: "user",
		},
		"no at sign": {
			expr: `dex.emailLocalPart("justuser")`,
			want: "justuser",
		},
		"empty string": {
			expr: `dex.emailLocalPart("")`,
			want: "",
		},
		"multiple at signs": {
			expr: `dex.emailLocalPart("user@name@example.com")`,
			want: "user",
		},
	}
	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			compiled, err := compiler.CompileString(tt.expr)
			require.NoError(t, err)

			got, err := dexcel.EvalString(context.Background(), compiled, map[string]any{})
			require.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}
// TestEmailDomainWithIdentityVariable checks that dex.emailDomain accepts a
// typed identity field rather than only string literals.
func TestEmailDomainWithIdentityVariable(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	compiled, err := compiler.CompileString(`dex.emailDomain(identity.email)`)
	require.NoError(t, err)

	activation := map[string]any{
		"identity": dexcel.IdentityVal{Email: "admin@corp.example.com"},
	}
	got, err := dexcel.EvalString(context.Background(), compiled, activation)
	require.NoError(t, err)
	assert.Equal(t, "corp.example.com", got)
}

123
pkg/cel/library/groups.go

@ -0,0 +1,123 @@
package library
import (
"path"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
)
// Groups provides group-related CEL functions.
// It implements the cel.Library interface so it can be registered in a
// CEL environment via its CompileOptions and ProgramOptions.
//
// Functions (V1):
//
//	dex.groupMatches(groups: list(string), pattern: string) -> list(string)
//	  Returns groups matching a glob pattern.
//	  Example: dex.groupMatches(["team:dev", "team:ops", "admin"], "team:*")
//
//	dex.groupFilter(groups: list(string), allowed: list(string)) -> list(string)
//	  Returns only groups present in the allowed list.
//	  Example: dex.groupFilter(["admin", "dev", "ops"], ["admin", "ops"])
type Groups struct{}
// CompileOptions declares the dex.groupMatches and dex.groupFilter
// functions, each as a single binary overload over list(string).
func (Groups) CompileOptions() []cel.EnvOption {
	matchesFn := cel.Function("dex.groupMatches",
		cel.Overload("dex_group_matches_list_string",
			[]*cel.Type{cel.ListType(cel.StringType), cel.StringType},
			cel.ListType(cel.StringType),
			cel.BinaryBinding(groupMatchesImpl),
		),
	)
	filterFn := cel.Function("dex.groupFilter",
		cel.Overload("dex_group_filter_list_list",
			[]*cel.Type{cel.ListType(cel.StringType), cel.ListType(cel.StringType)},
			cel.ListType(cel.StringType),
			cel.BinaryBinding(groupFilterImpl),
		),
	)
	return []cel.EnvOption{matchesFn, filterFn}
}
// ProgramOptions returns no program-time options; the groups library is
// fully declared at compile time.
func (Groups) ProgramOptions() []cel.ProgramOption {
	return nil
}
// groupMatchesImpl implements dex.groupMatches: it returns the subset of
// the first argument whose entries match the glob pattern in the second.
// Non-string list elements are skipped; an invalid pattern yields a CEL
// error value at evaluation time.
func groupMatchesImpl(lhs, rhs ref.Val) ref.Val {
	groups, ok := lhs.(traits.Lister)
	if !ok {
		return types.NewErr("dex.groupMatches: expected list(string) as first argument")
	}
	pattern, ok := rhs.Value().(string)
	if !ok {
		return types.NewErr("dex.groupMatches: expected string pattern as second argument")
	}

	var matched []ref.Val
	for it := groups.Iterator(); it.HasNext() == types.True; {
		group, ok := it.Next().Value().(string)
		if !ok {
			continue
		}
		isMatch, err := path.Match(pattern, group)
		if err != nil {
			return types.NewErr("dex.groupMatches: invalid pattern %q: %v", pattern, err)
		}
		if isMatch {
			matched = append(matched, types.String(group))
		}
	}
	return types.NewRefValList(types.DefaultTypeAdapter, matched)
}
// groupFilterImpl implements dex.groupFilter: it returns the entries of the
// first list that also appear in the second, preserving the first list's
// order. Non-string elements in either list are skipped.
func groupFilterImpl(lhs, rhs ref.Val) ref.Val {
	groups, ok := lhs.(traits.Lister)
	if !ok {
		return types.NewErr("dex.groupFilter: expected list(string) as first argument")
	}
	allowedVals, ok := rhs.(traits.Lister)
	if !ok {
		return types.NewErr("dex.groupFilter: expected list(string) as second argument")
	}

	// Collect the allowed names into a set for O(1) membership checks.
	allowed := map[string]struct{}{}
	for it := allowedVals.Iterator(); it.HasNext() == types.True; {
		if name, ok := it.Next().Value().(string); ok {
			allowed[name] = struct{}{}
		}
	}

	var filtered []ref.Val
	for it := groups.Iterator(); it.HasNext() == types.True; {
		group, ok := it.Next().Value().(string)
		if !ok {
			continue
		}
		if _, found := allowed[group]; found {
			filtered = append(filtered, types.String(group))
		}
	}
	return types.NewRefValList(types.DefaultTypeAdapter, filtered)
}

141
pkg/cel/library/groups_test.go

@ -0,0 +1,141 @@
package library_test
import (
"context"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dexcel "github.com/dexidp/dex/pkg/cel"
)
// TestGroupMatches covers dex.groupMatches with glob patterns ('*', '?',
// exact names) against an identity's group list.
func TestGroupMatches(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	cases := map[string]struct {
		expr   string
		groups []string
		want   []string
	}{
		"wildcard pattern": {
			expr:   `dex.groupMatches(identity.groups, "team:*")`,
			groups: []string{"team:dev", "team:ops", "admin"},
			want:   []string{"team:dev", "team:ops"},
		},
		"exact match": {
			expr:   `dex.groupMatches(identity.groups, "admin")`,
			groups: []string{"team:dev", "admin", "user"},
			want:   []string{"admin"},
		},
		"no matches": {
			expr:   `dex.groupMatches(identity.groups, "nonexistent")`,
			groups: []string{"team:dev", "admin"},
			want:   []string{},
		},
		"question mark pattern": {
			expr:   `dex.groupMatches(identity.groups, "team?")`,
			groups: []string{"teamA", "teamB", "teams-long"},
			want:   []string{"teamA", "teamB"},
		},
		"match all": {
			expr:   `dex.groupMatches(identity.groups, "*")`,
			groups: []string{"a", "b", "c"},
			want:   []string{"a", "b", "c"},
		},
	}
	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			compiled, err := compiler.CompileStringList(tt.expr)
			require.NoError(t, err)

			out, err := dexcel.Eval(context.Background(), compiled, map[string]any{
				"identity": dexcel.IdentityVal{Groups: tt.groups},
			})
			require.NoError(t, err)

			native, err := out.ConvertToNative(reflect.TypeOf([]string{}))
			require.NoError(t, err)
			got, ok := native.([]string)
			require.True(t, ok, "expected []string, got %T", native)
			assert.Equal(t, tt.want, got)
		})
	}
}
// TestGroupMatchesInvalidPattern verifies that a malformed glob pattern
// surfaces as an evaluation error rather than a silent empty result.
func TestGroupMatchesInvalidPattern(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	compiled, err := compiler.CompileStringList(`dex.groupMatches(identity.groups, "[invalid")`)
	require.NoError(t, err)

	_, err = dexcel.Eval(context.Background(), compiled, map[string]any{
		"identity": dexcel.IdentityVal{Groups: []string{"admin"}},
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid pattern")
}
// TestGroupFilter covers dex.groupFilter, including empty allow-lists and
// the guarantee that the input order of groups is preserved.
func TestGroupFilter(t *testing.T) {
	compiler, err := dexcel.NewCompiler(dexcel.IdentityVariables())
	require.NoError(t, err)

	cases := map[string]struct {
		expr   string
		groups []string
		want   []string
	}{
		"filter to allowed": {
			expr:   `dex.groupFilter(identity.groups, ["admin", "ops"])`,
			groups: []string{"admin", "dev", "ops"},
			want:   []string{"admin", "ops"},
		},
		"no overlap": {
			expr:   `dex.groupFilter(identity.groups, ["marketing"])`,
			groups: []string{"admin", "dev"},
			want:   []string{},
		},
		"all allowed": {
			expr:   `dex.groupFilter(identity.groups, ["a", "b", "c"])`,
			groups: []string{"a", "b", "c"},
			want:   []string{"a", "b", "c"},
		},
		"empty allowed list": {
			expr:   `dex.groupFilter(identity.groups, [])`,
			groups: []string{"admin", "dev"},
			want:   []string{},
		},
		"preserves order": {
			expr:   `dex.groupFilter(identity.groups, ["z", "a"])`,
			groups: []string{"a", "b", "z"},
			want:   []string{"a", "z"},
		},
	}
	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			compiled, err := compiler.CompileStringList(tt.expr)
			require.NoError(t, err)

			out, err := dexcel.Eval(context.Background(), compiled, map[string]any{
				"identity": dexcel.IdentityVal{Groups: tt.groups},
			})
			require.NoError(t, err)

			native, err := out.ConvertToNative(reflect.TypeOf([]string{}))
			require.NoError(t, err)
			got, ok := native.([]string)
			require.True(t, ok, "expected []string, got %T", native)
			assert.Equal(t, tt.want, got)
		})
	}
}

109
pkg/cel/types.go

@ -0,0 +1,109 @@
package cel
import (
"github.com/google/cel-go/cel"
"github.com/dexidp/dex/connector"
)
// VariableDeclaration declares a named variable and its CEL type
// that will be available in expressions.
type VariableDeclaration struct {
	// Name is the identifier the variable is bound to in expressions.
	Name string
	// Type is the declared CEL type of the variable.
	Type *cel.Type
}

// IdentityVal is the CEL native type for the identity variable.
// Fields are typed so that the CEL compiler rejects unknown field access
// (e.g. identity.emial) at config load time rather than at evaluation time.
// The cel struct tags define the field names visible in expressions.
type IdentityVal struct {
	UserID            string   `cel:"user_id"`
	Username          string   `cel:"username"`
	PreferredUsername string   `cel:"preferred_username"`
	Email             string   `cel:"email"`
	EmailVerified     bool     `cel:"email_verified"`
	Groups            []string `cel:"groups"`
}

// RequestVal is the CEL native type for the request variable.
// The cel struct tags define the field names visible in expressions.
type RequestVal struct {
	ClientID    string   `cel:"client_id"`
	ConnectorID string   `cel:"connector_id"`
	Scopes      []string `cel:"scopes"`
	RedirectURI string   `cel:"redirect_uri"`
}

// identityTypeName is the CEL type name for IdentityVal.
// Derived by ext.NativeTypes as simplePkgAlias(pkgPath) + "." + structName.
const identityTypeName = "cel.IdentityVal"

// requestTypeName is the CEL type name for RequestVal.
// Derived by the same rule as identityTypeName.
const requestTypeName = "cel.RequestVal"
// IdentityVariables provides the 'identity' variable with typed fields.
//
//	identity.user_id            — string
//	identity.username           — string
//	identity.preferred_username — string
//	identity.email              — string
//	identity.email_verified     — bool
//	identity.groups             — list(string)
func IdentityVariables() []VariableDeclaration {
	return []VariableDeclaration{{
		Name: "identity",
		Type: cel.ObjectType(identityTypeName),
	}}
}
// RequestVariables provides the 'request' variable with typed fields.
//
//	request.client_id    — string
//	request.connector_id — string
//	request.scopes       — list(string)
//	request.redirect_uri — string
func RequestVariables() []VariableDeclaration {
	return []VariableDeclaration{{
		Name: "request",
		Type: cel.ObjectType(requestTypeName),
	}}
}
// ClaimsVariable provides a 'claims' map for raw upstream claims.
// Claims remain map(string, dyn) because their shape is genuinely
// unknown — they carry arbitrary upstream IdP data.
//
//	claims — map(string, dyn)
func ClaimsVariable() []VariableDeclaration {
	return []VariableDeclaration{{
		Name: "claims",
		Type: cel.MapType(cel.StringType, cel.DynType),
	}}
}
// IdentityFromConnector converts a connector.Identity into the IdentityVal
// shape expected by CEL activations; it is a plain field-by-field copy.
func IdentityFromConnector(id connector.Identity) IdentityVal {
	return IdentityVal{
		Groups:            id.Groups,
		EmailVerified:     id.EmailVerified,
		Email:             id.Email,
		PreferredUsername: id.PreferredUsername,
		Username:          id.Username,
		UserID:            id.UserID,
	}
}
// RequestContext represents the authentication/token request context
// available as the 'request' variable in CEL expressions.
type RequestContext struct {
	// ClientID is the OAuth2 client making the request.
	ClientID string
	// ConnectorID identifies the upstream connector handling the login.
	ConnectorID string
	// Scopes are the OAuth2 scopes requested by the client.
	Scopes []string
	// RedirectURI is the client's requested redirect target.
	RedirectURI string
}
// RequestFromContext converts a RequestContext into the RequestVal shape
// expected by CEL activations; it is a plain field-by-field copy.
func RequestFromContext(rc RequestContext) RequestVal {
	return RequestVal{
		RedirectURI: rc.RedirectURI,
		Scopes:      rc.Scopes,
		ConnectorID: rc.ConnectorID,
		ClientID:    rc.ClientID,
	}
}

3
pkg/featureflags/doc.go

@ -0,0 +1,3 @@
// Package featureflags provides a mechanism for toggling experimental or
// optional Dex features via environment variables (DEX_<FLAG_NAME>).
package featureflags

10
pkg/featureflags/set.go

@ -14,4 +14,14 @@ var (
// ContinueOnConnectorFailure allows the server to start even if some connectors fail to initialize.
ContinueOnConnectorFailure = newFlag("continue_on_connector_failure", true)
// ConfigDisallowUnknownFields enables to forbid unknown fields in the config while unmarshaling.
ConfigDisallowUnknownFields = newFlag("config_disallow_unknown_fields", false)
// ClientCredentialGrantEnabledByDefault enables the client_credentials grant type by default
// without requiring explicit configuration in oauth2.grantTypes.
ClientCredentialGrantEnabledByDefault = newFlag("client_credential_grant_enabled_by_default", false)
// SessionsEnabled enables experimental auth sessions support.
SessionsEnabled = newFlag("sessions_enabled", false)
)

2
pkg/groups/doc.go

@ -0,0 +1,2 @@
// Package groups contains helper functions related to groups.
package groups

1
pkg/groups/groups.go

@ -1,4 +1,3 @@
// Package groups contains helper functions related to groups
package groups
// Filter filters out any groups of given that are not in required. Thus it may

3
pkg/httpclient/doc.go

@ -0,0 +1,3 @@
// Package httpclient provides a configurable HTTP client constructor with
// support for custom CA certificates, root CAs, and TLS settings.
package httpclient

88
server/api.go

@ -58,13 +58,14 @@ func (d dexAPI) GetClient(ctx context.Context, req *api.GetClientReq) (*api.GetC
return &api.GetClientResp{
Client: &api.Client{
Id: c.ID,
Name: c.Name,
Secret: c.Secret,
RedirectUris: c.RedirectURIs,
TrustedPeers: c.TrustedPeers,
Public: c.Public,
LogoUrl: c.LogoURL,
Id: c.ID,
Name: c.Name,
Secret: c.Secret,
RedirectUris: c.RedirectURIs,
TrustedPeers: c.TrustedPeers,
Public: c.Public,
LogoUrl: c.LogoURL,
AllowedConnectors: c.AllowedConnectors,
},
}, nil
}
@ -82,13 +83,14 @@ func (d dexAPI) CreateClient(ctx context.Context, req *api.CreateClientReq) (*ap
}
c := storage.Client{
ID: req.Client.Id,
Secret: req.Client.Secret,
RedirectURIs: req.Client.RedirectUris,
TrustedPeers: req.Client.TrustedPeers,
Public: req.Client.Public,
Name: req.Client.Name,
LogoURL: req.Client.LogoUrl,
ID: req.Client.Id,
Secret: req.Client.Secret,
RedirectURIs: req.Client.RedirectUris,
TrustedPeers: req.Client.TrustedPeers,
Public: req.Client.Public,
Name: req.Client.Name,
LogoURL: req.Client.LogoUrl,
AllowedConnectors: req.Client.AllowedConnectors,
}
if err := d.s.CreateClient(ctx, c); err != nil {
if err == storage.ErrAlreadyExists {
@ -121,6 +123,9 @@ func (d dexAPI) UpdateClient(ctx context.Context, req *api.UpdateClientReq) (*ap
if req.LogoUrl != "" {
old.LogoURL = req.LogoUrl
}
if req.AllowedConnectors != nil {
old.AllowedConnectors = req.AllowedConnectors
}
return old, nil
})
if err != nil {
@ -155,12 +160,13 @@ func (d dexAPI) ListClients(ctx context.Context, req *api.ListClientReq) (*api.L
clients := make([]*api.ClientInfo, 0, len(clientList))
for _, client := range clientList {
c := api.ClientInfo{
Id: client.ID,
Name: client.Name,
RedirectUris: client.RedirectURIs,
TrustedPeers: client.TrustedPeers,
Public: client.Public,
LogoUrl: client.LogoURL,
Id: client.ID,
Name: client.Name,
RedirectUris: client.RedirectURIs,
TrustedPeers: client.TrustedPeers,
Public: client.Public,
LogoUrl: client.LogoURL,
AllowedConnectors: client.AllowedConnectors,
}
clients = append(clients, &c)
}
@ -455,12 +461,19 @@ func (d dexAPI) CreateConnector(ctx context.Context, req *api.CreateConnectorReq
return nil, errors.New("invalid config supplied")
}
for _, gt := range req.Connector.GrantTypes {
if !ConnectorGrantTypes[gt] {
return nil, fmt.Errorf("unknown grant type %q", gt)
}
}
c := storage.Connector{
ID: req.Connector.Id,
Name: req.Connector.Name,
Type: req.Connector.Type,
ResourceVersion: "1",
Config: req.Connector.Config,
GrantTypes: req.Connector.GrantTypes,
}
if err := d.s.CreateConnector(ctx, c); err != nil {
if err == storage.ErrAlreadyExists {
@ -470,6 +483,11 @@ func (d dexAPI) CreateConnector(ctx context.Context, req *api.CreateConnectorReq
return nil, fmt.Errorf("create connector: %v", err)
}
// Make sure we don't reuse stale entries in the cache
if d.server != nil {
d.server.CloseConnector(req.Connector.Id)
}
return &api.CreateConnectorResp{}, nil
}
@ -482,14 +500,26 @@ func (d dexAPI) UpdateConnector(ctx context.Context, req *api.UpdateConnectorReq
return nil, errors.New("no email supplied")
}
if len(req.NewConfig) == 0 && req.NewName == "" && req.NewType == "" {
hasUpdate := len(req.NewConfig) != 0 ||
req.NewName != "" ||
req.NewType != "" ||
req.NewGrantTypes != nil
if !hasUpdate {
return nil, errors.New("nothing to update")
}
if !json.Valid(req.NewConfig) {
if len(req.NewConfig) != 0 && !json.Valid(req.NewConfig) {
return nil, errors.New("invalid config supplied")
}
if req.NewGrantTypes != nil {
for _, gt := range req.NewGrantTypes.GrantTypes {
if !ConnectorGrantTypes[gt] {
return nil, fmt.Errorf("unknown grant type %q", gt)
}
}
}
updater := func(old storage.Connector) (storage.Connector, error) {
if req.NewType != "" {
old.Type = req.NewType
@ -503,6 +533,10 @@ func (d dexAPI) UpdateConnector(ctx context.Context, req *api.UpdateConnectorReq
old.Config = req.NewConfig
}
if req.NewGrantTypes != nil {
old.GrantTypes = req.NewGrantTypes.GrantTypes
}
if rev, err := strconv.Atoi(defaultTo(old.ResourceVersion, "0")); err == nil {
old.ResourceVersion = strconv.Itoa(rev + 1)
}
@ -538,6 +572,7 @@ func (d dexAPI) DeleteConnector(ctx context.Context, req *api.DeleteConnectorReq
d.logger.Error("api: failed to delete connector", "err", err)
return nil, fmt.Errorf("delete connector: %v", err)
}
return &api.DeleteConnectorResp{}, nil
}
@ -555,10 +590,11 @@ func (d dexAPI) ListConnectors(ctx context.Context, req *api.ListConnectorReq) (
connectors := make([]*api.Connector, 0, len(connectorList))
for _, connector := range connectorList {
c := api.Connector{
Id: connector.ID,
Name: connector.Name,
Type: connector.Type,
Config: connector.Config,
Id: connector.ID,
Name: connector.Name,
Type: connector.Type,
Config: connector.Config,
GrantTypes: connector.GrantTypes,
}
connectors = append(connectors, &c)
}

133
server/api_cache_test.go

@ -0,0 +1,133 @@
package server
import (
"context"
"encoding/json"
"testing"
"github.com/dexidp/dex/api/v2"
"github.com/dexidp/dex/connector"
"github.com/dexidp/dex/connector/mock"
"github.com/dexidp/dex/storage/memory"
)
// TestConnectorCacheInvalidation verifies that the server's in-memory
// connector cache is invalidated when a connector is deleted, re-created,
// or updated through the gRPC API. Without invalidation, a stale cached
// connector would keep serving the old configuration (here: the old
// password). The steps are strictly order-dependent.
func TestConnectorCacheInvalidation(t *testing.T) {
	// Connector CRUD over the API is feature-flagged; enable it for this test.
	t.Setenv("DEX_API_CONNECTORS_CRUD", "true")
	logger := newLogger(t)
	s := memory.New(logger)
	// Build a Server with an empty connector cache sharing the same storage
	// as the API, so API writes and server reads hit the same backend.
	serv := &Server{
		storage:    s,
		logger:     logger,
		connectors: make(map[string]Connector),
	}
	apiServer := NewAPI(s, logger, "test", serv)
	ctx := context.Background()
	connID := "mock-conn"
	// 1. Create a connector via API
	config1 := mock.PasswordConfig{
		Username: "user",
		Password: "first-password",
	}
	config1Bytes, _ := json.Marshal(config1)
	_, err := apiServer.CreateConnector(ctx, &api.CreateConnectorReq{
		Connector: &api.Connector{
			Id:     connID,
			Type:   "mockPassword",
			Name:   "Mock",
			Config: config1Bytes,
		},
	})
	if err != nil {
		t.Fatalf("failed to create connector: %v", err)
	}
	// 2. Load it into server cache
	c1, err := serv.getConnector(ctx, connID)
	if err != nil {
		t.Fatalf("failed to get connector: %v", err)
	}
	// Sanity check: the freshly cached connector accepts the first password.
	pc1 := c1.Connector.(connector.PasswordConnector)
	_, valid, err := pc1.Login(ctx, connector.Scopes{}, "user", "first-password")
	if err != nil || !valid {
		t.Fatalf("failed to login with first password: %v", err)
	}
	// 3. Delete it via API
	_, err = apiServer.DeleteConnector(ctx, &api.DeleteConnectorReq{Id: connID})
	if err != nil {
		t.Fatalf("failed to delete connector: %v", err)
	}
	// 4. Create it again with different password
	config2 := mock.PasswordConfig{
		Username: "user",
		Password: "second-password",
	}
	config2Bytes, _ := json.Marshal(config2)
	_, err = apiServer.CreateConnector(ctx, &api.CreateConnectorReq{
		Connector: &api.Connector{
			Id:     connID,
			Type:   "mockPassword",
			Name:   "Mock",
			Config: config2Bytes,
		},
	})
	if err != nil {
		t.Fatalf("failed to create connector: %v", err)
	}
	// 5. Load it again
	c2, err := serv.getConnector(ctx, connID)
	if err != nil {
		t.Fatalf("failed to get connector second time: %v", err)
	}
	pc2 := c2.Connector.(connector.PasswordConnector)
	// If the fix works, it should now use the second password.
	_, valid2, err := pc2.Login(ctx, connector.Scopes{}, "user", "second-password")
	if err != nil || !valid2 {
		t.Errorf("failed to login with second password, cache might still be stale")
	}
	// Negative check: the old password must no longer be accepted.
	_, valid1, _ := pc2.Login(ctx, connector.Scopes{}, "user", "first-password")
	if valid1 {
		t.Errorf("unexpectedly logged in with first password, cache is definitely stale")
	}
	// 6. Update it via API with a third password
	config3 := mock.PasswordConfig{
		Username: "user",
		Password: "third-password",
	}
	config3Bytes, _ := json.Marshal(config3)
	_, err = apiServer.UpdateConnector(ctx, &api.UpdateConnectorReq{
		Id:        connID,
		NewConfig: config3Bytes,
	})
	if err != nil {
		t.Fatalf("failed to update connector: %v", err)
	}
	// 7. Load it again
	c3, err := serv.getConnector(ctx, connID)
	if err != nil {
		t.Fatalf("failed to get connector third time: %v", err)
	}
	// The update path must also invalidate the cache, not just delete/create.
	pc3 := c3.Connector.(connector.PasswordConnector)
	_, valid3, err := pc3.Login(ctx, connector.Scopes{}, "user", "third-password")
	if err != nil || !valid3 {
		t.Errorf("failed to login with third password, UpdateConnector might be missing cache invalidation")
	}
}

99
server/api_test.go

@ -606,6 +606,105 @@ func TestUpdateConnector(t *testing.T) {
}
}
// TestUpdateConnectorGrantTypes verifies that UpdateConnector can set, clear,
// and validate the per-connector grant type list, and that invalid grant
// types are rejected on both update and create.
func TestUpdateConnectorGrantTypes(t *testing.T) {
	t.Setenv("DEX_API_CONNECTORS_CRUD", "true")

	logger := newLogger(t)
	store := memory.New(logger)
	client := newAPI(t, store, logger)
	defer client.Close()

	ctx := t.Context()
	const connectorID = "connector-gt"

	// Create a connector without grant types.
	if _, err := client.CreateConnector(ctx, &api.CreateConnectorReq{
		Connector: &api.Connector{
			Id:     connectorID,
			Name:   "TestConnector",
			Type:   "TestType",
			Config: []byte(`{"key": "value"}`),
		},
	}); err != nil {
		t.Fatalf("failed to create connector: %v", err)
	}

	// grantTypesOf fetches the connector's current grant types via ListConnectors.
	grantTypesOf := func() []string {
		resp, err := client.ListConnectors(ctx, &api.ListConnectorReq{})
		if err != nil {
			t.Fatalf("failed to list connectors: %v", err)
		}
		for _, c := range resp.Connectors {
			if c.Id == connectorID {
				return c.GrantTypes
			}
		}
		return nil
	}

	// Set grant types and verify they round-trip.
	if _, err := client.UpdateConnector(ctx, &api.UpdateConnectorReq{
		Id:            connectorID,
		NewGrantTypes: &api.GrantTypes{GrantTypes: []string{"authorization_code", "refresh_token"}},
	}); err != nil {
		t.Fatalf("failed to update connector grant types: %v", err)
	}
	if got := grantTypesOf(); !slices.Equal(got, []string{"authorization_code", "refresh_token"}) {
		t.Fatalf("expected grant types [authorization_code refresh_token], got %v", got)
	}

	// Clearing works by passing an empty GrantTypes message.
	if _, err := client.UpdateConnector(ctx, &api.UpdateConnectorReq{
		Id:            connectorID,
		NewGrantTypes: &api.GrantTypes{},
	}); err != nil {
		t.Fatalf("failed to clear connector grant types: %v", err)
	}
	if got := grantTypesOf(); len(got) != 0 {
		t.Fatalf("expected empty grant types after clear, got %v", got)
	}

	// An unknown grant type must be rejected on update.
	_, err := client.UpdateConnector(ctx, &api.UpdateConnectorReq{
		Id:            connectorID,
		NewGrantTypes: &api.GrantTypes{GrantTypes: []string{"bogus"}},
	})
	if err == nil {
		t.Fatal("expected error for invalid grant type, got nil")
	}
	if !strings.Contains(err.Error(), `unknown grant type "bogus"`) {
		t.Fatalf("unexpected error: %v", err)
	}

	// An unknown grant type must be rejected on create as well.
	_, err = client.CreateConnector(ctx, &api.CreateConnectorReq{
		Connector: &api.Connector{
			Id:         "bad-gt",
			Name:       "Bad",
			Type:       "TestType",
			Config:     []byte(`{}`),
			GrantTypes: []string{"invalid_type"},
		},
	})
	if err == nil {
		t.Fatal("expected error for invalid grant type on create, got nil")
	}
	if !strings.Contains(err.Error(), `unknown grant type "invalid_type"`) {
		t.Fatalf("unexpected error: %v", err)
	}
}
func TestDeleteConnector(t *testing.T) {
t.Setenv("DEX_API_CONNECTORS_CRUD", "true")

4
server/deviceflowhandlers.go

@ -431,7 +431,7 @@ func (s *Server) verifyUserCode(w http.ResponseWriter, r *http.Request) {
}
// Redirect to Dex Auth Endpoint
authURL := path.Join(s.issuerURL.Path, "/auth")
authURL := s.absURL("/auth")
u, err := url.Parse(authURL)
if err != nil {
s.renderError(r, w, http.StatusInternalServerError, "Invalid auth URI.")
@ -442,7 +442,7 @@ func (s *Server) verifyUserCode(w http.ResponseWriter, r *http.Request) {
q.Set("client_secret", deviceRequest.ClientSecret)
q.Set("state", deviceRequest.UserCode)
q.Set("response_type", "code")
q.Set("redirect_uri", "/device/callback")
q.Set("redirect_uri", s.absPath(deviceCallbackURI))
q.Set("scope", strings.Join(deviceRequest.Scopes, " "))
u.RawQuery = q.Encode()

38
server/deviceflowhandlers_test.go

@ -364,7 +364,7 @@ func TestDeviceCallback(t *testing.T) {
// Setup a dex server.
httpServer, s := newTestServer(t, func(c *Config) {
// c.Issuer = c.Issuer + "/non-root-path"
c.Issuer = c.Issuer + "/non-root-path"
c.Now = now
})
defer httpServer.Close()
@ -752,7 +752,8 @@ func TestVerifyCodeResponse(t *testing.T) {
testDeviceRequest storage.DeviceRequest
userCode string
expectedResponseCode int
expectedRedirectPath string
expectedAuthPath string
shouldRedirectToAuth bool
}{
{
testName: "Unknown user code",
@ -765,7 +766,6 @@ func TestVerifyCodeResponse(t *testing.T) {
},
userCode: "CODE-TEST",
expectedResponseCode: http.StatusBadRequest,
expectedRedirectPath: "",
},
{
testName: "Expired user code",
@ -778,7 +778,6 @@ func TestVerifyCodeResponse(t *testing.T) {
},
userCode: "ABCD-WXYZ",
expectedResponseCode: http.StatusBadRequest,
expectedRedirectPath: "",
},
{
testName: "No user code",
@ -791,10 +790,9 @@ func TestVerifyCodeResponse(t *testing.T) {
},
userCode: "",
expectedResponseCode: http.StatusBadRequest,
expectedRedirectPath: "",
},
{
testName: "Valid user code, expect redirect to auth endpoint",
testName: "Valid user code, expect redirect to auth endpoint with device callback",
testDeviceRequest: storage.DeviceRequest{
UserCode: "ABCD-WXYZ",
DeviceCode: "f00bar",
@ -804,7 +802,8 @@ func TestVerifyCodeResponse(t *testing.T) {
},
userCode: "ABCD-WXYZ",
expectedResponseCode: http.StatusFound,
expectedRedirectPath: "/auth",
expectedAuthPath: "/auth",
shouldRedirectToAuth: true,
},
}
for _, tc := range tests {
@ -839,15 +838,24 @@ func TestVerifyCodeResponse(t *testing.T) {
t.Errorf("Unexpected Response Type. Expected %v got %v", tc.expectedResponseCode, rr.Code)
}
u, err = url.Parse(s.issuerURL.String())
if err != nil {
t.Errorf("Could not parse issuer URL %v", err)
}
u.Path = path.Join(u.Path, tc.expectedRedirectPath)
location := rr.Header().Get("Location")
if rr.Code == http.StatusFound && !strings.HasPrefix(location, u.Path) {
t.Errorf("Invalid Redirect. Expected %v got %v", u.Path, location)
if rr.Code == http.StatusFound && tc.shouldRedirectToAuth {
// Parse the redirect location
redirectURL, err := url.Parse(location)
if err != nil {
t.Errorf("Could not parse redirect URL: %v", err)
return
}
// Check that the redirect path contains /auth
if !strings.Contains(redirectURL.Path, tc.expectedAuthPath) {
t.Errorf("Invalid Redirect Path. Expected to contain %q got %q", tc.expectedAuthPath, redirectURL.Path)
}
// Check that redirect_uri parameter contains /device/callback
if !strings.Contains(location, "redirect_uri=%2Fnon-root-path%2Fdevice%2Fcallback") {
t.Errorf("Invalid redirect_uri parameter. Expected to contain /device/callback (URL encoded), got %v", location)
}
}
})
}

368
server/handlers.go

@ -23,6 +23,7 @@ import (
"github.com/gorilla/mux"
"github.com/dexidp/dex/connector"
"github.com/dexidp/dex/pkg/featureflags"
"github.com/dexidp/dex/server/internal"
"github.com/dexidp/dex/storage"
)
@ -116,7 +117,7 @@ func (s *Server) constructDiscovery(ctx context.Context) discovery {
Introspect: s.absURL("/token/introspect"),
Subjects: []string{"public"},
IDTokenAlgs: []string{string(jose.RS256)},
CodeChallengeAlgs: []string{codeChallengeMethodS256, codeChallengeMethodPlain},
CodeChallengeAlgs: s.pkce.CodeChallengeMethodsSupported,
Scopes: []string{"openid", "email", "groups", "profile", "offline_access"},
AuthMethods: []string{"client_secret_basic", "client_secret_post"},
Claims: []string{
@ -142,6 +143,21 @@ func (s *Server) constructDiscovery(ctx context.Context) discovery {
return d
}
// grantTypeFromAuthRequest determines the grant type from the authorization
// request parameters: a redirect_uri pointing at the device callback endpoint
// marks the device code flow, a response_type containing "token" or "id_token"
// marks the implicit flow, and everything else is authorization_code.
func (s *Server) grantTypeFromAuthRequest(r *http.Request) string {
	// strings.HasSuffix(x, x) is true, so a single suffix check covers both the
	// bare deviceCallbackURI and an issuer-prefixed form of it; the previous
	// explicit equality comparison was redundant.
	if strings.HasSuffix(r.Form.Get("redirect_uri"), deviceCallbackURI) {
		return grantTypeDeviceCode
	}
	for _, rt := range strings.Fields(r.Form.Get("response_type")) {
		if rt == "token" || rt == "id_token" {
			return grantTypeImplicit
		}
	}
	return grantTypeAuthorizationCode
}
// handleAuthorization handles the OAuth2 auth endpoint.
func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -154,13 +170,36 @@ func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) {
}
connectorID := r.Form.Get("connector_id")
connectors, err := s.storage.ListConnectors(ctx)
allConnectors, err := s.storage.ListConnectors(ctx)
if err != nil {
s.logger.ErrorContext(r.Context(), "failed to get list of connectors", "err", err)
s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve connector list.")
return
}
// Determine the grant type from the authorization request to filter connectors.
grantType := s.grantTypeFromAuthRequest(r)
connectors := make([]storage.Connector, 0, len(allConnectors))
for _, c := range allConnectors {
if GrantTypeAllowed(c.GrantTypes, grantType) {
connectors = append(connectors, c)
}
}
// Filter connectors based on the client's allowed connectors list.
// client_id is required per RFC 6749 §4.1.1.
client, authErr := s.getClientWithAuthError(ctx, r.Form.Get("client_id"))
if authErr != nil {
s.renderError(r, w, authErr.Status, authErr.Error())
return
}
connectors = filterConnectors(connectors, client.AllowedConnectors)
if len(connectors) == 0 {
s.renderError(r, w, http.StatusBadRequest, "No connectors available for this client.")
return
}
// We don't need connector_id any more
r.Form.Del("connector_id")
@ -187,15 +226,15 @@ func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, connURL.String(), http.StatusFound)
}
connectorInfos := make([]connectorInfo, len(connectors))
for index, conn := range connectors {
connectorInfos := make([]connectorInfo, 0, len(connectors))
for _, conn := range connectors {
connURL.Path = s.absPath("/auth", url.PathEscape(conn.ID))
connectorInfos[index] = connectorInfo{
connectorInfos = append(connectorInfos, connectorInfo{
ID: conn.ID,
Name: conn.Name,
Type: conn.Type,
URL: template.URL(connURL.String()),
}
})
}
if err := s.templates.login(r, w, connectorInfos); err != nil {
@ -203,6 +242,57 @@ func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) {
}
}
// filterConnectors filters the list of connectors by the allowed connector IDs.
// If allowedConnectors is empty, all connectors are returned (no filtering).
func filterConnectors(connectors []storage.Connector, allowedConnectors []string) []storage.Connector {
	if len(allowedConnectors) == 0 {
		return connectors
	}

	// Build a set of permitted IDs for O(1) membership tests.
	allowedIDs := make(map[string]struct{}, len(allowedConnectors))
	for _, id := range allowedConnectors {
		allowedIDs[id] = struct{}{}
	}

	kept := make([]storage.Connector, 0, len(connectors))
	for _, conn := range connectors {
		if _, ok := allowedIDs[conn.ID]; ok {
			kept = append(kept, conn)
		}
	}
	return kept
}
// isConnectorAllowed checks if a connector ID is in the client's allowed connectors list.
// If allowedConnectors is empty, all connectors are allowed.
func isConnectorAllowed(allowedConnectors []string, connectorID string) bool {
	// An empty allow-list means the client imposes no restriction.
	if len(allowedConnectors) == 0 {
		return true
	}
	found := false
	for i := range allowedConnectors {
		if allowedConnectors[i] == connectorID {
			found = true
			break
		}
	}
	return found
}
// getClientWithAuthError retrieves a client by ID and returns a displayedAuthErr on failure.
// Invalid client_id is not treated as a redirect error per RFC 6749 §4.1.2.1.
// https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.2.1
func (s *Server) getClientWithAuthError(ctx context.Context, clientID string) (storage.Client, *displayedAuthErr) {
	client, err := s.storage.GetClient(ctx, clientID)
	switch {
	case err == nil:
		return client, nil
	case err == storage.ErrNotFound:
		// Unknown client: surface a 400 to the user rather than redirecting.
		s.logger.ErrorContext(ctx, "invalid client_id provided", "client_id", clientID)
		return storage.Client{}, newDisplayedErr(http.StatusBadRequest, "Invalid client_id provided.")
	default:
		s.logger.ErrorContext(ctx, "failed to get client", "client_id", clientID, "err", err)
		return storage.Client{}, newDisplayedErr(http.StatusInternalServerError, "Database error.")
	}
}
func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
authReq, err := s.parseAuthorizationRequest(r)
@ -228,6 +318,19 @@ func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) {
return
}
// Validate that the connector is allowed for this client.
client, authErr := s.getClientWithAuthError(ctx, authReq.ClientID)
if authErr != nil {
s.renderError(r, w, authErr.Status, authErr.Error())
return
}
if !isConnectorAllowed(client.AllowedConnectors, connID) {
s.logger.ErrorContext(r.Context(), "connector not allowed for client",
"connector_id", connID, "client_id", authReq.ClientID)
s.renderError(r, w, http.StatusForbidden, "Connector not allowed for this client.")
return
}
conn, err := s.getConnector(ctx, connID)
if err != nil {
s.logger.ErrorContext(r.Context(), "Failed to get connector", "err", err)
@ -235,6 +338,15 @@ func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) {
return
}
// Check if the connector allows the requested grant type.
grantType := s.grantTypeFromAuthRequest(r)
if !GrantTypeAllowed(conn.GrantTypes, grantType) {
s.logger.ErrorContext(r.Context(), "connector does not allow requested grant type",
"connector_id", connID, "grant_type", grantType)
s.renderError(r, w, http.StatusBadRequest, "Requested connector does not support this grant type.")
return
}
// Set the connector being used for the login.
if authReq.ConnectorID != "" && authReq.ConnectorID != connID {
s.logger.ErrorContext(r.Context(), "mismatched connector ID in auth request",
@ -558,8 +670,9 @@ func (s *Server) finalizeLogin(ctx context.Context, identity connector.Identity,
}
s.logger.InfoContext(ctx, "login successful",
"connector_id", authReq.ConnectorID, "username", claims.Username,
"preferred_username", claims.PreferredUsername, "email", email, "groups", claims.Groups)
"connector_id", authReq.ConnectorID, "user_id", claims.UserID,
"username", claims.Username, "preferred_username", claims.PreferredUsername,
"email", email, "groups", claims.Groups)
offlineAccessRequested := false
for _, scope := range authReq.Scopes {
@ -605,11 +718,60 @@ func (s *Server) finalizeLogin(ctx context.Context, identity connector.Identity,
}
}
// Create or update UserIdentity to persist user claims across sessions.
var userIdentity *storage.UserIdentity
if featureflags.SessionsEnabled.Enabled() {
now := s.now()
ui, err := s.storage.GetUserIdentity(ctx, identity.UserID, authReq.ConnectorID)
switch {
case err != nil && errors.Is(err, storage.ErrNotFound):
ui = storage.UserIdentity{
UserID: identity.UserID,
ConnectorID: authReq.ConnectorID,
Claims: claims,
Consents: make(map[string][]string),
CreatedAt: now,
LastLogin: now,
}
if err := s.storage.CreateUserIdentity(ctx, ui); err != nil {
s.logger.ErrorContext(ctx, "failed to create user identity", "err", err)
return "", false, err
}
case err == nil:
if err := s.storage.UpdateUserIdentity(ctx, identity.UserID, authReq.ConnectorID, func(old storage.UserIdentity) (storage.UserIdentity, error) {
if len(identity.ConnectorData) > 0 {
old.Claims = claims
old.LastLogin = now
return old, nil
}
return old, nil
}); err != nil {
s.logger.ErrorContext(ctx, "failed to update user identity", "err", err)
return "", false, err
}
// Update the existing UserIdentity obj with new claims to use them later in the flow.
ui.Claims = claims
ui.LastLogin = now
default:
s.logger.ErrorContext(ctx, "failed to get user identity", "err", err)
return "", false, err
}
userIdentity = &ui
}
// we can skip the redirect to /approval and go ahead and send code if it's not required
if s.skipApproval && !authReq.ForceApprovalPrompt {
return "", true, nil
}
// Skip approval if user already consented to the requested scopes for this client.
if !authReq.ForceApprovalPrompt && userIdentity != nil {
if scopesCoveredByConsent(userIdentity.Consents[authReq.ClientID], authReq.Scopes) {
return "", true, nil
}
}
// an HMAC is used here to ensure that the request ID is unpredictable, ensuring that an attacker who intercepted the original
// flow would be unable to poll for the result at the /approval endpoint
h := hmac.New(sha256.New, authReq.HMACKey)
@ -635,6 +797,10 @@ func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) {
authReq, err := s.storage.GetAuthRequest(ctx, r.FormValue("req"))
if err != nil {
if err == storage.ErrNotFound {
s.renderError(r, w, http.StatusBadRequest, "User session error.")
return
}
s.logger.ErrorContext(r.Context(), "failed to get auth request", "err", err)
s.renderError(r, w, http.StatusInternalServerError, "Database error.")
return
@ -671,6 +837,18 @@ func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) {
s.renderError(r, w, http.StatusInternalServerError, "Approval rejected.")
return
}
// Persist user-approved scopes as consent for this client.
if featureflags.SessionsEnabled.Enabled() {
if err := s.storage.UpdateUserIdentity(ctx, authReq.Claims.UserID, authReq.ConnectorID, func(old storage.UserIdentity) (storage.UserIdentity, error) {
if old.Consents == nil {
old.Consents = make(map[string][]string)
}
old.Consents[authReq.ClientID] = authReq.Scopes
return old, nil
}); err != nil {
s.logger.ErrorContext(ctx, "failed to update user identity consents", "err", err)
}
}
s.sendCodeResponse(w, r, authReq)
}
}
@ -816,6 +994,27 @@ func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authRe
http.Redirect(w, r, u.String(), http.StatusSeeOther)
}
// scopesCoveredByConsent checks whether the approved scopes cover all requested scopes.
// The openid scope is excluded from the comparison as it is a technical scope
// that does not require user consent.
func scopesCoveredByConsent(approved, requested []string) bool {
requestLoop:
	for _, want := range requested {
		if want == scopeOpenID {
			continue
		}
		// Scope lists are short, so a linear scan per requested scope is fine.
		for _, have := range approved {
			if have == want {
				continue requestLoop
			}
		}
		return false
	}
	return true
}
func (s *Server) withClientFromStorage(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, storage.Client)) {
ctx := r.Context()
clientID, clientSecret, ok := r.BasicAuth()
@ -889,6 +1088,8 @@ func (s *Server) handleToken(w http.ResponseWriter, r *http.Request) {
s.withClientFromStorage(w, r, s.handlePasswordGrant)
case grantTypeTokenExchange:
s.withClientFromStorage(w, r, s.handleTokenExchange)
case grantTypeClientCredentials:
s.withClientFromStorage(w, r, s.handleClientCredentialsGrant)
default:
s.tokenErrHelper(w, errUnsupportedGrantType, "", http.StatusBadRequest)
}
@ -989,9 +1190,16 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au
}
reqRefresh := func() bool {
// Ensure the connector supports refresh tokens.
// Determine whether to issue a refresh token. A refresh token is only
// issued when all of the following are true:
// 1. The connector implements RefreshConnector.
// 2. The connector's grantTypes config allows refresh_token.
// 3. The client requested the offline_access scope.
//
// Connectors like `saml` do not implement RefreshConnector.
// When any condition is not met, the refresh token is silently omitted
// rather than returning an error. This matches the OAuth2 spec: the
// server is never required to issue a refresh token (RFC 6749 §1.5).
// https://datatracker.ietf.org/doc/html/rfc6749#section-1.5
conn, err := s.getConnector(ctx, authCode.ConnectorID)
if err != nil {
s.logger.ErrorContext(ctx, "connector not found", "connector_id", authCode.ConnectorID, "err", err)
@ -1004,6 +1212,10 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au
return false
}
if !GrantTypeAllowed(conn.GrantTypes, grantTypeRefreshToken) {
return false
}
for _, scope := range authCode.Scopes {
if scope == scopeOfflineAccess {
return true
@ -1072,9 +1284,10 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au
return nil, err
}
offlineSessions := storage.OfflineSessions{
UserID: refresh.Claims.UserID,
ConnID: refresh.ConnectorID,
Refresh: make(map[string]*storage.RefreshTokenRef),
UserID: refresh.Claims.UserID,
ConnID: refresh.ConnectorID,
Refresh: make(map[string]*storage.RefreshTokenRef),
ConnectorData: refresh.ConnectorData,
}
offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef
@ -1100,6 +1313,9 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au
// Update existing OfflineSession obj with new RefreshTokenRef.
if err := s.storage.UpdateOfflineSessions(ctx, session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
old.Refresh[tokenRef.ClientID] = &tokenRef
if len(refresh.ConnectorData) > 0 {
old.ConnectorData = refresh.ConnectorData
}
return old, nil
}); err != nil {
s.logger.ErrorContext(ctx, "failed to update offline session", "err", err)
@ -1205,6 +1421,11 @@ func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, cli
s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest)
return
}
if !GrantTypeAllowed(conn.GrantTypes, grantTypePassword) {
s.logger.ErrorContext(r.Context(), "connector does not allow password grant", "connector_id", connID)
s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not support password grant.", http.StatusBadRequest)
return
}
passwordConnector, ok := conn.Connector.(connector.PasswordConnector)
if !ok {
@ -1251,11 +1472,15 @@ func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, cli
}
reqRefresh := func() bool {
// Ensure the connector supports refresh tokens.
//
// Connectors like `saml` do not implement RefreshConnector.
_, ok := conn.Connector.(connector.RefreshConnector)
if !ok {
// Same logic as in exchangeAuthCode: silently omit refresh token
// when the connector doesn't support it or grantTypes forbids it.
// See RFC 6749 §1.5 — refresh tokens are never mandatory.
// https://datatracker.ietf.org/doc/html/rfc6749#section-1.5
if _, ok := conn.Connector.(connector.RefreshConnector); !ok {
return false
}
if !GrantTypeAllowed(conn.GrantTypes, grantTypeRefreshToken) {
return false
}
@ -1417,6 +1642,11 @@ func (s *Server) handleTokenExchange(w http.ResponseWriter, r *http.Request, cli
s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest)
return
}
if !GrantTypeAllowed(conn.GrantTypes, grantTypeTokenExchange) {
s.logger.ErrorContext(r.Context(), "connector does not allow token exchange", "connector_id", connID)
s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not support token exchange.", http.StatusBadRequest)
return
}
teConn, ok := conn.Connector.(connector.TokenIdentityConnector)
if !ok {
s.logger.ErrorContext(r.Context(), "connector doesn't implement token exchange", "connector_id", connID)
@ -1476,6 +1706,108 @@ func (s *Server) handleTokenExchange(w http.ResponseWriter, r *http.Request, cli
json.NewEncoder(w).Encode(resp)
}
// handleClientCredentialsGrant implements the OAuth2 client_credentials grant
// (RFC 6749 §4.4). The token is issued to the client itself — there is no
// resource owner — so claims are derived from the client record and the
// connector ID is left empty.
func (s *Server) handleClientCredentialsGrant(w http.ResponseWriter, r *http.Request, client storage.Client) {
	ctx := r.Context()

	// client_credentials requires a confidential client.
	if client.Public {
		s.tokenErrHelper(w, errUnauthorizedClient, "Public clients cannot use client_credentials grant.", http.StatusBadRequest)
		return
	}

	// Parse scopes from request.
	if err := r.ParseForm(); err != nil {
		s.tokenErrHelper(w, errInvalidRequest, "Couldn't parse data", http.StatusBadRequest)
		return
	}
	scopes := strings.Fields(r.Form.Get("scope"))

	// Validate scopes. offline_access and federated:id are rejected outright;
	// anything not in the known set must parse as a cross-client scope or it
	// is collected as unrecognized.
	var (
		unrecognized  []string
		invalidScopes []string
	)
	hasOpenIDScope := false
	for _, scope := range scopes {
		switch scope {
		case scopeOpenID:
			hasOpenIDScope = true
		case scopeEmail, scopeProfile, scopeGroups:
			// allowed
		case scopeOfflineAccess:
			s.tokenErrHelper(w, errInvalidScope, "client_credentials grant does not support offline_access scope.", http.StatusBadRequest)
			return
		case scopeFederatedID:
			s.tokenErrHelper(w, errInvalidScope, "client_credentials grant does not support federated:id scope.", http.StatusBadRequest)
			return
		default:
			peerID, ok := parseCrossClientScope(scope)
			if !ok {
				unrecognized = append(unrecognized, scope)
				continue
			}
			// Cross-client scopes are only granted when the peer trusts this client.
			isTrusted, err := s.validateCrossClientTrust(ctx, client.ID, peerID)
			if err != nil {
				s.logger.ErrorContext(ctx, "error validating cross client trust", "client_id", client.ID, "peer_id", peerID, "err", err)
				s.tokenErrHelper(w, errInvalidClient, "Error validating cross client trust.", http.StatusBadRequest)
				return
			}
			if !isTrusted {
				invalidScopes = append(invalidScopes, scope)
			}
		}
	}
	if len(unrecognized) > 0 {
		s.tokenErrHelper(w, errInvalidScope, fmt.Sprintf("Unrecognized scope(s) %q", unrecognized), http.StatusBadRequest)
		return
	}
	if len(invalidScopes) > 0 {
		s.tokenErrHelper(w, errInvalidScope, fmt.Sprintf("Client can't request scope(s) %q", invalidScopes), http.StatusBadRequest)
		return
	}

	// Build claims from the client itself — no user involved.
	claims := storage.Claims{
		UserID: client.ID,
	}
	// Only populate Username/PreferredUsername when the profile scope is requested.
	for _, scope := range scopes {
		if scope == scopeProfile {
			claims.Username = client.Name
			claims.PreferredUsername = client.Name
			break
		}
	}

	nonce := r.Form.Get("nonce")

	// An empty connector ID is unique to the client_credentials grant:
	// creating connectors with an empty ID via the config or API is prohibited.
	connID := ""

	accessToken, expiry, err := s.newAccessToken(ctx, client.ID, claims, scopes, nonce, connID)
	if err != nil {
		s.logger.ErrorContext(ctx, "client_credentials grant failed to create new access token", "err", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}

	var idToken string
	if hasOpenIDScope {
		// An ID token is only minted when openid was requested; note that
		// expiry is overwritten with the value returned by newIDToken here.
		idToken, expiry, err = s.newIDToken(ctx, client.ID, claims, scopes, nonce, accessToken, "", connID)
		if err != nil {
			s.logger.ErrorContext(ctx, "client_credentials grant failed to create new ID token", "err", err)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
			return
		}
	}

	resp := s.toAccessTokenResponse(idToken, accessToken, "", expiry)
	s.writeAccessToken(w, resp)
}
type accessTokenResponse struct {
AccessToken string `json:"access_token"`
IssuedTokenType string `json:"issued_token_type,omitempty"`

117
server/handlers_approval_test.go

@ -0,0 +1,117 @@
package server
import (
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"errors"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/dexidp/dex/storage"
)
// getAuthRequestErrorStorage wraps a storage.Storage and forces GetAuthRequest
// to fail with a fixed error, letting tests exercise the approval handler's
// storage-error path. All other storage methods delegate to the embedded Storage.
type getAuthRequestErrorStorage struct {
	storage.Storage
	// err is returned by every GetAuthRequest call.
	err error
}

// GetAuthRequest always fails with s.err, regardless of the requested ID.
func (s *getAuthRequestErrorStorage) GetAuthRequest(context.Context, string) (storage.AuthRequest, error) {
	return storage.AuthRequest{}, s.err
}
// TestHandleApprovalGetAuthRequestErrorGET verifies that a storage failure
// while loading the auth request surfaces as a 500 "Database error." page.
func TestHandleApprovalGetAuthRequestErrorGET(t *testing.T) {
	httpServer, server := newTestServer(t, func(c *Config) {
		c.Storage = &getAuthRequestErrorStorage{Storage: c.Storage, err: errors.New("storage unavailable")}
	})
	defer httpServer.Close()

	rec := httptest.NewRecorder()
	server.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/approval?req=any&hmac=AQ", nil))

	require.Equal(t, http.StatusInternalServerError, rec.Code)
	require.Contains(t, rec.Body.String(), "Database error.")
}
// TestHandleApprovalGetAuthRequestNotFoundGET verifies that an unknown auth
// request ID yields a 400 user-session error, not an internal database error.
func TestHandleApprovalGetAuthRequestNotFoundGET(t *testing.T) {
	httpServer, server := newTestServer(t, nil)
	defer httpServer.Close()

	rec := httptest.NewRecorder()
	server.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/approval?req=does-not-exist&hmac=AQ", nil))

	require.Equal(t, http.StatusBadRequest, rec.Code)
	body := rec.Body.String()
	require.Contains(t, body, "User session error.")
	require.NotContains(t, body, "Database error.")
}
// TestHandleApprovalGetAuthRequestNotFoundPOST is the POST counterpart of the
// not-found case: an approval submission for an unknown request ID must return
// a 400 user-session error rather than an internal database error.
func TestHandleApprovalGetAuthRequestNotFoundPOST(t *testing.T) {
	httpServer, server := newTestServer(t, nil)
	defer httpServer.Close()

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodPost, "/approval", strings.NewReader("approval=approve&req=does-not-exist&hmac=AQ"))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	server.ServeHTTP(rec, req)

	require.Equal(t, http.StatusBadRequest, rec.Code)
	body := rec.Body.String()
	require.Contains(t, body, "User session error.")
	require.NotContains(t, body, "Database error.")
}
// TestHandleApprovalDoubleSubmitPOST verifies that an approval form can only
// be submitted once: the first POST redirects to the client, and a replay of
// the identical form is rejected with a 400 user-session error.
func TestHandleApprovalDoubleSubmitPOST(t *testing.T) {
	ctx := t.Context()
	httpServer, server := newTestServer(t, nil)
	defer httpServer.Close()

	authReq := storage.AuthRequest{
		ID:            "approval-double-submit",
		ClientID:      "test",
		ResponseTypes: []string{responseTypeCode},
		RedirectURI:   "https://client.example/callback",
		Expiry:        time.Now().Add(time.Minute),
		LoggedIn:      true,
		HMACKey:       []byte("approval-double-submit-key"),
	}
	require.NoError(t, server.storage.CreateAuthRequest(ctx, authReq))

	// Sign the request ID the same way the server does when rendering the form.
	mac := hmac.New(sha256.New, authReq.HMACKey)
	mac.Write([]byte(authReq.ID))
	form := url.Values{
		"approval": {"approve"},
		"req":      {authReq.ID},
		"hmac":     {base64.RawURLEncoding.EncodeToString(mac.Sum(nil))},
	}

	// submit POSTs the approval form and returns the recorded response.
	submit := func() *httptest.ResponseRecorder {
		rec := httptest.NewRecorder()
		req := httptest.NewRequest(http.MethodPost, "/approval", strings.NewReader(form.Encode()))
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		server.ServeHTTP(rec, req)
		return rec
	}

	first := submit()
	require.Equal(t, http.StatusSeeOther, first.Code)
	require.Contains(t, first.Header().Get("Location"), "https://client.example/callback")

	second := submit()
	require.Equal(t, http.StatusBadRequest, second.Code)
	require.Contains(t, second.Body.String(), "User session error.")
	require.NotContains(t, second.Body.String(), "Database error.")
}

1020
server/handlers_test.go

File diff suppressed because it is too large Load Diff

4
server/introspectionhandler_test.go

@ -298,7 +298,7 @@ func TestHandleIntrospect(t *testing.T) {
{
testName: "Access Token: active",
token: activeAccessToken,
response: toJSON(getIntrospectionValue(s.issuerURL, time.Now(), expiry, "access_token")),
response: toJSON(getIntrospectionValue(s.issuerURL, t0, expiry, "access_token")),
responseStatusCode: 200,
},
{
@ -311,7 +311,7 @@ func TestHandleIntrospect(t *testing.T) {
{
testName: "Refresh Token: active",
token: activeRefreshToken,
response: toJSON(getIntrospectionValue(s.issuerURL, time.Now(), time.Now().Add(s.refreshTokenPolicy.absoluteLifetime), "refresh_token")),
response: toJSON(getIntrospectionValue(s.issuerURL, t0, t0.Add(s.refreshTokenPolicy.absoluteLifetime), "refresh_token")),
responseStatusCode: 200,
},
{

23
server/oauth2.go

@ -14,6 +14,7 @@ import (
"net"
"net/http"
"net/url"
"slices"
"strconv"
"strings"
"time"
@ -143,8 +144,19 @@ const (
grantTypePassword = "password"
grantTypeDeviceCode = "urn:ietf:params:oauth:grant-type:device_code"
grantTypeTokenExchange = "urn:ietf:params:oauth:grant-type:token-exchange"
grantTypeClientCredentials = "client_credentials"
)
// ConnectorGrantTypes is the set of grant types that can be restricted per connector.
var ConnectorGrantTypes = map[string]bool{
grantTypeAuthorizationCode: true,
grantTypeRefreshToken: true,
grantTypeImplicit: true,
grantTypePassword: true,
grantTypeDeviceCode: true,
grantTypeTokenExchange: true,
}
const (
// https://www.rfc-editor.org/rfc/rfc8693.html#section-3
tokenTypeAccess = "urn:ietf:params:oauth:token-type:access_token"
@ -464,6 +476,9 @@ func (s *Server) parseAuthorizationRequest(r *http.Request) (*storage.AuthReques
if !validateConnectorID(connectors, connectorID) {
return nil, newRedirectedErr(errInvalidRequest, "Invalid ConnectorID")
}
if !isConnectorAllowed(client.AllowedConnectors, connectorID) {
return nil, newRedirectedErr(errInvalidRequest, "Connector not allowed for this client")
}
}
// dex doesn't support request parameter and must return request_not_supported error
@ -472,10 +487,16 @@ func (s *Server) parseAuthorizationRequest(r *http.Request) (*storage.AuthReques
return nil, newRedirectedErr(errRequestNotSupported, "Server does not support request parameter.")
}
if codeChallengeMethod != codeChallengeMethodS256 && codeChallengeMethod != codeChallengeMethodPlain {
if codeChallenge != "" && !slices.Contains(s.pkce.CodeChallengeMethodsSupported, codeChallengeMethod) {
return nil, newRedirectedErr(errInvalidRequest, "Unsupported PKCE challenge method (%q).", codeChallengeMethod)
}
// Enforce PKCE if configured.
// https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-4.1.1
if s.pkce.Enforce && codeChallenge == "" {
return nil, newRedirectedErr(errInvalidRequest, "PKCE is required. The code_challenge parameter must be provided.")
}
var (
unrecognized []string
invalidScopes []string

90
server/oauth2_test.go

@ -53,6 +53,7 @@ func TestParseAuthorizationRequest(t *testing.T) {
name string
clients []storage.Client
supportedResponseTypes []string
pkce PKCEConfig
usePOST bool
@ -319,6 +320,92 @@ func TestParseAuthorizationRequest(t *testing.T) {
},
expectedError: &redirectedAuthErr{Type: errInvalidRequest},
},
{
name: "PKCE enforced, no code_challenge provided",
clients: []storage.Client{
{
ID: "bar",
RedirectURIs: []string{"https://example.com/bar"},
},
},
supportedResponseTypes: []string{"code"},
pkce: PKCEConfig{
Enforce: true,
CodeChallengeMethodsSupported: []string{"S256", "plain"},
},
queryParams: map[string]string{
"client_id": "bar",
"redirect_uri": "https://example.com/bar",
"response_type": "code",
"scope": "openid email profile",
},
expectedError: &redirectedAuthErr{Type: errInvalidRequest},
},
{
name: "PKCE enforced, code_challenge provided",
clients: []storage.Client{
{
ID: "bar",
RedirectURIs: []string{"https://example.com/bar"},
},
},
supportedResponseTypes: []string{"code"},
pkce: PKCEConfig{
Enforce: true,
CodeChallengeMethodsSupported: []string{"S256", "plain"},
},
queryParams: map[string]string{
"client_id": "bar",
"redirect_uri": "https://example.com/bar",
"response_type": "code",
"code_challenge": "123",
"code_challenge_method": "S256",
"scope": "openid email profile",
},
},
{
name: "PKCE only S256 allowed, plain rejected",
clients: []storage.Client{
{
ID: "bar",
RedirectURIs: []string{"https://example.com/bar"},
},
},
supportedResponseTypes: []string{"code"},
pkce: PKCEConfig{
CodeChallengeMethodsSupported: []string{"S256"},
},
queryParams: map[string]string{
"client_id": "bar",
"redirect_uri": "https://example.com/bar",
"response_type": "code",
"code_challenge": "123",
"code_challenge_method": "plain",
"scope": "openid email profile",
},
expectedError: &redirectedAuthErr{Type: errInvalidRequest},
},
{
name: "PKCE only S256 allowed, S256 accepted",
clients: []storage.Client{
{
ID: "bar",
RedirectURIs: []string{"https://example.com/bar"},
},
},
supportedResponseTypes: []string{"code"},
pkce: PKCEConfig{
CodeChallengeMethodsSupported: []string{"S256"},
},
queryParams: map[string]string{
"client_id": "bar",
"redirect_uri": "https://example.com/bar",
"response_type": "code",
"code_challenge": "123",
"code_challenge_method": "S256",
"scope": "openid email profile",
},
},
}
for _, tc := range tests {
@ -326,6 +413,9 @@ func TestParseAuthorizationRequest(t *testing.T) {
httpServer, server := newTestServerMultipleConnectors(t, func(c *Config) {
c.SupportedResponseTypes = tc.supportedResponseTypes
c.Storage = storage.WithStaticClients(c.Storage, tc.clients)
if len(tc.pkce.CodeChallengeMethodsSupported) > 0 || tc.pkce.Enforce {
c.PKCE = tc.pkce
}
})
defer httpServer.Close()

4
server/refreshhandlers.go

@ -202,6 +202,10 @@ func (s *Server) getRefreshTokenFromStorage(ctx context.Context, clientID *strin
s.logger.ErrorContext(ctx, "connector not found", "connector_id", refresh.ConnectorID, "err", err)
return nil, newInternalServerError()
}
if !GrantTypeAllowed(refreshCtx.connector.GrantTypes, grantTypeRefreshToken) {
s.logger.ErrorContext(ctx, "connector does not allow refresh token grant", "connector_id", refresh.ConnectorID)
return nil, &refreshError{msg: errInvalidRequest, desc: "Connector does not support refresh tokens.", code: http.StatusBadRequest}
}
// Get Connector Data
session, err := s.storage.GetOfflineSessions(ctx, refresh.Claims.UserID, refresh.ConnectorID)

50
server/server.go

@ -13,6 +13,7 @@ import (
"net/url"
"os"
"path"
"slices"
"sort"
"strings"
"sync"
@ -45,6 +46,7 @@ import (
"github.com/dexidp/dex/connector/openshift"
"github.com/dexidp/dex/connector/saml"
"github.com/dexidp/dex/connector/ssh"
"github.com/dexidp/dex/pkg/featureflags"
"github.com/dexidp/dex/server/signer"
"github.com/dexidp/dex/storage"
"github.com/dexidp/dex/web"
@ -58,6 +60,13 @@ const LocalConnector = "local"
type Connector struct {
ResourceVersion string
Connector connector.Connector
GrantTypes []string
}
// GrantTypeAllowed reports whether grantType may be used with a connector
// configured with the given grant types. An empty configuration places no
// restriction, so every grant type is permitted.
func GrantTypeAllowed(configuredTypes []string, grantType string) bool {
	// No explicit configuration means the connector accepts all grant types.
	if len(configuredTypes) == 0 {
		return true
	}
	return slices.Contains(configuredTypes, grantType)
}
// Config holds the server's configuration options.
@ -107,6 +116,9 @@ type Config struct {
// If set, the server will use this connector to handle password grants
PasswordConnector string
// PKCE configuration
PKCE PKCEConfig
GCFrequency time.Duration // Defaults to 5 minutes
// If specified, the server will use this function for determining time.
@ -159,6 +171,14 @@ type WebConfig struct {
Extra map[string]string
}
// PKCEConfig holds PKCE (Proof Key for Code Exchange) settings.
type PKCEConfig struct {
	// If true, PKCE is required for all authorization code flows.
	Enforce bool
	// Supported code challenge methods. Defaults to ["S256", "plain"].
	// (The default is filled in by newServer when this slice is empty, and
	// any value outside that pair is rejected there with an error.)
	CodeChallengeMethodsSupported []string
}
func value(val, defaultValue time.Duration) time.Duration {
if val == 0 {
return defaultValue
@ -194,6 +214,8 @@ type Server struct {
supportedGrantTypes []string
pkce PKCEConfig
now func() time.Time
idTokensValidFor time.Duration
@ -229,6 +251,19 @@ func newServer(ctx context.Context, c Config) (*Server, error) {
c.AllowedHeaders = []string{"Authorization"}
}
supportedChallengeMethods := map[string]bool{
codeChallengeMethodS256: true,
codeChallengeMethodPlain: true,
}
if len(c.PKCE.CodeChallengeMethodsSupported) == 0 {
c.PKCE.CodeChallengeMethodsSupported = []string{codeChallengeMethodS256, codeChallengeMethodPlain}
}
for _, m := range c.PKCE.CodeChallengeMethodsSupported {
if !supportedChallengeMethods[m] {
return nil, fmt.Errorf("unsupported PKCE challenge method %q", m)
}
}
allSupportedGrants := map[string]bool{
grantTypeAuthorizationCode: true,
grantTypeRefreshToken: true,
@ -255,6 +290,8 @@ func newServer(ctx context.Context, c Config) (*Server, error) {
allSupportedGrants[grantTypePassword] = true
}
allSupportedGrants[grantTypeClientCredentials] = true
var supportedGrants []string
if len(c.AllowedGrantTypes) > 0 {
for _, grant := range c.AllowedGrantTypes {
@ -301,6 +338,7 @@ func newServer(ctx context.Context, c Config) (*Server, error) {
storage: newKeyCacher(c.Storage, now),
supportedResponseTypes: supportedRes,
supportedGrantTypes: supportedGrants,
pkce: c.PKCE,
idTokensValidFor: value(c.IDTokensValidFor, 24*time.Hour),
authRequestsValidFor: value(c.AuthRequestsValidFor, 24*time.Hour),
deviceRequestsValidFor: value(c.DeviceRequestsValidFor, 5*time.Minute),
@ -341,6 +379,10 @@ func newServer(ctx context.Context, c Config) (*Server, error) {
return nil, fmt.Errorf("server: failed to open all connectors (%d/%d)", failedCount, len(storageConnectors))
}
if featureflags.SessionsEnabled.Enabled() {
s.logger.InfoContext(ctx, "sessions feature flag is enabled")
}
instrumentHandler := func(_ string, handler http.Handler) http.HandlerFunc {
return handler.ServeHTTP
}
@ -739,6 +781,7 @@ func (s *Server) OpenConnector(conn storage.Connector) (Connector, error) {
connector := Connector{
ResourceVersion: conn.ResourceVersion,
Connector: c,
GrantTypes: conn.GrantTypes,
}
s.mu.Lock()
s.connectors[conn.ID] = connector
@ -747,6 +790,13 @@ func (s *Server) OpenConnector(conn storage.Connector) (Connector, error) {
return connector, nil
}
// CloseConnector removes the connector from the server's in-memory map.
func (s *Server) CloseConnector(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.connectors, id)
}
// getConnector retrieves the connector object with the given id from the storage
// and updates the connector list for server if necessary.
func (s *Server) getConnector(ctx context.Context, id string) (Connector, error) {

37
server/server_test.go

@ -103,6 +103,7 @@ func newTestServer(t *testing.T, updateConfig func(c *Config)) (*httptest.Server
AllowedGrantTypes: []string{ // all implemented types
grantTypeDeviceCode,
grantTypeAuthorizationCode,
grantTypeClientCredentials,
grantTypeRefreshToken,
grantTypeTokenExchange,
grantTypeImplicit,
@ -1640,7 +1641,7 @@ func TestOAuth2DeviceFlow(t *testing.T) {
// Add the Clients to the test server
client := storage.Client{
ID: clientID,
RedirectURIs: []string{deviceCallbackURI},
RedirectURIs: []string{s.absPath(deviceCallbackURI)},
Public: true,
}
if err := s.storage.CreateClient(ctx, client); err != nil {
@ -1751,7 +1752,7 @@ func TestOAuth2DeviceFlow(t *testing.T) {
ClientSecret: client.Secret,
Endpoint: p.Endpoint(),
Scopes: requestedScopes,
RedirectURL: deviceCallbackURI,
RedirectURL: s.absURL(deviceCallbackURI),
}
if len(tc.scopes) != 0 {
oauth2Config.Scopes = tc.scopes
@ -1774,7 +1775,7 @@ func TestServerSupportedGrants(t *testing.T) {
{
name: "Simple",
config: func(c *Config) {},
resGrants: []string{grantTypeAuthorizationCode, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
},
{
name: "Minimal",
@ -1782,14 +1783,30 @@ func TestServerSupportedGrants(t *testing.T) {
resGrants: []string{grantTypeTokenExchange},
},
{
name: "With password connector",
config: func(c *Config) { c.PasswordConnector = "local" },
resGrants: []string{grantTypeAuthorizationCode, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
name: "With password connector",
config: func(c *Config) {
c.PasswordConnector = "local"
},
resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
},
{
name: "With token response",
config: func(c *Config) { c.SupportedResponseTypes = append(c.SupportedResponseTypes, responseTypeToken) },
resGrants: []string{grantTypeAuthorizationCode, grantTypeImplicit, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
name: "Without client credentials",
config: func(c *Config) {
c.AllowedGrantTypes = []string{
grantTypeAuthorizationCode,
grantTypeRefreshToken,
grantTypeDeviceCode,
grantTypeTokenExchange,
}
},
resGrants: []string{grantTypeAuthorizationCode, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
},
{
name: "With token response",
config: func(c *Config) {
c.SupportedResponseTypes = append(c.SupportedResponseTypes, responseTypeToken)
},
resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypeImplicit, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
},
{
name: "All",
@ -1797,7 +1814,7 @@ func TestServerSupportedGrants(t *testing.T) {
c.PasswordConnector = "local"
c.SupportedResponseTypes = append(c.SupportedResponseTypes, responseTypeToken)
},
resGrants: []string{grantTypeAuthorizationCode, grantTypeImplicit, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypeImplicit, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange},
},
}

204
storage/conformance/conformance.go

@ -51,6 +51,8 @@ func RunTests(t *testing.T, newStorage func(t *testing.T) storage.Storage) {
{"TimezoneSupport", testTimezones},
{"DeviceRequestCRUD", testDeviceRequestCRUD},
{"DeviceTokenCRUD", testDeviceTokenCRUD},
{"UserIdentityCRUD", testUserIdentityCRUD},
{"AuthSessionCRUD", testAuthSessionCRUD},
})
}
@ -262,11 +264,12 @@ func testClientCRUD(t *testing.T, s storage.Storage) {
ctx := t.Context()
id1 := storage.NewID()
c1 := storage.Client{
ID: id1,
Secret: "foobar",
RedirectURIs: []string{"foo://bar.com/", "https://auth.example.com"},
Name: "dex client",
LogoURL: "https://goo.gl/JIyzIC",
ID: id1,
Secret: "foobar",
RedirectURIs: []string{"foo://bar.com/", "https://auth.example.com"},
Name: "dex client",
LogoURL: "https://goo.gl/JIyzIC",
AllowedConnectors: []string{"github", "google"},
}
err := s.DeleteClient(ctx, id1)
mustBeErrNotFound(t, "client", err)
@ -630,10 +633,11 @@ func testConnectorCRUD(t *testing.T, s storage.Storage) {
id1 := storage.NewID()
config1 := []byte(`{"issuer": "https://accounts.google.com"}`)
c1 := storage.Connector{
ID: id1,
Type: "Default",
Name: "Default",
Config: config1,
ID: id1,
Type: "Default",
Name: "Default",
Config: config1,
GrantTypes: []string{"authorization_code", "refresh_token"},
}
if err := s.CreateConnector(ctx, c1); err != nil {
@ -674,12 +678,14 @@ func testConnectorCRUD(t *testing.T, s storage.Storage) {
if err := s.UpdateConnector(ctx, c1.ID, func(old storage.Connector) (storage.Connector, error) {
old.Type = "oidc"
old.GrantTypes = []string{"urn:ietf:params:oauth:grant-type:token-exchange"}
return old, nil
}); err != nil {
t.Fatalf("failed to update Connector: %v", err)
}
c1.Type = "oidc"
c1.GrantTypes = []string{"urn:ietf:params:oauth:grant-type:token-exchange"}
getAndCompare(id1, c1)
connectorList := []storage.Connector{c1, c2}
@ -1080,3 +1086,183 @@ func testDeviceTokenCRUD(t *testing.T, s storage.Storage) {
t.Fatalf("storage does not support PKCE, wanted challenge=%#v got %#v", codeChallenge, got.PKCE)
}
}
// testUserIdentityCRUD exercises the full create/get/update/list/delete
// lifecycle of user identities against a storage implementation, including
// duplicate-create and not-found error conformance.
func testUserIdentityCRUD(t *testing.T, s storage.Storage) {
	ctx := t.Context()
	// Round to milliseconds so backends with coarser timestamp precision
	// still compare equal after a round trip through storage.
	now := time.Now().UTC().Round(time.Millisecond)
	u1 := storage.UserIdentity{
		UserID:      "user1",
		ConnectorID: "conn1",
		Claims: storage.Claims{
			UserID:        "user1",
			Username:      "jane",
			Email:         "jane@example.com",
			EmailVerified: true,
			Groups:        []string{"a", "b"},
		},
		Consents:  make(map[string][]string),
		CreatedAt: now,
		LastLogin: now,
		// Zero-epoch sentinel meaning "not blocked".
		BlockedUntil: time.Unix(0, 0).UTC(),
	}
	// Create with empty Consents map.
	if err := s.CreateUserIdentity(ctx, u1); err != nil {
		t.Fatalf("create user identity: %v", err)
	}
	// Duplicate create should return ErrAlreadyExists.
	err := s.CreateUserIdentity(ctx, u1)
	mustBeErrAlreadyExists(t, "user identity", err)
	// Get and compare.
	got, err := s.GetUserIdentity(ctx, u1.UserID, u1.ConnectorID)
	if err != nil {
		t.Fatalf("get user identity: %v", err)
	}
	// Normalize timezone and precision on both sides before deep comparison.
	got.CreatedAt = got.CreatedAt.UTC().Round(time.Millisecond)
	got.LastLogin = got.LastLogin.UTC().Round(time.Millisecond)
	got.BlockedUntil = got.BlockedUntil.UTC().Round(time.Millisecond)
	u1.BlockedUntil = u1.BlockedUntil.UTC().Round(time.Millisecond)
	if diff := pretty.Compare(u1, got); diff != "" {
		t.Errorf("user identity retrieved from storage did not match: %s", diff)
	}
	// Update: add consent entry.
	if err := s.UpdateUserIdentity(ctx, u1.UserID, u1.ConnectorID, func(old storage.UserIdentity) (storage.UserIdentity, error) {
		old.Consents["client1"] = []string{"openid", "email"}
		return old, nil
	}); err != nil {
		t.Fatalf("update user identity: %v", err)
	}
	// Get and verify updated consents.
	got, err = s.GetUserIdentity(ctx, u1.UserID, u1.ConnectorID)
	if err != nil {
		t.Fatalf("get user identity after update: %v", err)
	}
	wantConsents := map[string][]string{"client1": {"openid", "email"}}
	if diff := pretty.Compare(wantConsents, got.Consents); diff != "" {
		t.Errorf("user identity consents did not match after update: %s", diff)
	}
	// List and verify.
	identities, err := s.ListUserIdentities(ctx)
	if err != nil {
		t.Fatalf("list user identities: %v", err)
	}
	if len(identities) != 1 {
		t.Fatalf("expected 1 user identity, got %d", len(identities))
	}
	// Delete.
	if err := s.DeleteUserIdentity(ctx, u1.UserID, u1.ConnectorID); err != nil {
		t.Fatalf("delete user identity: %v", err)
	}
	// Get deleted should return ErrNotFound.
	_, err = s.GetUserIdentity(ctx, u1.UserID, u1.ConnectorID)
	mustBeErrNotFound(t, "user identity", err)
}
// testAuthSessionCRUD exercises the create/get/update/list/delete lifecycle of
// auth sessions, including the per-client state map nested inside a session.
func testAuthSessionCRUD(t *testing.T, s storage.Storage) {
	ctx := t.Context()
	// Millisecond rounding keeps comparisons stable across backends with
	// coarser timestamp precision.
	now := time.Now().UTC().Round(time.Millisecond)
	session := storage.AuthSession{
		ID: storage.NewID(),
		ClientStates: map[string]*storage.ClientAuthState{
			"client1": {
				UserID:            "user1",
				ConnectorID:       "conn1",
				Active:            true,
				ExpiresAt:         now.Add(24 * time.Hour),
				LastActivity:      now,
				LastTokenIssuedAt: now,
			},
		},
		CreatedAt:    now,
		LastActivity: now,
		IPAddress:    "192.168.1.1",
		UserAgent:    "TestBrowser/1.0",
	}
	// Create.
	if err := s.CreateAuthSession(ctx, session); err != nil {
		t.Fatalf("create auth session: %v", err)
	}
	// Duplicate create should return ErrAlreadyExists.
	err := s.CreateAuthSession(ctx, session)
	mustBeErrAlreadyExists(t, "auth session", err)
	// Get and compare.
	got, err := s.GetAuthSession(ctx, session.ID)
	if err != nil {
		t.Fatalf("get auth session: %v", err)
	}
	// Normalize timestamps (including those inside each client state —
	// ClientStates holds pointers, so mutating cs updates got in place).
	got.CreatedAt = got.CreatedAt.UTC().Round(time.Millisecond)
	got.LastActivity = got.LastActivity.UTC().Round(time.Millisecond)
	for _, cs := range got.ClientStates {
		cs.ExpiresAt = cs.ExpiresAt.UTC().Round(time.Millisecond)
		cs.LastActivity = cs.LastActivity.UTC().Round(time.Millisecond)
		cs.LastTokenIssuedAt = cs.LastTokenIssuedAt.UTC().Round(time.Millisecond)
	}
	if diff := pretty.Compare(session, got); diff != "" {
		t.Errorf("auth session retrieved from storage did not match: %s", diff)
	}
	// Update: add a new client state.
	newNow := now.Add(time.Minute)
	if err := s.UpdateAuthSession(ctx, session.ID, func(old storage.AuthSession) (storage.AuthSession, error) {
		old.ClientStates["client2"] = &storage.ClientAuthState{
			UserID:       "user2",
			ConnectorID:  "conn2",
			Active:       true,
			ExpiresAt:    newNow.Add(24 * time.Hour),
			LastActivity: newNow,
		}
		old.LastActivity = newNow
		return old, nil
	}); err != nil {
		t.Fatalf("update auth session: %v", err)
	}
	// Get and verify update.
	got, err = s.GetAuthSession(ctx, session.ID)
	if err != nil {
		t.Fatalf("get auth session after update: %v", err)
	}
	if len(got.ClientStates) != 2 {
		t.Fatalf("expected 2 client states, got %d", len(got.ClientStates))
	}
	if got.ClientStates["client2"] == nil {
		t.Fatal("expected client2 state to exist")
	}
	if got.ClientStates["client2"].UserID != "user2" {
		t.Errorf("expected client2 user_id to be user2, got %s", got.ClientStates["client2"].UserID)
	}
	// List and verify.
	sessions, err := s.ListAuthSessions(ctx)
	if err != nil {
		t.Fatalf("list auth sessions: %v", err)
	}
	if len(sessions) != 1 {
		t.Fatalf("expected 1 auth session, got %d", len(sessions))
	}
	// Delete.
	if err := s.DeleteAuthSession(ctx, session.ID); err != nil {
		t.Fatalf("delete auth session: %v", err)
	}
	// Get deleted should return ErrNotFound.
	_, err = s.GetAuthSession(ctx, session.ID)
	mustBeErrNotFound(t, "auth session", err)
}

121
storage/conformance/transactions.go

@ -2,9 +2,12 @@ package conformance
import (
"context"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/bcrypt"
"github.com/dexidp/dex/storage"
@ -26,6 +29,16 @@ func RunTransactionTests(t *testing.T, newStorage func(t *testing.T) storage.Sto
})
}
// RunConcurrencyTests runs tests that verify storage implementations handle
// high-contention parallel updates correctly. Unlike RunTransactionTests,
// these tests use real goroutine-based parallelism rather than nested calls,
// and are safe to run on all storage backends (including those with non-reentrant locks).
func RunConcurrencyTests(t *testing.T, newStorage func(t *testing.T) storage.Storage) {
runTests(t, newStorage, []subTest{
{"RefreshTokenParallelUpdate", testRefreshTokenParallelUpdate},
})
}
func testClientConcurrentUpdate(t *testing.T, s storage.Storage) {
ctx := t.Context()
c := storage.Client{
@ -180,3 +193,111 @@ func testKeysConcurrentUpdate(t *testing.T, s storage.Storage) {
}
}
}
// testRefreshTokenParallelUpdate tests that many parallel updates to the same
// refresh token are serialized correctly by the storage and no updates are lost.
//
// Each goroutine atomically increments a counter stored in the Token field.
// After all goroutines finish, the counter must equal the number of successful updates.
// A mismatch indicates lost updates due to broken atomicity.
// testRefreshTokenParallelUpdate tests that many parallel updates to the same
// refresh token are serialized correctly by the storage and no updates are lost.
//
// Each goroutine atomically increments a counter stored in the Token field.
// After all goroutines finish, the counter must equal the number of successful updates.
// A mismatch indicates lost updates due to broken atomicity.
func testRefreshTokenParallelUpdate(t *testing.T, s storage.Storage) {
	ctx := t.Context()
	id := storage.NewID()
	refresh := storage.RefreshToken{
		ID:          id,
		Token:       "0", // counter starts at zero
		Nonce:       "foo",
		ClientID:    "client_id",
		ConnectorID: "connector_id",
		Scopes:      []string{"openid"},
		CreatedAt:   time.Now().UTC().Round(time.Millisecond),
		LastUsed:    time.Now().UTC().Round(time.Millisecond),
		Claims: storage.Claims{
			UserID:   "1",
			Username: "jane",
			Email:    "jane@example.com",
		},
	}
	require.NoError(t, s.CreateRefresh(ctx, refresh))
	const numWorkers = 100
	type updateResult struct {
		err      error
		newToken string // token value written by this worker's updater
	}
	var wg sync.WaitGroup
	// Each goroutine writes only its own results[i] slot, so no extra
	// synchronization beyond the WaitGroup is required.
	// NOTE(review): `for i := range numWorkers` and per-iteration loop
	// variables require Go 1.22+ — confirm the module's go directive.
	results := make([]updateResult, numWorkers)
	for i := range numWorkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			results[i].err = s.UpdateRefreshToken(ctx, id, func(old storage.RefreshToken) (storage.RefreshToken, error) {
				counter, _ := strconv.Atoi(old.Token)
				old.Token = strconv.Itoa(counter + 1)
				results[i].newToken = old.Token
				return old, nil
			})
		}()
	}
	wg.Wait()
	// Tally successes and failures; group failures by message for logging.
	errCounts := map[string]int{}
	var successes int
	writtenTokens := map[string]int{}
	for _, r := range results {
		if r.err == nil {
			successes++
			writtenTokens[r.newToken]++
		} else {
			errCounts[r.err.Error()]++
		}
	}
	for msg, count := range errCounts {
		t.Logf("error (x%d): %s", count, msg)
	}
	stored, err := s.GetRefresh(ctx, id)
	require.NoError(t, err)
	counter, err := strconv.Atoi(stored.Token)
	require.NoError(t, err)
	t.Logf("parallel refresh token updates: %d/%d succeeded, final counter: %d", successes, numWorkers, counter)
	if successes < numWorkers {
		t.Errorf("not all updates succeeded: %d/%d (some failed under contention)", successes, numWorkers)
	}
	if counter != successes {
		t.Errorf("lost updates detected: %d successful updates but counter is %d", successes, counter)
	}
	// Each successful updater must have seen a unique counter value.
	// Duplicates would mean two updaters read the same state — a sign of broken atomicity.
	for token, count := range writtenTokens {
		if count > 1 {
			t.Errorf("token %q was written by %d updaters — concurrent updaters saw the same state", token, count)
		}
	}
	// Successful updaters must have produced a contiguous sequence 1..N.
	// A gap would mean an updater saw stale state even though the write succeeded.
	for i := 1; i <= successes; i++ {
		if writtenTokens[strconv.Itoa(i)] != 1 {
			t.Errorf("expected token %q to be written exactly once, got %d", strconv.Itoa(i), writtenTokens[strconv.Itoa(i)])
		}
	}
	// The token stored in the database must match the highest value written.
	// This confirms that the last successful update is the one persisted.
	if stored.Token != strconv.Itoa(successes) {
		t.Errorf("stored token %q does not match expected final value %q", stored.Token, strconv.Itoa(successes))
	}
}

108
storage/ent/client/authsession.go

@ -0,0 +1,108 @@
package client
import (
"context"
"encoding/json"
"fmt"
"github.com/dexidp/dex/storage"
)
// CreateAuthSession saves provided auth session into the database.
// A nil ClientStates map is normalized to an empty map before encoding so the
// stored JSON column always holds an object rather than null.
func (d *Database) CreateAuthSession(ctx context.Context, session storage.AuthSession) error {
	if session.ClientStates == nil {
		session.ClientStates = make(map[string]*storage.ClientAuthState)
	}
	encodedStates, err := json.Marshal(session.ClientStates)
	if err != nil {
		return fmt.Errorf("encode client states auth session: %w", err)
	}
	_, err = d.client.AuthSession.Create().
		SetID(session.ID).
		SetClientStates(encodedStates).
		SetCreatedAt(session.CreatedAt).
		SetLastActivity(session.LastActivity).
		SetIPAddress(session.IPAddress).
		SetUserAgent(session.UserAgent).
		Save(ctx)
	if err != nil {
		// convertDBError maps ent errors to storage-level errors
		// (e.g. ErrAlreadyExists for duplicate IDs).
		return convertDBError("create auth session: %w", err)
	}
	return nil
}
// GetAuthSession extracts an auth session from the database by session ID.
func (d *Database) GetAuthSession(ctx context.Context, sessionID string) (storage.AuthSession, error) {
	record, err := d.client.AuthSession.Get(ctx, sessionID)
	if err != nil {
		return storage.AuthSession{}, convertDBError("get auth session: %w", err)
	}
	return toStorageAuthSession(record), nil
}
// ListAuthSessions extracts all auth sessions from the database.
func (d *Database) ListAuthSessions(ctx context.Context) ([]storage.AuthSession, error) {
	records, err := d.client.AuthSession.Query().All(ctx)
	if err != nil {
		return nil, convertDBError("list auth sessions: %w", err)
	}
	sessions := make([]storage.AuthSession, len(records))
	for i, record := range records {
		sessions[i] = toStorageAuthSession(record)
	}
	return sessions, nil
}
// DeleteAuthSession deletes an auth session from the database by session ID.
func (d *Database) DeleteAuthSession(ctx context.Context, sessionID string) error {
	if err := d.client.AuthSession.DeleteOneID(sessionID).Exec(ctx); err != nil {
		return convertDBError("delete auth session: %w", err)
	}
	return nil
}
// UpdateAuthSession changes an auth session using an updater function.
// The read-modify-write runs inside a single transaction so concurrent
// updates cannot silently overwrite each other.
func (d *Database) UpdateAuthSession(ctx context.Context, sessionID string, updater func(s storage.AuthSession) (storage.AuthSession, error)) error {
	tx, err := d.BeginTx(ctx)
	if err != nil {
		return convertDBError("update auth session tx: %w", err)
	}
	authSession, err := tx.AuthSession.Get(ctx, sessionID)
	if err != nil {
		return rollback(tx, "update auth session database: %w", err)
	}
	newSession, err := updater(toStorageAuthSession(authSession))
	if err != nil {
		return rollback(tx, "update auth session updating: %w", err)
	}
	// Keep the stored JSON an object even if the updater returned a nil map.
	if newSession.ClientStates == nil {
		newSession.ClientStates = make(map[string]*storage.ClientAuthState)
	}
	encodedStates, err := json.Marshal(newSession.ClientStates)
	if err != nil {
		return rollback(tx, "encode client states auth session: %w", err)
	}
	// CreatedAt is intentionally not updated — it records session creation.
	_, err = tx.AuthSession.UpdateOneID(sessionID).
		SetClientStates(encodedStates).
		SetLastActivity(newSession.LastActivity).
		SetIPAddress(newSession.IPAddress).
		SetUserAgent(newSession.UserAgent).
		Save(ctx)
	if err != nil {
		return rollback(tx, "update auth session updating: %w", err)
	}
	if err = tx.Commit(); err != nil {
		return rollback(tx, "update auth session commit: %w", err)
	}
	return nil
}

2
storage/ent/client/client.go

@ -16,6 +16,7 @@ func (d *Database) CreateClient(ctx context.Context, client storage.Client) erro
SetLogoURL(client.LogoURL).
SetRedirectUris(client.RedirectURIs).
SetTrustedPeers(client.TrustedPeers).
SetAllowedConnectors(client.AllowedConnectors).
Save(ctx)
if err != nil {
return convertDBError("create oauth2 client: %w", err)
@ -79,6 +80,7 @@ func (d *Database) UpdateClient(ctx context.Context, id string, updater func(old
SetLogoURL(newClient.LogoURL).
SetRedirectUris(newClient.RedirectURIs).
SetTrustedPeers(newClient.TrustedPeers).
SetAllowedConnectors(newClient.AllowedConnectors).
Save(ctx)
if err != nil {
return rollback(tx, "update client uploading: %w", err)

2
storage/ent/client/connector.go

@ -14,6 +14,7 @@ func (d *Database) CreateConnector(ctx context.Context, connector storage.Connec
SetType(connector.Type).
SetResourceVersion(connector.ResourceVersion).
SetConfig(connector.Config).
SetGrantTypes(connector.GrantTypes).
Save(ctx)
if err != nil {
return convertDBError("create connector: %w", err)
@ -75,6 +76,7 @@ func (d *Database) UpdateConnector(ctx context.Context, id string, updater func(
SetType(newConnector.Type).
SetResourceVersion(newConnector.ResourceVersion).
SetConfig(newConnector.Config).
SetGrantTypes(newConnector.GrantTypes).
Save(ctx)
if err != nil {
return rollback(tx, "update connector uploading: %w", err)

8
storage/ent/client/offlinesession.go

@ -15,7 +15,7 @@ func (d *Database) CreateOfflineSessions(ctx context.Context, session storage.Of
return fmt.Errorf("encode refresh offline session: %w", err)
}
id := offlineSessionID(session.UserID, session.ConnID, d.hasher)
id := compositeKeyID(session.UserID, session.ConnID, d.hasher)
_, err = d.client.OfflineSession.Create().
SetID(id).
SetUserID(session.UserID).
@ -31,7 +31,7 @@ func (d *Database) CreateOfflineSessions(ctx context.Context, session storage.Of
// GetOfflineSessions extracts an offline session from the database by user id and connector id.
func (d *Database) GetOfflineSessions(ctx context.Context, userID, connID string) (storage.OfflineSessions, error) {
id := offlineSessionID(userID, connID, d.hasher)
id := compositeKeyID(userID, connID, d.hasher)
offlineSession, err := d.client.OfflineSession.Get(ctx, id)
if err != nil {
@ -42,7 +42,7 @@ func (d *Database) GetOfflineSessions(ctx context.Context, userID, connID string
// DeleteOfflineSessions deletes an offline session from the database by user id and connector id.
func (d *Database) DeleteOfflineSessions(ctx context.Context, userID, connID string) error {
id := offlineSessionID(userID, connID, d.hasher)
id := compositeKeyID(userID, connID, d.hasher)
err := d.client.OfflineSession.DeleteOneID(id).Exec(ctx)
if err != nil {
@ -53,7 +53,7 @@ func (d *Database) DeleteOfflineSessions(ctx context.Context, userID, connID str
// UpdateOfflineSessions changes an offline session by user id and connector id using an updater function.
func (d *Database) UpdateOfflineSessions(ctx context.Context, userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error {
id := offlineSessionID(userID, connID, d.hasher)
id := compositeKeyID(userID, connID, d.hasher)
tx, err := d.BeginTx(ctx)
if err != nil {

79
storage/ent/client/types.go

@ -76,22 +76,24 @@ func toStorageAuthCode(a *db.AuthCode) storage.AuthCode {
func toStorageClient(c *db.OAuth2Client) storage.Client {
return storage.Client{
ID: c.ID,
Secret: c.Secret,
RedirectURIs: c.RedirectUris,
TrustedPeers: c.TrustedPeers,
Public: c.Public,
Name: c.Name,
LogoURL: c.LogoURL,
ID: c.ID,
Secret: c.Secret,
RedirectURIs: c.RedirectUris,
TrustedPeers: c.TrustedPeers,
Public: c.Public,
Name: c.Name,
LogoURL: c.LogoURL,
AllowedConnectors: c.AllowedConnectors,
}
}
func toStorageConnector(c *db.Connector) storage.Connector {
return storage.Connector{
ID: c.ID,
Type: c.Type,
Name: c.Name,
Config: c.Config,
ID: c.ID,
Type: c.Type,
Name: c.Name,
Config: c.Config,
GrantTypes: c.GrantTypes,
}
}
@ -161,6 +163,61 @@ func toStorageDeviceRequest(r *db.DeviceRequest) storage.DeviceRequest {
}
}
// toStorageUserIdentity converts an ent user-identity record into its storage
// representation. The returned Consents map is guaranteed non-nil.
func toStorageUserIdentity(u *db.UserIdentity) storage.UserIdentity {
	s := storage.UserIdentity{
		UserID:      u.UserID,
		ConnectorID: u.ConnectorID,
		Claims: storage.Claims{
			UserID:            u.ClaimsUserID,
			Username:          u.ClaimsUsername,
			PreferredUsername: u.ClaimsPreferredUsername,
			Email:             u.ClaimsEmail,
			EmailVerified:     u.ClaimsEmailVerified,
			Groups:            u.ClaimsGroups,
		},
		CreatedAt:    u.CreatedAt,
		LastLogin:    u.LastLogin,
		BlockedUntil: u.BlockedUntil,
	}
	if u.Consents != nil {
		if err := json.Unmarshal(u.Consents, &s.Consents); err != nil {
			// Correctness of json structure is guaranteed on uploading
			panic(err)
		}
		if s.Consents == nil {
			// Ensure Consents is non-nil even if JSON was "null".
			s.Consents = make(map[string][]string)
		}
	} else {
		// Server code assumes this will be non-nil.
		s.Consents = make(map[string][]string)
	}
	return s
}
// toStorageAuthSession converts an ent auth-session record into its storage
// representation. The returned ClientStates map is always non-nil, even when
// the stored column is absent or JSON "null".
func toStorageAuthSession(s *db.AuthSession) storage.AuthSession {
	result := storage.AuthSession{
		ID:           s.ID,
		CreatedAt:    s.CreatedAt,
		LastActivity: s.LastActivity,
		IPAddress:    s.IPAddress,
		UserAgent:    s.UserAgent,
		ClientStates: make(map[string]*storage.ClientAuthState),
	}
	if s.ClientStates == nil {
		return result
	}
	// JSON validity is guaranteed when the record is written, so a decode
	// failure here is a programmer error.
	if err := json.Unmarshal(s.ClientStates, &result.ClientStates); err != nil {
		panic(err)
	}
	if result.ClientStates == nil {
		// Guard the non-nil invariant in case decoding replaced the map.
		result.ClientStates = make(map[string]*storage.ClientAuthState)
	}
	return result
}
func toStorageDeviceToken(t *db.DeviceToken) storage.DeviceToken {
return storage.DeviceToken{
DeviceCode: t.DeviceCode,

130
storage/ent/client/useridentity.go

@ -0,0 +1,130 @@
package client
import (
"context"
"encoding/json"
"fmt"
"github.com/dexidp/dex/storage"
)
// CreateUserIdentity saves provided user identity into the database.
func (d *Database) CreateUserIdentity(ctx context.Context, identity storage.UserIdentity) error {
	// Persist an empty map rather than JSON "null" when no consents exist.
	consents := identity.Consents
	if consents == nil {
		consents = make(map[string][]string)
	}
	consentsJSON, err := json.Marshal(consents)
	if err != nil {
		return fmt.Errorf("encode consents user identity: %w", err)
	}
	// The primary key is a hash of (user id, connector id) because ent does
	// not support composite primary keys.
	rowID := compositeKeyID(identity.UserID, identity.ConnectorID, d.hasher)
	_, err = d.client.UserIdentity.Create().
		SetID(rowID).
		SetUserID(identity.UserID).
		SetConnectorID(identity.ConnectorID).
		SetClaimsUserID(identity.Claims.UserID).
		SetClaimsUsername(identity.Claims.Username).
		SetClaimsPreferredUsername(identity.Claims.PreferredUsername).
		SetClaimsEmail(identity.Claims.Email).
		SetClaimsEmailVerified(identity.Claims.EmailVerified).
		SetClaimsGroups(identity.Claims.Groups).
		SetConsents(consentsJSON).
		SetCreatedAt(identity.CreatedAt).
		SetLastLogin(identity.LastLogin).
		SetBlockedUntil(identity.BlockedUntil).
		Save(ctx)
	if err != nil {
		return convertDBError("create user identity: %w", err)
	}
	return nil
}
// GetUserIdentity extracts a user identity from the database by user id and connector id.
func (d *Database) GetUserIdentity(ctx context.Context, userID, connectorID string) (storage.UserIdentity, error) {
	// Look the row up by the hashed composite key (ent has no multi-key PKs).
	record, err := d.client.UserIdentity.Get(ctx, compositeKeyID(userID, connectorID, d.hasher))
	if err != nil {
		return storage.UserIdentity{}, convertDBError("get user identity: %w", err)
	}
	return toStorageUserIdentity(record), nil
}
// DeleteUserIdentity deletes a user identity from the database by user id and connector id.
func (d *Database) DeleteUserIdentity(ctx context.Context, userID, connectorID string) error {
	id := compositeKeyID(userID, connectorID, d.hasher)
	if err := d.client.UserIdentity.DeleteOneID(id).Exec(ctx); err != nil {
		return convertDBError("delete user identity: %w", err)
	}
	return nil
}
// UpdateUserIdentity changes a user identity by user id and connector id using an updater function.
// The read-modify-write runs inside a single transaction so a concurrent
// update cannot interleave between the Get and the UpdateOneID; any failure
// after BeginTx rolls the transaction back.
func (d *Database) UpdateUserIdentity(ctx context.Context, userID string, connectorID string, updater func(u storage.UserIdentity) (storage.UserIdentity, error)) error {
	// Primary key is a hash of (userID, connectorID); ent has no composite keys.
	id := compositeKeyID(userID, connectorID, d.hasher)
	tx, err := d.BeginTx(ctx)
	if err != nil {
		return convertDBError("update user identity tx: %w", err)
	}
	userIdentity, err := tx.UserIdentity.Get(ctx, id)
	if err != nil {
		return rollback(tx, "update user identity database: %w", err)
	}
	newUserIdentity, err := updater(toStorageUserIdentity(userIdentity))
	if err != nil {
		return rollback(tx, "update user identity updating: %w", err)
	}
	// Consents is stored as JSON; normalize nil to an empty map so the column
	// never holds JSON "null".
	if newUserIdentity.Consents == nil {
		newUserIdentity.Consents = make(map[string][]string)
	}
	encodedConsents, err := json.Marshal(newUserIdentity.Consents)
	if err != nil {
		return rollback(tx, "encode consents user identity: %w", err)
	}
	// NOTE(review): UserID/ConnectorID are overwritten from the updater result
	// but the row id (hash of the ORIGINAL pair) is not recomputed — confirm
	// updaters are not expected to change these two fields.
	_, err = tx.UserIdentity.UpdateOneID(id).
		SetUserID(newUserIdentity.UserID).
		SetConnectorID(newUserIdentity.ConnectorID).
		SetClaimsUserID(newUserIdentity.Claims.UserID).
		SetClaimsUsername(newUserIdentity.Claims.Username).
		SetClaimsPreferredUsername(newUserIdentity.Claims.PreferredUsername).
		SetClaimsEmail(newUserIdentity.Claims.Email).
		SetClaimsEmailVerified(newUserIdentity.Claims.EmailVerified).
		SetClaimsGroups(newUserIdentity.Claims.Groups).
		SetConsents(encodedConsents).
		SetCreatedAt(newUserIdentity.CreatedAt).
		SetLastLogin(newUserIdentity.LastLogin).
		SetBlockedUntil(newUserIdentity.BlockedUntil).
		Save(ctx)
	if err != nil {
		return rollback(tx, "update user identity uploading: %w", err)
	}
	if err = tx.Commit(); err != nil {
		return rollback(tx, "update user identity commit: %w", err)
	}
	return nil
}
// ListUserIdentities lists all user identities in the database.
func (d *Database) ListUserIdentities(ctx context.Context) ([]storage.UserIdentity, error) {
	records, err := d.client.UserIdentity.Query().All(ctx)
	if err != nil {
		return nil, convertDBError("list user identities: %w", err)
	}
	result := make([]storage.UserIdentity, len(records))
	for i, record := range records {
		result[i] = toStorageUserIdentity(record)
	}
	return result, nil
}

8
storage/ent/client/utils.go

@ -32,13 +32,13 @@ func convertDBError(t string, err error) error {
return fmt.Errorf(t, err)
}
// compose hashed id from user and connection id to use it as primary key
// compositeKeyID composes a hashed id from two key parts to use as primary key.
// ent doesn't support multi-key primary yet
// https://github.com/facebook/ent/issues/400
func offlineSessionID(userID string, connID string, hasher func() hash.Hash) string {
func compositeKeyID(first string, second string, hasher func() hash.Hash) string {
h := hasher()
h.Write([]byte(userID))
h.Write([]byte(connID))
h.Write([]byte(first))
h.Write([]byte(second))
return fmt.Sprintf("%x", h.Sum(nil))
}

150
storage/ent/db/authsession.go

@ -0,0 +1,150 @@
// Code generated by ent, DO NOT EDIT.
package db
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/dexidp/dex/storage/ent/db/authsession"
)
// AuthSession is the model entity for the AuthSession schema.
// (Generated by ent; keep in sync with the schema, do not hand-edit fields.)
type AuthSession struct {
	config `json:"-"`
	// ID of the ent.
	ID string `json:"id,omitempty"`
	// ClientStates holds the value of the "client_states" field.
	// Raw JSON blob; the client layer decodes it into per-client auth state.
	ClientStates []byte `json:"client_states,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// LastActivity holds the value of the "last_activity" field.
	LastActivity time.Time `json:"last_activity,omitempty"`
	// IPAddress holds the value of the "ip_address" field.
	IPAddress string `json:"ip_address,omitempty"`
	// UserAgent holds the value of the "user_agent" field.
	UserAgent string `json:"user_agent,omitempty"`
	// selectValues holds values selected through modifiers, order, etc.
	selectValues sql.SelectValues
}
// scanValues returns the types for scanning values from sql.Rows.
// Each requested column gets a destination pointer matching its Go type:
// *[]byte for the client_states blob, sql.NullString for id/ip_address/
// user_agent, sql.NullTime for the timestamps, and sql.UnknownType for any
// column outside the static set (e.g. added via query modifiers).
func (*AuthSession) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case authsession.FieldClientStates:
			values[i] = new([]byte)
		case authsession.FieldID, authsession.FieldIPAddress, authsession.FieldUserAgent:
			values[i] = new(sql.NullString)
		case authsession.FieldCreatedAt, authsession.FieldLastActivity:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AuthSession fields.
func (_m *AuthSession) assignValues(columns []string, values []any) error {
if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
}
for i := range columns {
switch columns[i] {
case authsession.FieldID:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field id", values[i])
} else if value.Valid {
_m.ID = value.String
}
case authsession.FieldClientStates:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field client_states", values[i])
} else if value != nil {
_m.ClientStates = *value
}
case authsession.FieldCreatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field created_at", values[i])
} else if value.Valid {
_m.CreatedAt = value.Time
}
case authsession.FieldLastActivity:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field last_activity", values[i])
} else if value.Valid {
_m.LastActivity = value.Time
}
case authsession.FieldIPAddress:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field ip_address", values[i])
} else if value.Valid {
_m.IPAddress = value.String
}
case authsession.FieldUserAgent:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field user_agent", values[i])
} else if value.Valid {
_m.UserAgent = value.String
}
default:
_m.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the AuthSession.
// This includes values selected through modifiers, order, etc.
func (_m *AuthSession) Value(name string) (ent.Value, error) {
return _m.selectValues.Get(name)
}
// Update returns a builder for updating this AuthSession.
// Note that you need to call AuthSession.Unwrap() before calling this method if this AuthSession
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AuthSession) Update() *AuthSessionUpdateOne {
return NewAuthSessionClient(_m.config).UpdateOne(_m)
}
// Unwrap unwraps the AuthSession entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity was not loaded through a transaction.
func (_m *AuthSession) Unwrap() *AuthSession {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("db: AuthSession is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
// String implements the fmt.Stringer.
// Note: client_states is formatted with %v on the raw []byte, so the output
// shows byte values rather than the decoded JSON text.
func (_m *AuthSession) String() string {
	var builder strings.Builder
	builder.WriteString("AuthSession(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("client_states=")
	builder.WriteString(fmt.Sprintf("%v", _m.ClientStates))
	builder.WriteString(", ")
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("last_activity=")
	builder.WriteString(_m.LastActivity.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("ip_address=")
	builder.WriteString(_m.IPAddress)
	builder.WriteString(", ")
	builder.WriteString("user_agent=")
	builder.WriteString(_m.UserAgent)
	builder.WriteByte(')')
	return builder.String()
}
// AuthSessions is a parsable slice of AuthSession.
type AuthSessions []*AuthSession

83
storage/ent/db/authsession/authsession.go

@ -0,0 +1,83 @@
// Code generated by ent, DO NOT EDIT.
package authsession
import (
"entgo.io/ent/dialect/sql"
)
const (
// Label holds the string label denoting the authsession type in the database.
Label = "auth_session"
// FieldID holds the string denoting the id field in the database.
FieldID = "id"
// FieldClientStates holds the string denoting the client_states field in the database.
FieldClientStates = "client_states"
// FieldCreatedAt holds the string denoting the created_at field in the database.
FieldCreatedAt = "created_at"
// FieldLastActivity holds the string denoting the last_activity field in the database.
FieldLastActivity = "last_activity"
// FieldIPAddress holds the string denoting the ip_address field in the database.
FieldIPAddress = "ip_address"
// FieldUserAgent holds the string denoting the user_agent field in the database.
FieldUserAgent = "user_agent"
// Table holds the table name of the authsession in the database.
Table = "auth_sessions"
)
// Columns holds all SQL columns for authsession fields.
var Columns = []string{
FieldID,
FieldClientStates,
FieldCreatedAt,
FieldLastActivity,
FieldIPAddress,
FieldUserAgent,
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
for i := range Columns {
if column == Columns[i] {
return true
}
}
return false
}
var (
// DefaultIPAddress holds the default value on creation for the "ip_address" field.
DefaultIPAddress string
// DefaultUserAgent holds the default value on creation for the "user_agent" field.
DefaultUserAgent string
// IDValidator is a validator for the "id" field. It is called by the builders before save.
IDValidator func(string) error
)
// OrderOption defines the ordering options for the AuthSession queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByLastActivity orders the results by the last_activity field.
func ByLastActivity(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLastActivity, opts...).ToFunc()
}
// ByIPAddress orders the results by the ip_address field.
func ByIPAddress(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIPAddress, opts...).ToFunc()
}
// ByUserAgent orders the results by the user_agent field.
func ByUserAgent(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUserAgent, opts...).ToFunc()
}

355
storage/ent/db/authsession/where.go

@ -0,0 +1,355 @@
// Code generated by ent, DO NOT EDIT.
package authsession
import (
"time"
"entgo.io/ent/dialect/sql"
"github.com/dexidp/dex/storage/ent/db/predicate"
)
// ID filters vertices based on their ID field.
func ID(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLTE(FieldID, id))
}
// IDEqualFold applies the EqualFold predicate on the ID field.
func IDEqualFold(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEqualFold(FieldID, id))
}
// IDContainsFold applies the ContainsFold predicate on the ID field.
func IDContainsFold(id string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldContainsFold(FieldID, id))
}
// ClientStates applies equality check predicate on the "client_states" field. It's identical to ClientStatesEQ.
func ClientStates(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldClientStates, v))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldCreatedAt, v))
}
// LastActivity applies equality check predicate on the "last_activity" field. It's identical to LastActivityEQ.
func LastActivity(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldLastActivity, v))
}
// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ.
func IPAddress(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldIPAddress, v))
}
// UserAgent applies equality check predicate on the "user_agent" field. It's identical to UserAgentEQ.
func UserAgent(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldUserAgent, v))
}
// ClientStatesEQ applies the EQ predicate on the "client_states" field.
func ClientStatesEQ(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldClientStates, v))
}
// ClientStatesNEQ applies the NEQ predicate on the "client_states" field.
func ClientStatesNEQ(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNEQ(FieldClientStates, v))
}
// ClientStatesIn applies the In predicate on the "client_states" field.
func ClientStatesIn(vs ...[]byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldIn(FieldClientStates, vs...))
}
// ClientStatesNotIn applies the NotIn predicate on the "client_states" field.
func ClientStatesNotIn(vs ...[]byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNotIn(FieldClientStates, vs...))
}
// ClientStatesGT applies the GT predicate on the "client_states" field.
func ClientStatesGT(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGT(FieldClientStates, v))
}
// ClientStatesGTE applies the GTE predicate on the "client_states" field.
func ClientStatesGTE(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGTE(FieldClientStates, v))
}
// ClientStatesLT applies the LT predicate on the "client_states" field.
func ClientStatesLT(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLT(FieldClientStates, v))
}
// ClientStatesLTE applies the LTE predicate on the "client_states" field.
func ClientStatesLTE(v []byte) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLTE(FieldClientStates, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLTE(FieldCreatedAt, v))
}
// LastActivityEQ applies the EQ predicate on the "last_activity" field.
func LastActivityEQ(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldLastActivity, v))
}
// LastActivityNEQ applies the NEQ predicate on the "last_activity" field.
func LastActivityNEQ(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNEQ(FieldLastActivity, v))
}
// LastActivityIn applies the In predicate on the "last_activity" field.
func LastActivityIn(vs ...time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldIn(FieldLastActivity, vs...))
}
// LastActivityNotIn applies the NotIn predicate on the "last_activity" field.
func LastActivityNotIn(vs ...time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNotIn(FieldLastActivity, vs...))
}
// LastActivityGT applies the GT predicate on the "last_activity" field.
func LastActivityGT(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGT(FieldLastActivity, v))
}
// LastActivityGTE applies the GTE predicate on the "last_activity" field.
func LastActivityGTE(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGTE(FieldLastActivity, v))
}
// LastActivityLT applies the LT predicate on the "last_activity" field.
func LastActivityLT(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLT(FieldLastActivity, v))
}
// LastActivityLTE applies the LTE predicate on the "last_activity" field.
func LastActivityLTE(v time.Time) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLTE(FieldLastActivity, v))
}
// IPAddressEQ applies the EQ predicate on the "ip_address" field.
func IPAddressEQ(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldIPAddress, v))
}
// IPAddressNEQ applies the NEQ predicate on the "ip_address" field.
func IPAddressNEQ(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNEQ(FieldIPAddress, v))
}
// IPAddressIn applies the In predicate on the "ip_address" field.
func IPAddressIn(vs ...string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldIn(FieldIPAddress, vs...))
}
// IPAddressNotIn applies the NotIn predicate on the "ip_address" field.
func IPAddressNotIn(vs ...string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNotIn(FieldIPAddress, vs...))
}
// IPAddressGT applies the GT predicate on the "ip_address" field.
func IPAddressGT(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGT(FieldIPAddress, v))
}
// IPAddressGTE applies the GTE predicate on the "ip_address" field.
func IPAddressGTE(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGTE(FieldIPAddress, v))
}
// IPAddressLT applies the LT predicate on the "ip_address" field.
func IPAddressLT(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLT(FieldIPAddress, v))
}
// IPAddressLTE applies the LTE predicate on the "ip_address" field.
func IPAddressLTE(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLTE(FieldIPAddress, v))
}
// IPAddressContains applies the Contains predicate on the "ip_address" field.
func IPAddressContains(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldContains(FieldIPAddress, v))
}
// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field.
func IPAddressHasPrefix(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldHasPrefix(FieldIPAddress, v))
}
// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field.
func IPAddressHasSuffix(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldHasSuffix(FieldIPAddress, v))
}
// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field.
func IPAddressEqualFold(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEqualFold(FieldIPAddress, v))
}
// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field.
func IPAddressContainsFold(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldContainsFold(FieldIPAddress, v))
}
// UserAgentEQ applies the EQ predicate on the "user_agent" field.
func UserAgentEQ(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEQ(FieldUserAgent, v))
}
// UserAgentNEQ applies the NEQ predicate on the "user_agent" field.
func UserAgentNEQ(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNEQ(FieldUserAgent, v))
}
// UserAgentIn applies the In predicate on the "user_agent" field.
func UserAgentIn(vs ...string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldIn(FieldUserAgent, vs...))
}
// UserAgentNotIn applies the NotIn predicate on the "user_agent" field.
func UserAgentNotIn(vs ...string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldNotIn(FieldUserAgent, vs...))
}
// UserAgentGT applies the GT predicate on the "user_agent" field.
func UserAgentGT(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGT(FieldUserAgent, v))
}
// UserAgentGTE applies the GTE predicate on the "user_agent" field.
func UserAgentGTE(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldGTE(FieldUserAgent, v))
}
// UserAgentLT applies the LT predicate on the "user_agent" field.
func UserAgentLT(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLT(FieldUserAgent, v))
}
// UserAgentLTE applies the LTE predicate on the "user_agent" field.
func UserAgentLTE(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldLTE(FieldUserAgent, v))
}
// UserAgentContains applies the Contains predicate on the "user_agent" field.
func UserAgentContains(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldContains(FieldUserAgent, v))
}
// UserAgentHasPrefix applies the HasPrefix predicate on the "user_agent" field.
func UserAgentHasPrefix(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldHasPrefix(FieldUserAgent, v))
}
// UserAgentHasSuffix applies the HasSuffix predicate on the "user_agent" field.
func UserAgentHasSuffix(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldHasSuffix(FieldUserAgent, v))
}
// UserAgentEqualFold applies the EqualFold predicate on the "user_agent" field.
func UserAgentEqualFold(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldEqualFold(FieldUserAgent, v))
}
// UserAgentContainsFold applies the ContainsFold predicate on the "user_agent" field.
func UserAgentContainsFold(v string) predicate.AuthSession {
return predicate.AuthSession(sql.FieldContainsFold(FieldUserAgent, v))
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.AuthSession) predicate.AuthSession {
return predicate.AuthSession(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.AuthSession) predicate.AuthSession {
return predicate.AuthSession(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.AuthSession) predicate.AuthSession {
return predicate.AuthSession(sql.NotPredicates(p))
}

282
storage/ent/db/authsession_create.go

@ -0,0 +1,282 @@
// Code generated by ent, DO NOT EDIT.
package db
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/dexidp/dex/storage/ent/db/authsession"
)
// AuthSessionCreate is the builder for creating a AuthSession entity.
type AuthSessionCreate struct {
config
mutation *AuthSessionMutation
hooks []Hook
}
// SetClientStates sets the "client_states" field.
func (_c *AuthSessionCreate) SetClientStates(v []byte) *AuthSessionCreate {
_c.mutation.SetClientStates(v)
return _c
}
// SetCreatedAt sets the "created_at" field.
func (_c *AuthSessionCreate) SetCreatedAt(v time.Time) *AuthSessionCreate {
_c.mutation.SetCreatedAt(v)
return _c
}
// SetLastActivity sets the "last_activity" field.
func (_c *AuthSessionCreate) SetLastActivity(v time.Time) *AuthSessionCreate {
_c.mutation.SetLastActivity(v)
return _c
}
// SetIPAddress sets the "ip_address" field.
func (_c *AuthSessionCreate) SetIPAddress(v string) *AuthSessionCreate {
_c.mutation.SetIPAddress(v)
return _c
}
// SetNillableIPAddress sets the "ip_address" field if the given value is not nil.
func (_c *AuthSessionCreate) SetNillableIPAddress(v *string) *AuthSessionCreate {
if v != nil {
_c.SetIPAddress(*v)
}
return _c
}
// SetUserAgent sets the "user_agent" field.
func (_c *AuthSessionCreate) SetUserAgent(v string) *AuthSessionCreate {
_c.mutation.SetUserAgent(v)
return _c
}
// SetNillableUserAgent sets the "user_agent" field if the given value is not nil.
func (_c *AuthSessionCreate) SetNillableUserAgent(v *string) *AuthSessionCreate {
if v != nil {
_c.SetUserAgent(*v)
}
return _c
}
// SetID sets the "id" field.
func (_c *AuthSessionCreate) SetID(v string) *AuthSessionCreate {
_c.mutation.SetID(v)
return _c
}
// Mutation returns the AuthSessionMutation object of the builder.
func (_c *AuthSessionCreate) Mutation() *AuthSessionMutation {
return _c.mutation
}
// Save creates the AuthSession in the database.
func (_c *AuthSessionCreate) Save(ctx context.Context) (*AuthSession, error) {
_c.defaults()
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
}
// SaveX calls Save and panics if Save returns an error.
func (_c *AuthSessionCreate) SaveX(ctx context.Context) *AuthSession {
v, err := _c.Save(ctx)
if err != nil {
panic(err)
}
return v
}
// Exec executes the query.
func (_c *AuthSessionCreate) Exec(ctx context.Context) error {
_, err := _c.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (_c *AuthSessionCreate) ExecX(ctx context.Context) {
if err := _c.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (_c *AuthSessionCreate) defaults() {
if _, ok := _c.mutation.IPAddress(); !ok {
v := authsession.DefaultIPAddress
_c.mutation.SetIPAddress(v)
}
if _, ok := _c.mutation.UserAgent(); !ok {
v := authsession.DefaultUserAgent
_c.mutation.SetUserAgent(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (_c *AuthSessionCreate) check() error {
if _, ok := _c.mutation.ClientStates(); !ok {
return &ValidationError{Name: "client_states", err: errors.New(`db: missing required field "AuthSession.client_states"`)}
}
if _, ok := _c.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "AuthSession.created_at"`)}
}
if _, ok := _c.mutation.LastActivity(); !ok {
return &ValidationError{Name: "last_activity", err: errors.New(`db: missing required field "AuthSession.last_activity"`)}
}
if _, ok := _c.mutation.IPAddress(); !ok {
return &ValidationError{Name: "ip_address", err: errors.New(`db: missing required field "AuthSession.ip_address"`)}
}
if _, ok := _c.mutation.UserAgent(); !ok {
return &ValidationError{Name: "user_agent", err: errors.New(`db: missing required field "AuthSession.user_agent"`)}
}
if v, ok := _c.mutation.ID(); ok {
if err := authsession.IDValidator(v); err != nil {
return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "AuthSession.id": %w`, err)}
}
}
return nil
}
func (_c *AuthSessionCreate) sqlSave(ctx context.Context) (*AuthSession, error) {
if err := _c.check(); err != nil {
return nil, err
}
_node, _spec := _c.createSpec()
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
if _spec.ID.Value != nil {
if id, ok := _spec.ID.Value.(string); ok {
_node.ID = id
} else {
return nil, fmt.Errorf("unexpected AuthSession.ID type: %T", _spec.ID.Value)
}
}
_c.mutation.id = &_node.ID
_c.mutation.done = true
return _node, nil
}
func (_c *AuthSessionCreate) createSpec() (*AuthSession, *sqlgraph.CreateSpec) {
var (
_node = &AuthSession{config: _c.config}
_spec = sqlgraph.NewCreateSpec(authsession.Table, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString))
)
if id, ok := _c.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = id
}
if value, ok := _c.mutation.ClientStates(); ok {
_spec.SetField(authsession.FieldClientStates, field.TypeBytes, value)
_node.ClientStates = value
}
if value, ok := _c.mutation.CreatedAt(); ok {
_spec.SetField(authsession.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := _c.mutation.LastActivity(); ok {
_spec.SetField(authsession.FieldLastActivity, field.TypeTime, value)
_node.LastActivity = value
}
if value, ok := _c.mutation.IPAddress(); ok {
_spec.SetField(authsession.FieldIPAddress, field.TypeString, value)
_node.IPAddress = value
}
if value, ok := _c.mutation.UserAgent(); ok {
_spec.SetField(authsession.FieldUserAgent, field.TypeString, value)
_node.UserAgent = value
}
return _node, _spec
}
// AuthSessionCreateBulk is the builder for creating many AuthSession entities in bulk.
type AuthSessionCreateBulk struct {
	config
	// err holds a builder-construction error, reported lazily from Save.
	err      error
	builders []*AuthSessionCreate
}
// Save creates the AuthSession entities in the database.
//
// Each builder's hooks are chained so that hook i delegates to hook i+1, and
// the final mutator in the chain performs a single batched INSERT for all
// builders. The returned slice is ordered like the builders.
func (_c *AuthSessionCreateBulk) Save(ctx context.Context) ([]*AuthSession, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*AuthSession, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*AuthSessionMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: trigger the next builder's mutator
					// so every builder's hooks run before the batch INSERT.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap this builder's hooks around the mutator, outermost first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain at the first builder; it cascades through the rest.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (_c *AuthSessionCreateBulk) SaveX(ctx context.Context) []*AuthSession {
	nodes, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
// Exec executes the query, discarding the created entities and returning only
// the error, if any.
func (_c *AuthSessionCreateBulk) Exec(ctx context.Context) error {
	if _, err := _c.Save(ctx); err != nil {
		return err
	}
	return nil
}
// ExecX is like Exec, but panics if an error occurs.
func (_c *AuthSessionCreateBulk) ExecX(ctx context.Context) {
	err := _c.Exec(ctx)
	if err != nil {
		panic(err)
	}
}

88
storage/ent/db/authsession_delete.go

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package db
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/dexidp/dex/storage/ent/db/authsession"
"github.com/dexidp/dex/storage/ent/db/predicate"
)
// AuthSessionDelete is the builder for deleting a AuthSession entity.
type AuthSessionDelete struct {
	config
	hooks    []Hook
	mutation *AuthSessionMutation
}

// Where appends a list predicates to the AuthSessionDelete builder.
func (_d *AuthSessionDelete) Where(ps ...predicate.AuthSession) *AuthSessionDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *AuthSessionDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *AuthSessionDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds the DELETE spec from the accumulated predicates, runs it, and
// returns the number of rows removed. Constraint violations are wrapped in
// ConstraintError; the mutation is marked done either way.
func (_d *AuthSessionDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(authsession.Table, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
// AuthSessionDeleteOne is the builder for deleting a single AuthSession entity.
type AuthSessionDeleteOne struct {
	// _d is the underlying bulk-delete builder this wrapper delegates to.
	_d *AuthSessionDelete
}

// Where appends a list predicates to the AuthSessionDelete builder.
func (_d *AuthSessionDeleteOne) Where(ps ...predicate.AuthSession) *AuthSessionDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query. Because exactly one entity is expected to
// match, a zero delete count is reported as a *NotFoundError.
func (_d *AuthSessionDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{authsession.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *AuthSessionDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}

527
storage/ent/db/authsession_query.go

@ -0,0 +1,527 @@
// Code generated by ent, DO NOT EDIT.
package db
import (
"context"
"fmt"
"math"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/dexidp/dex/storage/ent/db/authsession"
"github.com/dexidp/dex/storage/ent/db/predicate"
)
// AuthSessionQuery is the builder for querying AuthSession entities.
type AuthSessionQuery struct {
	config
	ctx        *QueryContext
	order      []authsession.OrderOption
	inters     []Interceptor
	predicates []predicate.AuthSession
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the AuthSessionQuery builder.
func (_q *AuthSessionQuery) Where(ps ...predicate.AuthSession) *AuthSessionQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}
// Limit the number of records to be returned by this query.
func (_q *AuthSessionQuery) Limit(limit int) *AuthSessionQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *AuthSessionQuery) Offset(offset int) *AuthSessionQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *AuthSessionQuery) Unique(unique bool) *AuthSessionQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
func (_q *AuthSessionQuery) Order(o ...authsession.OrderOption) *AuthSessionQuery {
	_q.order = append(_q.order, o...)
	return _q
}
// First returns the first AuthSession entity from the query.
// Returns a *NotFoundError when no AuthSession was found.
func (_q *AuthSessionQuery) First(ctx context.Context) (*AuthSession, error) {
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{authsession.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// Note: a not-found result is NOT treated as a panic; nil is returned instead.
func (_q *AuthSessionQuery) FirstX(ctx context.Context) *AuthSession {
	node, err := _q.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first AuthSession ID from the query.
// Returns a *NotFoundError when no AuthSession ID was found.
func (_q *AuthSessionQuery) FirstID(ctx context.Context) (id string, err error) {
	var ids []string
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{authsession.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// Note: a not-found result is NOT treated as a panic; "" is returned instead.
func (_q *AuthSessionQuery) FirstIDX(ctx context.Context) string {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
// Only returns a single AuthSession entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one AuthSession entity is found.
// Returns a *NotFoundError when no AuthSession entities are found.
func (_q *AuthSessionQuery) Only(ctx context.Context) (*AuthSession, error) {
	// Limit(2) is enough to distinguish zero, one, and many matches.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{authsession.Label}
	default:
		return nil, &NotSingularError{authsession.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *AuthSessionQuery) OnlyX(ctx context.Context) *AuthSession {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only AuthSession ID in the query.
// Returns a *NotSingularError when more than one AuthSession ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *AuthSessionQuery) OnlyID(ctx context.Context) (id string, err error) {
	var ids []string
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{authsession.Label}
	default:
		err = &NotSingularError{authsession.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *AuthSessionQuery) OnlyIDX(ctx context.Context) string {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of AuthSessions.
func (_q *AuthSessionQuery) All(ctx context.Context) ([]*AuthSession, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*AuthSession, *AuthSessionQuery]()
	return withInterceptors[[]*AuthSession](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *AuthSessionQuery) AllX(ctx context.Context) []*AuthSession {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of AuthSession IDs.
func (_q *AuthSessionQuery) IDs(ctx context.Context) (ids []string, err error) {
	// When the query is part of a graph traversal and uniqueness was not set
	// explicitly, default to unique IDs.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(authsession.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *AuthSessionQuery) IDsX(ctx context.Context) []string {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
// Count returns the count of the given query.
func (_q *AuthSessionQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*AuthSessionQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *AuthSessionQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *AuthSessionQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// Fetching a single ID is the cheapest way to probe for existence.
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("db: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *AuthSessionQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the AuthSessionQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *AuthSessionQuery) Clone() *AuthSessionQuery {
	if _q == nil {
		return nil
	}
	// Slices are copied so mutations on the clone never leak into the
	// original builder.
	return &AuthSessionQuery{
		config:     _q.config,
		ctx:        _q.ctx.Clone(),
		order:      append([]authsession.OrderOption{}, _q.order...),
		inters:     append([]Interceptor{}, _q.inters...),
		predicates: append([]predicate.AuthSession{}, _q.predicates...),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		ClientStates []byte `json:"client_states,omitempty"`
//		Count        int    `json:"count,omitempty"`
//	}
//
//	client.AuthSession.Query().
//		GroupBy(authsession.FieldClientStates).
//		Aggregate(db.Count()).
//		Scan(ctx, &v)
func (_q *AuthSessionQuery) GroupBy(field string, fields ...string) *AuthSessionGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &AuthSessionGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = authsession.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		ClientStates []byte `json:"client_states,omitempty"`
//	}
//
//	client.AuthSession.Query().
//		Select(authsession.FieldClientStates).
//		Scan(ctx, &v)
func (_q *AuthSessionQuery) Select(fields ...string) *AuthSessionSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &AuthSessionSelect{AuthSessionQuery: _q}
	sbuild.label = authsession.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a AuthSessionSelect configured with the given aggregations.
func (_q *AuthSessionQuery) Aggregate(fns ...AggregateFunc) *AuthSessionSelect {
	return _q.Select().Aggregate(fns...)
}
// prepareQuery runs traverser interceptors, validates the selected columns,
// and, for traversal queries, resolves the intermediate selector from path.
// It must be called before any SQL is generated.
func (_q *AuthSessionQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !authsession.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
// sqlAll executes the query and scans every matching row into a fresh
// AuthSession node. The optional hooks may tweak the query spec (e.g. for
// column modifiers) before execution.
func (_q *AuthSessionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthSession, error) {
	var (
		nodes = []*AuthSession{}
		_spec = _q.querySpec()
	)
	// ScanValues supplies the destination buffers for a row; Assign copies the
	// scanned values into a new node and appends it to the result set.
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*AuthSession).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &AuthSession{config: _q.config}
		nodes = append(nodes, node)
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	// Note: the generator's "if len(nodes) == 0" early return was removed —
	// AuthSession has no edges to eager-load, so both branches were identical.
	return nodes, nil
}
// sqlCount counts the rows matched by the query, honoring any selected
// columns and the Unique flag.
func (_q *AuthSessionQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec translates the builder's accumulated state (columns, predicates,
// limit/offset, ordering, uniqueness) into a sqlgraph.QuerySpec.
func (_q *AuthSessionQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(authsession.Table, authsession.Columns, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Traversal queries default to unique results.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; other requested columns
		// follow, skipping a duplicate ID entry.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authsession.FieldID)
		for i := range fields {
			if fields[i] != authsession.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SQL selector for this query, used by the group-by
// and select scanners (which bypass sqlgraph's node scanning).
func (_q *AuthSessionQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(authsession.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = authsession.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Continue from the traversal's intermediate selector instead of a
		// fresh table scan.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// AuthSessionGroupBy is the group-by builder for AuthSession entities.
type AuthSessionGroupBy struct {
	selector
	build *AuthSessionQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *AuthSessionGroupBy) Aggregate(fns ...AggregateFunc) *AuthSessionGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *AuthSessionGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AuthSessionQuery, *AuthSessionGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}

// sqlScan builds the grouped SELECT (group-by columns plus aggregations),
// executes it, and scans the rows into v.
func (_g *AuthSessionGroupBy) sqlScan(ctx context.Context, root *AuthSessionQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Nothing selected yet: select the group-by fields followed by the
		// aggregation expressions.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// AuthSessionSelect is the builder for selecting fields of AuthSession entities.
type AuthSessionSelect struct {
	*AuthSessionQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *AuthSessionSelect) Aggregate(fns ...AggregateFunc) *AuthSessionSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *AuthSessionSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AuthSessionQuery, *AuthSessionSelect](ctx, _s.AuthSessionQuery, _s, _s.inters, v)
}

// sqlScan executes the SELECT (optionally extended or replaced by aggregation
// expressions) and scans the rows into v.
func (_s *AuthSessionSelect) sqlScan(ctx context.Context, root *AuthSessionQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace an empty field list or are appended to it.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

330
storage/ent/db/authsession_update.go

@ -0,0 +1,330 @@
// Code generated by ent, DO NOT EDIT.
package db
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/dexidp/dex/storage/ent/db/authsession"
"github.com/dexidp/dex/storage/ent/db/predicate"
)
// AuthSessionUpdate is the builder for updating AuthSession entities.
type AuthSessionUpdate struct {
	config
	hooks    []Hook
	mutation *AuthSessionMutation
}

// Where appends a list predicates to the AuthSessionUpdate builder.
func (_u *AuthSessionUpdate) Where(ps ...predicate.AuthSession) *AuthSessionUpdate {
	_u.mutation.Where(ps...)
	return _u
}
// Field setters. Each records the new value on the mutation; the SetNillable*
// variants are no-ops when given nil, which makes optional updates ergonomic.

// SetClientStates sets the "client_states" field.
func (_u *AuthSessionUpdate) SetClientStates(v []byte) *AuthSessionUpdate {
	_u.mutation.SetClientStates(v)
	return _u
}

// SetCreatedAt sets the "created_at" field.
func (_u *AuthSessionUpdate) SetCreatedAt(v time.Time) *AuthSessionUpdate {
	_u.mutation.SetCreatedAt(v)
	return _u
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (_u *AuthSessionUpdate) SetNillableCreatedAt(v *time.Time) *AuthSessionUpdate {
	if v != nil {
		_u.SetCreatedAt(*v)
	}
	return _u
}

// SetLastActivity sets the "last_activity" field.
func (_u *AuthSessionUpdate) SetLastActivity(v time.Time) *AuthSessionUpdate {
	_u.mutation.SetLastActivity(v)
	return _u
}

// SetNillableLastActivity sets the "last_activity" field if the given value is not nil.
func (_u *AuthSessionUpdate) SetNillableLastActivity(v *time.Time) *AuthSessionUpdate {
	if v != nil {
		_u.SetLastActivity(*v)
	}
	return _u
}

// SetIPAddress sets the "ip_address" field.
func (_u *AuthSessionUpdate) SetIPAddress(v string) *AuthSessionUpdate {
	_u.mutation.SetIPAddress(v)
	return _u
}

// SetNillableIPAddress sets the "ip_address" field if the given value is not nil.
func (_u *AuthSessionUpdate) SetNillableIPAddress(v *string) *AuthSessionUpdate {
	if v != nil {
		_u.SetIPAddress(*v)
	}
	return _u
}

// SetUserAgent sets the "user_agent" field.
func (_u *AuthSessionUpdate) SetUserAgent(v string) *AuthSessionUpdate {
	_u.mutation.SetUserAgent(v)
	return _u
}

// SetNillableUserAgent sets the "user_agent" field if the given value is not nil.
func (_u *AuthSessionUpdate) SetNillableUserAgent(v *string) *AuthSessionUpdate {
	if v != nil {
		_u.SetUserAgent(*v)
	}
	return _u
}
// Mutation returns the AuthSessionMutation object of the builder.
func (_u *AuthSessionUpdate) Mutation() *AuthSessionMutation {
	return _u.mutation
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *AuthSessionUpdate) Save(ctx context.Context) (int, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *AuthSessionUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *AuthSessionUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *AuthSessionUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
// sqlSave builds the UPDATE spec from the mutation's predicates and changed
// fields, executes it, and returns the number of affected rows. A
// sqlgraph.NotFoundError is translated to this package's NotFoundError and
// constraint violations to ConstraintError.
func (_u *AuthSessionUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	_spec := sqlgraph.NewUpdateSpec(authsession.Table, authsession.Columns, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Only fields actually set on the mutation become part of the SET clause.
	if value, ok := _u.mutation.ClientStates(); ok {
		_spec.SetField(authsession.FieldClientStates, field.TypeBytes, value)
	}
	if value, ok := _u.mutation.CreatedAt(); ok {
		_spec.SetField(authsession.FieldCreatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.LastActivity(); ok {
		_spec.SetField(authsession.FieldLastActivity, field.TypeTime, value)
	}
	if value, ok := _u.mutation.IPAddress(); ok {
		_spec.SetField(authsession.FieldIPAddress, field.TypeString, value)
	}
	if value, ok := _u.mutation.UserAgent(); ok {
		_spec.SetField(authsession.FieldUserAgent, field.TypeString, value)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authsession.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
// AuthSessionUpdateOne is the builder for updating a single AuthSession entity.
type AuthSessionUpdateOne struct {
	config
	// fields optionally restricts the columns returned after the update.
	fields   []string
	hooks    []Hook
	mutation *AuthSessionMutation
}

// SetClientStates sets the "client_states" field.
func (_u *AuthSessionUpdateOne) SetClientStates(v []byte) *AuthSessionUpdateOne {
	_u.mutation.SetClientStates(v)
	return _u
}

// SetCreatedAt sets the "created_at" field.
func (_u *AuthSessionUpdateOne) SetCreatedAt(v time.Time) *AuthSessionUpdateOne {
	_u.mutation.SetCreatedAt(v)
	return _u
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (_u *AuthSessionUpdateOne) SetNillableCreatedAt(v *time.Time) *AuthSessionUpdateOne {
	if v != nil {
		_u.SetCreatedAt(*v)
	}
	return _u
}

// SetLastActivity sets the "last_activity" field.
func (_u *AuthSessionUpdateOne) SetLastActivity(v time.Time) *AuthSessionUpdateOne {
	_u.mutation.SetLastActivity(v)
	return _u
}

// SetNillableLastActivity sets the "last_activity" field if the given value is not nil.
func (_u *AuthSessionUpdateOne) SetNillableLastActivity(v *time.Time) *AuthSessionUpdateOne {
	if v != nil {
		_u.SetLastActivity(*v)
	}
	return _u
}

// SetIPAddress sets the "ip_address" field.
func (_u *AuthSessionUpdateOne) SetIPAddress(v string) *AuthSessionUpdateOne {
	_u.mutation.SetIPAddress(v)
	return _u
}

// SetNillableIPAddress sets the "ip_address" field if the given value is not nil.
func (_u *AuthSessionUpdateOne) SetNillableIPAddress(v *string) *AuthSessionUpdateOne {
	if v != nil {
		_u.SetIPAddress(*v)
	}
	return _u
}

// SetUserAgent sets the "user_agent" field.
func (_u *AuthSessionUpdateOne) SetUserAgent(v string) *AuthSessionUpdateOne {
	_u.mutation.SetUserAgent(v)
	return _u
}

// SetNillableUserAgent sets the "user_agent" field if the given value is not nil.
func (_u *AuthSessionUpdateOne) SetNillableUserAgent(v *string) *AuthSessionUpdateOne {
	if v != nil {
		_u.SetUserAgent(*v)
	}
	return _u
}

// Mutation returns the AuthSessionMutation object of the builder.
func (_u *AuthSessionUpdateOne) Mutation() *AuthSessionMutation {
	return _u.mutation
}

// Where appends a list predicates to the AuthSessionUpdate builder.
func (_u *AuthSessionUpdateOne) Where(ps ...predicate.AuthSession) *AuthSessionUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *AuthSessionUpdateOne) Select(field string, fields ...string) *AuthSessionUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}
// Save executes the query and returns the updated AuthSession entity.
func (_u *AuthSessionUpdateOne) Save(ctx context.Context) (*AuthSession, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *AuthSessionUpdateOne) SaveX(ctx context.Context) *AuthSession {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *AuthSessionUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *AuthSessionUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
// sqlSave updates the single entity identified by the mutation's ID and
// returns the updated node. The mutation must carry an ID (set when the
// builder is created via UpdateOne/UpdateOneID); additional predicates and a
// restricted column list from Select are honored.
func (_u *AuthSessionUpdateOne) sqlSave(ctx context.Context) (_node *AuthSession, err error) {
	_spec := sqlgraph.NewUpdateSpec(authsession.Table, authsession.Columns, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString))
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "AuthSession.id" for update`)}
	}
	_spec.Node.ID.Value = id
	if fields := _u.fields; len(fields) > 0 {
		// The ID column is always returned; other requested columns follow,
		// skipping a duplicate ID entry.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authsession.FieldID)
		for _, f := range fields {
			if !authsession.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
			}
			if f != authsession.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Only fields actually set on the mutation become part of the SET clause.
	if value, ok := _u.mutation.ClientStates(); ok {
		_spec.SetField(authsession.FieldClientStates, field.TypeBytes, value)
	}
	if value, ok := _u.mutation.CreatedAt(); ok {
		_spec.SetField(authsession.FieldCreatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.LastActivity(); ok {
		_spec.SetField(authsession.FieldLastActivity, field.TypeTime, value)
	}
	if value, ok := _u.mutation.IPAddress(); ok {
		_spec.SetField(authsession.FieldIPAddress, field.TypeString, value)
	}
	if value, ok := _u.mutation.UserAgent(); ok {
		_spec.SetField(authsession.FieldUserAgent, field.TypeString, value)
	}
	_node = &AuthSession{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authsession.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}

301
storage/ent/db/client.go

@ -16,6 +16,7 @@ import (
"entgo.io/ent/dialect/sql"
"github.com/dexidp/dex/storage/ent/db/authcode"
"github.com/dexidp/dex/storage/ent/db/authrequest"
"github.com/dexidp/dex/storage/ent/db/authsession"
"github.com/dexidp/dex/storage/ent/db/connector"
"github.com/dexidp/dex/storage/ent/db/devicerequest"
"github.com/dexidp/dex/storage/ent/db/devicetoken"
@ -24,6 +25,7 @@ import (
"github.com/dexidp/dex/storage/ent/db/offlinesession"
"github.com/dexidp/dex/storage/ent/db/password"
"github.com/dexidp/dex/storage/ent/db/refreshtoken"
"github.com/dexidp/dex/storage/ent/db/useridentity"
)
// Client is the client that holds all ent builders.
@ -35,6 +37,8 @@ type Client struct {
AuthCode *AuthCodeClient
// AuthRequest is the client for interacting with the AuthRequest builders.
AuthRequest *AuthRequestClient
// AuthSession is the client for interacting with the AuthSession builders.
AuthSession *AuthSessionClient
// Connector is the client for interacting with the Connector builders.
Connector *ConnectorClient
// DeviceRequest is the client for interacting with the DeviceRequest builders.
@ -51,6 +55,8 @@ type Client struct {
Password *PasswordClient
// RefreshToken is the client for interacting with the RefreshToken builders.
RefreshToken *RefreshTokenClient
// UserIdentity is the client for interacting with the UserIdentity builders.
UserIdentity *UserIdentityClient
}
// NewClient creates a new client configured with the given options.
@ -64,6 +70,7 @@ func (c *Client) init() {
c.Schema = migrate.NewSchema(c.driver)
c.AuthCode = NewAuthCodeClient(c.config)
c.AuthRequest = NewAuthRequestClient(c.config)
c.AuthSession = NewAuthSessionClient(c.config)
c.Connector = NewConnectorClient(c.config)
c.DeviceRequest = NewDeviceRequestClient(c.config)
c.DeviceToken = NewDeviceTokenClient(c.config)
@ -72,6 +79,7 @@ func (c *Client) init() {
c.OfflineSession = NewOfflineSessionClient(c.config)
c.Password = NewPasswordClient(c.config)
c.RefreshToken = NewRefreshTokenClient(c.config)
c.UserIdentity = NewUserIdentityClient(c.config)
}
type (
@ -166,6 +174,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
config: cfg,
AuthCode: NewAuthCodeClient(cfg),
AuthRequest: NewAuthRequestClient(cfg),
AuthSession: NewAuthSessionClient(cfg),
Connector: NewConnectorClient(cfg),
DeviceRequest: NewDeviceRequestClient(cfg),
DeviceToken: NewDeviceTokenClient(cfg),
@ -174,6 +183,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
OfflineSession: NewOfflineSessionClient(cfg),
Password: NewPasswordClient(cfg),
RefreshToken: NewRefreshTokenClient(cfg),
UserIdentity: NewUserIdentityClient(cfg),
}, nil
}
@ -195,6 +205,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
config: cfg,
AuthCode: NewAuthCodeClient(cfg),
AuthRequest: NewAuthRequestClient(cfg),
AuthSession: NewAuthSessionClient(cfg),
Connector: NewConnectorClient(cfg),
DeviceRequest: NewDeviceRequestClient(cfg),
DeviceToken: NewDeviceTokenClient(cfg),
@ -203,6 +214,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
OfflineSession: NewOfflineSessionClient(cfg),
Password: NewPasswordClient(cfg),
RefreshToken: NewRefreshTokenClient(cfg),
UserIdentity: NewUserIdentityClient(cfg),
}, nil
}
@ -232,8 +244,9 @@ func (c *Client) Close() error {
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
for _, n := range []interface{ Use(...Hook) }{
c.AuthCode, c.AuthRequest, c.Connector, c.DeviceRequest, c.DeviceToken, c.Keys,
c.OAuth2Client, c.OfflineSession, c.Password, c.RefreshToken,
c.AuthCode, c.AuthRequest, c.AuthSession, c.Connector, c.DeviceRequest,
c.DeviceToken, c.Keys, c.OAuth2Client, c.OfflineSession, c.Password,
c.RefreshToken, c.UserIdentity,
} {
n.Use(hooks...)
}
@ -243,8 +256,9 @@ func (c *Client) Use(hooks ...Hook) {
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
func (c *Client) Intercept(interceptors ...Interceptor) {
for _, n := range []interface{ Intercept(...Interceptor) }{
c.AuthCode, c.AuthRequest, c.Connector, c.DeviceRequest, c.DeviceToken, c.Keys,
c.OAuth2Client, c.OfflineSession, c.Password, c.RefreshToken,
c.AuthCode, c.AuthRequest, c.AuthSession, c.Connector, c.DeviceRequest,
c.DeviceToken, c.Keys, c.OAuth2Client, c.OfflineSession, c.Password,
c.RefreshToken, c.UserIdentity,
} {
n.Intercept(interceptors...)
}
@ -257,6 +271,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
return c.AuthCode.mutate(ctx, m)
case *AuthRequestMutation:
return c.AuthRequest.mutate(ctx, m)
case *AuthSessionMutation:
return c.AuthSession.mutate(ctx, m)
case *ConnectorMutation:
return c.Connector.mutate(ctx, m)
case *DeviceRequestMutation:
@ -273,6 +289,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
return c.Password.mutate(ctx, m)
case *RefreshTokenMutation:
return c.RefreshToken.mutate(ctx, m)
case *UserIdentityMutation:
return c.UserIdentity.mutate(ctx, m)
default:
return nil, fmt.Errorf("db: unknown mutation type %T", m)
}
@ -544,6 +562,139 @@ func (c *AuthRequestClient) mutate(ctx context.Context, m *AuthRequestMutation)
}
}
// AuthSessionClient is a client for the AuthSession schema.
type AuthSessionClient struct {
	config
}

// NewAuthSessionClient returns a client for the AuthSession from the given config.
func NewAuthSessionClient(c config) *AuthSessionClient {
	client := &AuthSessionClient{config: c}
	return client
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `authsession.Hooks(f(g(h())))`.
func (c *AuthSessionClient) Use(hooks ...Hook) {
	for _, h := range hooks {
		c.hooks.AuthSession = append(c.hooks.AuthSession, h)
	}
}

// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `authsession.Intercept(f(g(h())))`.
func (c *AuthSessionClient) Intercept(interceptors ...Interceptor) {
	for _, i := range interceptors {
		c.inters.AuthSession = append(c.inters.AuthSession, i)
	}
}
// Create returns a builder for creating an AuthSession entity.
func (c *AuthSessionClient) Create() *AuthSessionCreate {
	return &AuthSessionCreate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAuthSessionMutation(c.config, OpCreate),
	}
}

// CreateBulk returns a builder for creating a bulk of AuthSession entities.
func (c *AuthSessionClient) CreateBulk(builders ...*AuthSessionCreate) *AuthSessionCreateBulk {
	return &AuthSessionCreateBulk{config: c.config, builders: builders}
}

// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *AuthSessionClient) MapCreateBulk(slice any, setFunc func(*AuthSessionCreate, int)) *AuthSessionCreateBulk {
	// Anything other than a slice is rejected; the error is carried inside the
	// bulk builder and surfaced when it is executed.
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		return &AuthSessionCreateBulk{err: fmt.Errorf("calling to AuthSessionClient.MapCreateBulk with wrong type %T, need slice", slice)}
	}
	builders := make([]*AuthSessionCreate, v.Len())
	for i := range builders {
		b := c.Create()
		setFunc(b, i)
		builders[i] = b
	}
	return &AuthSessionCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for AuthSession.
func (c *AuthSessionClient) Update() *AuthSessionUpdate {
	return &AuthSessionUpdate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAuthSessionMutation(c.config, OpUpdate),
	}
}

// UpdateOne returns an update builder for the given entity.
func (c *AuthSessionClient) UpdateOne(_m *AuthSession) *AuthSessionUpdateOne {
	return &AuthSessionUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAuthSessionMutation(c.config, OpUpdateOne, withAuthSession(_m)),
	}
}

// UpdateOneID returns an update builder for the given id.
func (c *AuthSessionClient) UpdateOneID(id string) *AuthSessionUpdateOne {
	return &AuthSessionUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAuthSessionMutation(c.config, OpUpdateOne, withAuthSessionID(id)),
	}
}

// Delete returns a delete builder for AuthSession.
func (c *AuthSessionClient) Delete() *AuthSessionDelete {
	return &AuthSessionDelete{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAuthSessionMutation(c.config, OpDelete),
	}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *AuthSessionClient) DeleteOne(_m *AuthSession) *AuthSessionDeleteOne {
	return c.DeleteOneID(_m.ID)
}

// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *AuthSessionClient) DeleteOneID(id string) *AuthSessionDeleteOne {
	b := c.Delete().Where(authsession.ID(id))
	// Pin the id and op on the mutation so the delete targets exactly one row.
	b.mutation.id = &id
	b.mutation.op = OpDeleteOne
	return &AuthSessionDeleteOne{b}
}
// Query returns a query builder for AuthSession.
func (c *AuthSessionClient) Query() *AuthSessionQuery {
	q := &AuthSessionQuery{
		config: c.config,
		ctx:    &QueryContext{Type: TypeAuthSession},
		inters: c.Interceptors(),
	}
	return q
}

// Get returns an AuthSession entity by its id.
func (c *AuthSessionClient) Get(ctx context.Context, id string) (*AuthSession, error) {
	return c.Query().Where(authsession.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *AuthSessionClient) GetX(ctx context.Context, id string) *AuthSession {
	entity, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return entity
}
// Hooks returns the client hooks.
func (c *AuthSessionClient) Hooks() []Hook {
	return c.hooks.AuthSession
}

// Interceptors returns the client interceptors.
func (c *AuthSessionClient) Interceptors() []Interceptor {
	return c.inters.AuthSession
}

func (c *AuthSessionClient) mutate(ctx context.Context, m *AuthSessionMutation) (Value, error) {
	// Dispatch the mutation to the builder matching its operation.
	hooks := c.Hooks()
	switch m.Op() {
	case OpCreate:
		return (&AuthSessionCreate{config: c.config, hooks: hooks, mutation: m}).Save(ctx)
	case OpUpdate:
		return (&AuthSessionUpdate{config: c.config, hooks: hooks, mutation: m}).Save(ctx)
	case OpUpdateOne:
		return (&AuthSessionUpdateOne{config: c.config, hooks: hooks, mutation: m}).Save(ctx)
	case OpDelete, OpDeleteOne:
		return (&AuthSessionDelete{config: c.config, hooks: hooks, mutation: m}).Exec(ctx)
	default:
		return nil, fmt.Errorf("db: unknown AuthSession mutation op: %q", m.Op())
	}
}
// ConnectorClient is a client for the Connector schema.
type ConnectorClient struct {
config
@ -1608,14 +1759,148 @@ func (c *RefreshTokenClient) mutate(ctx context.Context, m *RefreshTokenMutation
}
}
// UserIdentityClient is a client for the UserIdentity schema.
type UserIdentityClient struct {
	config
}

// NewUserIdentityClient returns a client for the UserIdentity from the given config.
func NewUserIdentityClient(c config) *UserIdentityClient {
	client := &UserIdentityClient{config: c}
	return client
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `useridentity.Hooks(f(g(h())))`.
func (c *UserIdentityClient) Use(hooks ...Hook) {
	for _, h := range hooks {
		c.hooks.UserIdentity = append(c.hooks.UserIdentity, h)
	}
}

// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `useridentity.Intercept(f(g(h())))`.
func (c *UserIdentityClient) Intercept(interceptors ...Interceptor) {
	for _, i := range interceptors {
		c.inters.UserIdentity = append(c.inters.UserIdentity, i)
	}
}
// Create returns a builder for creating a UserIdentity entity.
func (c *UserIdentityClient) Create() *UserIdentityCreate {
	return &UserIdentityCreate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newUserIdentityMutation(c.config, OpCreate),
	}
}

// CreateBulk returns a builder for creating a bulk of UserIdentity entities.
func (c *UserIdentityClient) CreateBulk(builders ...*UserIdentityCreate) *UserIdentityCreateBulk {
	return &UserIdentityCreateBulk{config: c.config, builders: builders}
}

// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *UserIdentityClient) MapCreateBulk(slice any, setFunc func(*UserIdentityCreate, int)) *UserIdentityCreateBulk {
	// Anything other than a slice is rejected; the error is carried inside the
	// bulk builder and surfaced when it is executed.
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		return &UserIdentityCreateBulk{err: fmt.Errorf("calling to UserIdentityClient.MapCreateBulk with wrong type %T, need slice", slice)}
	}
	builders := make([]*UserIdentityCreate, v.Len())
	for i := range builders {
		b := c.Create()
		setFunc(b, i)
		builders[i] = b
	}
	return &UserIdentityCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for UserIdentity.
func (c *UserIdentityClient) Update() *UserIdentityUpdate {
	return &UserIdentityUpdate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newUserIdentityMutation(c.config, OpUpdate),
	}
}

// UpdateOne returns an update builder for the given entity.
func (c *UserIdentityClient) UpdateOne(_m *UserIdentity) *UserIdentityUpdateOne {
	return &UserIdentityUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newUserIdentityMutation(c.config, OpUpdateOne, withUserIdentity(_m)),
	}
}

// UpdateOneID returns an update builder for the given id.
func (c *UserIdentityClient) UpdateOneID(id string) *UserIdentityUpdateOne {
	return &UserIdentityUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newUserIdentityMutation(c.config, OpUpdateOne, withUserIdentityID(id)),
	}
}

// Delete returns a delete builder for UserIdentity.
func (c *UserIdentityClient) Delete() *UserIdentityDelete {
	return &UserIdentityDelete{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newUserIdentityMutation(c.config, OpDelete),
	}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *UserIdentityClient) DeleteOne(_m *UserIdentity) *UserIdentityDeleteOne {
	return c.DeleteOneID(_m.ID)
}

// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *UserIdentityClient) DeleteOneID(id string) *UserIdentityDeleteOne {
	b := c.Delete().Where(useridentity.ID(id))
	// Pin the id and op on the mutation so the delete targets exactly one row.
	b.mutation.id = &id
	b.mutation.op = OpDeleteOne
	return &UserIdentityDeleteOne{b}
}
// Query returns a query builder for UserIdentity.
func (c *UserIdentityClient) Query() *UserIdentityQuery {
	q := &UserIdentityQuery{
		config: c.config,
		ctx:    &QueryContext{Type: TypeUserIdentity},
		inters: c.Interceptors(),
	}
	return q
}

// Get returns a UserIdentity entity by its id.
func (c *UserIdentityClient) Get(ctx context.Context, id string) (*UserIdentity, error) {
	return c.Query().Where(useridentity.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *UserIdentityClient) GetX(ctx context.Context, id string) *UserIdentity {
	entity, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return entity
}
// Hooks returns the client hooks.
func (c *UserIdentityClient) Hooks() []Hook {
	return c.hooks.UserIdentity
}

// Interceptors returns the client interceptors.
func (c *UserIdentityClient) Interceptors() []Interceptor {
	return c.inters.UserIdentity
}

func (c *UserIdentityClient) mutate(ctx context.Context, m *UserIdentityMutation) (Value, error) {
	// Dispatch the mutation to the builder matching its operation.
	hooks := c.Hooks()
	switch m.Op() {
	case OpCreate:
		return (&UserIdentityCreate{config: c.config, hooks: hooks, mutation: m}).Save(ctx)
	case OpUpdate:
		return (&UserIdentityUpdate{config: c.config, hooks: hooks, mutation: m}).Save(ctx)
	case OpUpdateOne:
		return (&UserIdentityUpdateOne{config: c.config, hooks: hooks, mutation: m}).Save(ctx)
	case OpDelete, OpDeleteOne:
		return (&UserIdentityDelete{config: c.config, hooks: hooks, mutation: m}).Exec(ctx)
	default:
		return nil, fmt.Errorf("db: unknown UserIdentity mutation op: %q", m.Op())
	}
}
// hooks and interceptors per client, for fast access.
type (
hooks struct {
AuthCode, AuthRequest, Connector, DeviceRequest, DeviceToken, Keys,
OAuth2Client, OfflineSession, Password, RefreshToken []ent.Hook
AuthCode, AuthRequest, AuthSession, Connector, DeviceRequest, DeviceToken, Keys,
OAuth2Client, OfflineSession, Password, RefreshToken, UserIdentity []ent.Hook
}
inters struct {
AuthCode, AuthRequest, Connector, DeviceRequest, DeviceToken, Keys,
OAuth2Client, OfflineSession, Password, RefreshToken []ent.Interceptor
AuthCode, AuthRequest, AuthSession, Connector, DeviceRequest, DeviceToken, Keys,
OAuth2Client, OfflineSession, Password, RefreshToken,
UserIdentity []ent.Interceptor
}
)

18
storage/ent/db/connector.go

@ -3,6 +3,7 @@
package db
import (
"encoding/json"
"fmt"
"strings"
@ -23,7 +24,9 @@ type Connector struct {
// ResourceVersion holds the value of the "resource_version" field.
ResourceVersion string `json:"resource_version,omitempty"`
// Config holds the value of the "config" field.
Config []byte `json:"config,omitempty"`
Config []byte `json:"config,omitempty"`
// GrantTypes holds the value of the "grant_types" field.
GrantTypes []string `json:"grant_types,omitempty"`
selectValues sql.SelectValues
}
@ -32,7 +35,7 @@ func (*Connector) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
for i := range columns {
switch columns[i] {
case connector.FieldConfig:
case connector.FieldConfig, connector.FieldGrantTypes:
values[i] = new([]byte)
case connector.FieldID, connector.FieldType, connector.FieldName, connector.FieldResourceVersion:
values[i] = new(sql.NullString)
@ -81,6 +84,14 @@ func (_m *Connector) assignValues(columns []string, values []any) error {
} else if value != nil {
_m.Config = *value
}
case connector.FieldGrantTypes:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field grant_types", values[i])
} else if value != nil && len(*value) > 0 {
if err := json.Unmarshal(*value, &_m.GrantTypes); err != nil {
return fmt.Errorf("unmarshal field grant_types: %w", err)
}
}
default:
_m.selectValues.Set(columns[i], values[i])
}
@ -128,6 +139,9 @@ func (_m *Connector) String() string {
builder.WriteString(", ")
builder.WriteString("config=")
builder.WriteString(fmt.Sprintf("%v", _m.Config))
builder.WriteString(", ")
builder.WriteString("grant_types=")
builder.WriteString(fmt.Sprintf("%v", _m.GrantTypes))
builder.WriteByte(')')
return builder.String()
}

3
storage/ent/db/connector/connector.go

@ -19,6 +19,8 @@ const (
FieldResourceVersion = "resource_version"
// FieldConfig holds the string denoting the config field in the database.
FieldConfig = "config"
// FieldGrantTypes holds the string denoting the grant_types field in the database.
FieldGrantTypes = "grant_types"
// Table holds the table name of the connector in the database.
Table = "connectors"
)
@ -30,6 +32,7 @@ var Columns = []string{
FieldName,
FieldResourceVersion,
FieldConfig,
FieldGrantTypes,
}
// ValidColumn reports if the column name is valid (part of the table columns).

10
storage/ent/db/connector/where.go

@ -317,6 +317,16 @@ func ConfigLTE(v []byte) predicate.Connector {
return predicate.Connector(sql.FieldLTE(FieldConfig, v))
}
// GrantTypesIsNil applies the IsNil predicate on the "grant_types" field.
func GrantTypesIsNil() predicate.Connector {
return predicate.Connector(sql.FieldIsNull(FieldGrantTypes))
}
// GrantTypesNotNil applies the NotNil predicate on the "grant_types" field.
func GrantTypesNotNil() predicate.Connector {
return predicate.Connector(sql.FieldNotNull(FieldGrantTypes))
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Connector) predicate.Connector {
return predicate.Connector(sql.AndPredicates(predicates...))

10
storage/ent/db/connector_create.go

@ -43,6 +43,12 @@ func (_c *ConnectorCreate) SetConfig(v []byte) *ConnectorCreate {
return _c
}
// SetGrantTypes sets the "grant_types" field.
func (_c *ConnectorCreate) SetGrantTypes(v []string) *ConnectorCreate {
_c.mutation.SetGrantTypes(v)
return _c
}
// SetID sets the "id" field.
func (_c *ConnectorCreate) SetID(v string) *ConnectorCreate {
_c.mutation.SetID(v)
@ -161,6 +167,10 @@ func (_c *ConnectorCreate) createSpec() (*Connector, *sqlgraph.CreateSpec) {
_spec.SetField(connector.FieldConfig, field.TypeBytes, value)
_node.Config = value
}
if value, ok := _c.mutation.GrantTypes(); ok {
_spec.SetField(connector.FieldGrantTypes, field.TypeJSON, value)
_node.GrantTypes = value
}
return _node, _spec
}

59
storage/ent/db/connector_update.go

@ -9,6 +9,7 @@ import (
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/dialect/sql/sqljson"
"entgo.io/ent/schema/field"
"github.com/dexidp/dex/storage/ent/db/connector"
"github.com/dexidp/dex/storage/ent/db/predicate"
@ -75,6 +76,24 @@ func (_u *ConnectorUpdate) SetConfig(v []byte) *ConnectorUpdate {
return _u
}
// SetGrantTypes sets the "grant_types" field.
func (_u *ConnectorUpdate) SetGrantTypes(v []string) *ConnectorUpdate {
_u.mutation.SetGrantTypes(v)
return _u
}
// AppendGrantTypes appends value to the "grant_types" field.
func (_u *ConnectorUpdate) AppendGrantTypes(v []string) *ConnectorUpdate {
_u.mutation.AppendGrantTypes(v)
return _u
}
// ClearGrantTypes clears the value of the "grant_types" field.
func (_u *ConnectorUpdate) ClearGrantTypes() *ConnectorUpdate {
_u.mutation.ClearGrantTypes()
return _u
}
// Mutation returns the ConnectorMutation object of the builder.
func (_u *ConnectorUpdate) Mutation() *ConnectorMutation {
return _u.mutation
@ -146,6 +165,17 @@ func (_u *ConnectorUpdate) sqlSave(ctx context.Context) (_node int, err error) {
if value, ok := _u.mutation.Config(); ok {
_spec.SetField(connector.FieldConfig, field.TypeBytes, value)
}
if value, ok := _u.mutation.GrantTypes(); ok {
_spec.SetField(connector.FieldGrantTypes, field.TypeJSON, value)
}
if value, ok := _u.mutation.AppendedGrantTypes(); ok {
_spec.AddModifier(func(u *sql.UpdateBuilder) {
sqljson.Append(u, connector.FieldGrantTypes, value)
})
}
if _u.mutation.GrantTypesCleared() {
_spec.ClearField(connector.FieldGrantTypes, field.TypeJSON)
}
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{connector.Label}
@ -214,6 +244,24 @@ func (_u *ConnectorUpdateOne) SetConfig(v []byte) *ConnectorUpdateOne {
return _u
}
// SetGrantTypes sets the "grant_types" field.
func (_u *ConnectorUpdateOne) SetGrantTypes(v []string) *ConnectorUpdateOne {
_u.mutation.SetGrantTypes(v)
return _u
}
// AppendGrantTypes appends value to the "grant_types" field.
func (_u *ConnectorUpdateOne) AppendGrantTypes(v []string) *ConnectorUpdateOne {
_u.mutation.AppendGrantTypes(v)
return _u
}
// ClearGrantTypes clears the value of the "grant_types" field.
func (_u *ConnectorUpdateOne) ClearGrantTypes() *ConnectorUpdateOne {
_u.mutation.ClearGrantTypes()
return _u
}
// Mutation returns the ConnectorMutation object of the builder.
func (_u *ConnectorUpdateOne) Mutation() *ConnectorMutation {
return _u.mutation
@ -315,6 +363,17 @@ func (_u *ConnectorUpdateOne) sqlSave(ctx context.Context) (_node *Connector, er
if value, ok := _u.mutation.Config(); ok {
_spec.SetField(connector.FieldConfig, field.TypeBytes, value)
}
if value, ok := _u.mutation.GrantTypes(); ok {
_spec.SetField(connector.FieldGrantTypes, field.TypeJSON, value)
}
if value, ok := _u.mutation.AppendedGrantTypes(); ok {
_spec.AddModifier(func(u *sql.UpdateBuilder) {
sqljson.Append(u, connector.FieldGrantTypes, value)
})
}
if _u.mutation.GrantTypesCleared() {
_spec.ClearField(connector.FieldGrantTypes, field.TypeJSON)
}
_node = &Connector{config: _u.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues

4
storage/ent/db/ent.go

@ -14,6 +14,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/dexidp/dex/storage/ent/db/authcode"
"github.com/dexidp/dex/storage/ent/db/authrequest"
"github.com/dexidp/dex/storage/ent/db/authsession"
"github.com/dexidp/dex/storage/ent/db/connector"
"github.com/dexidp/dex/storage/ent/db/devicerequest"
"github.com/dexidp/dex/storage/ent/db/devicetoken"
@ -22,6 +23,7 @@ import (
"github.com/dexidp/dex/storage/ent/db/offlinesession"
"github.com/dexidp/dex/storage/ent/db/password"
"github.com/dexidp/dex/storage/ent/db/refreshtoken"
"github.com/dexidp/dex/storage/ent/db/useridentity"
)
// ent aliases to avoid import conflicts in user's code.
@ -84,6 +86,7 @@ func checkColumn(t, c string) error {
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
authcode.Table: authcode.ValidColumn,
authrequest.Table: authrequest.ValidColumn,
authsession.Table: authsession.ValidColumn,
connector.Table: connector.ValidColumn,
devicerequest.Table: devicerequest.ValidColumn,
devicetoken.Table: devicetoken.ValidColumn,
@ -92,6 +95,7 @@ func checkColumn(t, c string) error {
offlinesession.Table: offlinesession.ValidColumn,
password.Table: password.ValidColumn,
refreshtoken.Table: refreshtoken.ValidColumn,
useridentity.Table: useridentity.ValidColumn,
})
})
return columnCheck(t, c)

24
storage/ent/db/hook/hook.go

@ -33,6 +33,18 @@ func (f AuthRequestFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, e
return nil, fmt.Errorf("unexpected mutation type %T. expect *db.AuthRequestMutation", m)
}
// The AuthSessionFunc type is an adapter to allow the use of ordinary
// function as AuthSession mutator.
type AuthSessionFunc func(context.Context, *db.AuthSessionMutation) (db.Value, error)
// Mutate calls f(ctx, m).
func (f AuthSessionFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) {
if mv, ok := m.(*db.AuthSessionMutation); ok {
return f(ctx, mv)
}
return nil, fmt.Errorf("unexpected mutation type %T. expect *db.AuthSessionMutation", m)
}
// The ConnectorFunc type is an adapter to allow the use of ordinary
// function as Connector mutator.
type ConnectorFunc func(context.Context, *db.ConnectorMutation) (db.Value, error)
@ -129,6 +141,18 @@ func (f RefreshTokenFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value,
return nil, fmt.Errorf("unexpected mutation type %T. expect *db.RefreshTokenMutation", m)
}
// The UserIdentityFunc type is an adapter to allow the use of ordinary
// function as UserIdentity mutator.
type UserIdentityFunc func(context.Context, *db.UserIdentityMutation) (db.Value, error)
// Mutate calls f(ctx, m).
func (f UserIdentityFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) {
if mv, ok := m.(*db.UserIdentityMutation); ok {
return f(ctx, mv)
}
return nil, fmt.Errorf("unexpected mutation type %T. expect *db.UserIdentityMutation", m)
}
// Condition is a hook condition function.
type Condition func(context.Context, db.Mutation) bool

41
storage/ent/db/migrate/schema.go

@ -63,6 +63,21 @@ var (
Columns: AuthRequestsColumns,
PrimaryKey: []*schema.Column{AuthRequestsColumns[0]},
}
// AuthSessionsColumns holds the columns for the "auth_sessions" table.
AuthSessionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "client_states", Type: field.TypeBytes},
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}},
{Name: "last_activity", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}},
{Name: "ip_address", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "user_agent", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
}
// AuthSessionsTable holds the schema information for the "auth_sessions" table.
AuthSessionsTable = &schema.Table{
Name: "auth_sessions",
Columns: AuthSessionsColumns,
PrimaryKey: []*schema.Column{AuthSessionsColumns[0]},
}
// ConnectorsColumns holds the columns for the "connectors" table.
ConnectorsColumns = []*schema.Column{
{Name: "id", Type: field.TypeString, Unique: true, Size: 100, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
@ -70,6 +85,7 @@ var (
{Name: "name", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "resource_version", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "config", Type: field.TypeBytes},
{Name: "grant_types", Type: field.TypeJSON, Nullable: true},
}
// ConnectorsTable holds the schema information for the "connectors" table.
ConnectorsTable = &schema.Table{
@ -134,6 +150,7 @@ var (
{Name: "public", Type: field.TypeBool},
{Name: "name", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "logo_url", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "allowed_connectors", Type: field.TypeJSON, Nullable: true},
}
// Oauth2clientsTable holds the schema information for the "oauth2clients" table.
Oauth2clientsTable = &schema.Table{
@ -198,10 +215,33 @@ var (
Columns: RefreshTokensColumns,
PrimaryKey: []*schema.Column{RefreshTokensColumns[0]},
}
// UserIdentitiesColumns holds the columns for the "user_identities" table.
UserIdentitiesColumns = []*schema.Column{
{Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "connector_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "claims_user_id", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "claims_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "claims_preferred_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "claims_email", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}},
{Name: "claims_email_verified", Type: field.TypeBool, Default: false},
{Name: "claims_groups", Type: field.TypeJSON, Nullable: true},
{Name: "consents", Type: field.TypeBytes},
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}},
{Name: "last_login", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}},
{Name: "blocked_until", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}},
}
// UserIdentitiesTable holds the schema information for the "user_identities" table.
UserIdentitiesTable = &schema.Table{
Name: "user_identities",
Columns: UserIdentitiesColumns,
PrimaryKey: []*schema.Column{UserIdentitiesColumns[0]},
}
// Tables holds all the tables in the schema.
Tables = []*schema.Table{
AuthCodesTable,
AuthRequestsTable,
AuthSessionsTable,
ConnectorsTable,
DeviceRequestsTable,
DeviceTokensTable,
@ -210,6 +250,7 @@ var (
OfflineSessionsTable,
PasswordsTable,
RefreshTokensTable,
UserIdentitiesTable,
}
)

6814
storage/ent/db/mutation.go

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save