diff --git a/.github/workflows/analysis-scorecard.yaml b/.github/workflows/analysis-scorecard.yaml index 2bef8332..56306a64 100644 --- a/.github/workflows/analysis-scorecard.yaml +++ b/.github/workflows/analysis-scorecard.yaml @@ -35,13 +35,13 @@ jobs: publish_results: true - name: Upload results as artifact - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: OpenSSF Scorecard results path: results.sarif retention-days: 5 - name: Upload results to GitHub Security tab - uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v3.29.5 + uses: github/codeql-action/upload-sarif@0d579ffd059c29b07949a3cce3983f0780820c98 # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/artifacts.yaml b/.github/workflows/artifacts.yaml index f518d4d0..7169a50f 100644 --- a/.github/workflows/artifacts.yaml +++ b/.github/workflows/artifacts.yaml @@ -56,16 +56,16 @@ jobs: fetch-tags: true - name: Set up QEMU - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - name: Set up Syft - uses: anchore/sbom-action/download-syft@28d71544de8eaf1b958d335707167c5f783590ad # v0.22.2 + uses: anchore/sbom-action/download-syft@57aae528053a48a3f6235f2d9461b05fbcb7366d # v0.23.1 - name: Install cosign - uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 + uses: sigstore/cosign-installer@ba7bc0a3fef59531c69a25acd34668d6d3fe6f22 # v4.1.0 - name: Set image name id: image-name @@ -73,7 +73,7 @@ jobs: - name: Gather build metadata id: meta - uses: 
docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 + uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0 with: images: | ${{ steps.image-name.outputs.value }} @@ -109,7 +109,7 @@ jobs: if_false: type=oci,dest=image.tar - name: Login to GitHub Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ghcr.io username: ${{ github.actor }} @@ -117,7 +117,7 @@ jobs: if: inputs.publish - name: Login to Docker Hub - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} @@ -125,7 +125,7 @@ jobs: - name: Build and push image id: build - uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2 + uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 with: context: . platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x @@ -197,14 +197,14 @@ jobs: # TODO: uncomment when the action is working for non ghcr.io pushes. 
GH Issue: https://github.com/actions/attest-build-provenance/issues/80 # - name: Generate build provenance attestation - # uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0 + # uses: actions/attest-build-provenance@a2bbfa25375fe432b6a289bc6b6cd05ecd0c4c32 # v4.1.0 # with: # subject-name: dexidp/dex # subject-digest: ${{ steps.build.outputs.digest }} # push-to-registry: true - name: Generate build provenance attestation - uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0 + uses: actions/attest-build-provenance@a2bbfa25375fe432b6a289bc6b6cd05ecd0c4c32 # v4.1.0 with: subject-name: ghcr.io/${{ github.repository }} subject-digest: ${{ steps.build.outputs.digest }} @@ -233,7 +233,7 @@ jobs: restore-keys: trivy-cache- - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@e368e328979b113139d6f9068e03accaed98a518 # 0.34.1 + uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # 0.35.0 with: input: docker-image format: sarif @@ -256,7 +256,7 @@ jobs: run: cat trivy-results.sarif - name: Upload Trivy scan results as artifact - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: "[${{ github.job }}] Trivy scan results" path: trivy-results.sarif @@ -264,6 +264,6 @@ jobs: overwrite: true - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v3.29.5 + uses: github/codeql-action/upload-sarif@0d579ffd059c29b07949a3cce3983f0780820c98 # v3.29.5 with: sarif_file: trivy-results.sarif diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index feb7cf38..4aec25e0 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -16,7 +16,7 @@ jobs: steps: - name: Check minimum labels - uses: 
mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5 + uses: mheap/github-action-required-labels@0ac283b4e65c1fb28ce6079dea5546ceca98ccbe # v5.5 with: mode: minimum count: 1 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0d11a12c..5224e1b3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -48,6 +48,24 @@ jobs: - 3306 options: --health-cmd "mysql -proot -e \"show databases;\"" --health-interval 10s --health-timeout 5s --health-retries 5 + mysql8: + image: mysql:8.0 + env: + MYSQL_ROOT_PASSWORD: root + MYSQL_DATABASE: dex + ports: + - 3306 + options: --health-cmd "mysql -proot -e \"show databases;\"" --health-interval 10s --health-timeout 5s --health-retries 5 + + mysql8-ent: + image: mysql:8.0 + env: + MYSQL_ROOT_PASSWORD: root + MYSQL_DATABASE: dex + ports: + - 3306 + options: --health-cmd "mysql -proot -e \"show databases;\"" --health-interval 10s --health-timeout 5s --health-retries 5 + etcd: image: gcr.io/etcd-development/etcd:v3.5.0 ports: @@ -87,7 +105,7 @@ jobs: uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Set up Go - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: "1.25" @@ -124,6 +142,18 @@ jobs: DEX_MYSQL_ENT_HOST: 127.0.0.1 DEX_MYSQL_ENT_PORT: ${{ job.services.mysql-ent.ports[3306] }} + DEX_MYSQL8_DATABASE: dex + DEX_MYSQL8_USER: root + DEX_MYSQL8_PASSWORD: root + DEX_MYSQL8_HOST: 127.0.0.1 + DEX_MYSQL8_PORT: ${{ job.services.mysql8.ports[3306] }} + + DEX_MYSQL8_ENT_DATABASE: dex + DEX_MYSQL8_ENT_USER: root + DEX_MYSQL8_ENT_PASSWORD: root + DEX_MYSQL8_ENT_HOST: 127.0.0.1 + DEX_MYSQL8_ENT_PORT: ${{ job.services.mysql8-ent.ports[3306] }} + DEX_POSTGRES_DATABASE: postgres DEX_POSTGRES_USER: postgres DEX_POSTGRES_PASSWORD: postgres @@ -163,7 +193,7 @@ jobs: uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 
- name: Set up Go - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: "1.25" @@ -198,4 +228,4 @@ jobs: uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Dependency Review - uses: actions/dependency-review-action@05fe4576374b728f0c523d6a13d64c25081e0803 # v4.8.3 + uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0 diff --git a/Dockerfile b/Dockerfile index 447a431b..69f25eb7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ ARG BASE_IMAGE=alpine FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.9.0@sha256:c64defb9ed5a91eacb37f96ccc3d4cd72521c4bd18d5442905b95e2226b0e707 AS xx -FROM --platform=$BUILDPLATFORM golang:1.26.0-alpine3.22@sha256:169d3991a4f795124a88b33c73549955a3d856e26e8504b5530c30bd245f9f1b AS builder +FROM --platform=$BUILDPLATFORM golang:1.26.1-alpine3.22@sha256:07e91d24f6330432729082bb580983181809e0a48f0f38ecde26868d4568c6ac AS builder COPY --from=xx / / @@ -59,7 +59,7 @@ FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f FROM alpine AS user-setup RUN addgroup -g 1001 -S dex && adduser -u 1001 -S -G dex -D -H -s /sbin/nologin dex -FROM gcr.io/distroless/static-debian13:nonroot@sha256:01e550fdb7ab79ee7be5ff440a563a58f1fd000ad9e0c532e65c3d23f917f1c5 AS distroless +FROM gcr.io/distroless/static-debian13:nonroot@sha256:e3f945647ffb95b5839c07038d64f9811adf17308b9121d8a2b87b6a22a80a39 AS distroless FROM $BASE_IMAGE diff --git a/api/api.pb.go b/api/api.pb.go index 702d3758..23c61410 100644 --- a/api/api.pb.go +++ b/api/api.pb.go @@ -23,16 +23,17 @@ const ( // Client represents an OAuth2 client. 
type Client struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` - RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"` - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` + RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` + TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` + Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"` + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` + AllowedConnectors []string `protobuf:"bytes,8,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Client) Reset() { @@ -114,6 +115,13 @@ func (x *Client) GetLogoUrl() string { return "" } +func (x *Client) GetAllowedConnectors() []string { + if x != nil { + return x.AllowedConnectors + } + return nil +} + // CreateClientReq is a request 
to make a client. type CreateClientReq struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -305,14 +313,15 @@ func (x *DeleteClientResp) GetNotFound() bool { // UpdateClientReq is a request to update an existing client. type UpdateClientReq struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` + TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` + AllowedConnectors []string `protobuf:"bytes,6,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UpdateClientReq) Reset() { @@ -380,6 +389,13 @@ func (x *UpdateClientReq) GetLogoUrl() string { return "" } +func (x *UpdateClientReq) GetAllowedConnectors() []string { + if x != nil { + return x.AllowedConnectors + } + return nil +} + // UpdateClientResp returns the response from updating a client. 
type UpdateClientResp struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1326,7 +1342,7 @@ var File_api_api_proto protoreflect.FileDescriptor var file_api_api_proto_rawDesc = string([]byte{ 0x0a, 0x0d, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x61, 0x70, 0x69, 0x22, 0xc1, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, + 0x03, 0x61, 0x70, 0x69, 0x22, 0xf0, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72, @@ -1338,155 +1354,161 @@ var file_api_api_proto_rawDesc = string([]byte{ 0x08, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x22, 0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, - 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, - 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x0b, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x22, 0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, - 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x9a, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a, - 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, - 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, - 0x6c, 0x22, 0x2f, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, - 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, - 0x6e, 0x64, 0x22, 0x69, 0x0a, 0x08, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x14, - 0x0a, 
0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x3e, 0x0a, - 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3b, 0x0a, - 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, + 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, + 0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, - 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x67, 0x0a, 0x11, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, - 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x31, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, - 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, - 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x29, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x22, 0x31, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, + 0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 
0x69, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, + 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, + 0x2f, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x22, 0x69, 0x0a, 0x08, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x12, 
0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x11, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x12, 0x29, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3b, 0x0a, 0x12, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, + 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x67, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, + 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x31, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 
0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, - 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x22, 0x3f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2b, 0x0a, 0x09, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x09, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x22, 0x37, 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, - 0x03, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22, - 0x7a, 0x0a, 0x0f, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, - 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0e, 0x4c, - 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, - 0x07, 
0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x22, 0x30, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, + 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x29, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, + 0x31, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, - 0x6e, 0x64, 0x22, 0x45, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 
0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, - 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, - 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x32, 0xc7, 0x05, 0x0a, 0x03, 0x44, 0x65, 0x78, - 0x12, 0x3d, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, - 0x3d, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, - 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, - 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 
0x43, 0x0a, - 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, - 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x0d, - 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x14, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, - 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, - 0x3a, 0x0a, 0x0b, 0x4c, 
0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x13, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, - 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, - 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x22, 0x00, 0x42, 0x2f, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73, - 0x2e, 0x64, 0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 0x5a, 0x19, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, - 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x22, 0x3f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2b, 0x0a, 0x09, 0x70, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x09, 0x70, 0x61, 
0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x22, 0x37, 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, + 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22, 0x7a, 0x0a, + 0x0f, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0e, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, + 0x65, 0x72, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x66, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x22, 
0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x30, + 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x22, 0x45, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a, + 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, + 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, + 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x32, 0xc7, 0x05, 0x0a, 0x03, 0x44, 0x65, 0x78, 0x12, 0x3d, + 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, + 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, + 0x17, 
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x0d, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3a, 0x0a, + 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x13, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, + 0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, + 0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 
0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x42, 0x2f, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73, 0x2e, 0x64, + 0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 0x5a, 0x19, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, 0x61, 0x70, + 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( diff --git a/api/api.proto b/api/api.proto index cfb7979c..01e3db17 100644 --- a/api/api.proto +++ b/api/api.proto @@ -14,6 +14,7 @@ message Client { bool public = 5; string name = 6; string logo_url = 7; + repeated string allowed_connectors = 8; } // CreateClientReq is a request to make a client. @@ -45,6 +46,7 @@ message UpdateClientReq { repeated string trusted_peers = 3; string name = 4; string logo_url = 5; + repeated string allowed_connectors = 6; } // UpdateClientResp returns the response from updating a client. diff --git a/api/v2/api.pb.go b/api/v2/api.pb.go index 6e8a134e..84955190 100644 --- a/api/v2/api.pb.go +++ b/api/v2/api.pb.go @@ -23,16 +23,17 @@ const ( // Client represents an OAuth2 client. 
type Client struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` - RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"` - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` + RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` + TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` + Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"` + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` + AllowedConnectors []string `protobuf:"bytes,8,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Client) Reset() { @@ -114,17 +115,25 @@ func (x *Client) GetLogoUrl() string { return "" } +func (x *Client) GetAllowedConnectors() []string { + if x != nil { + return x.AllowedConnectors + } + return nil +} + // ClientInfo represents an 
OAuth2 client without sensitive information. type ClientInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Public bool `protobuf:"varint,4,opt,name=public,proto3" json:"public,omitempty"` - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,6,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` + TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` + Public bool `protobuf:"varint,4,opt,name=public,proto3" json:"public,omitempty"` + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + LogoUrl string `protobuf:"bytes,6,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` + AllowedConnectors []string `protobuf:"bytes,7,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientInfo) Reset() { @@ -199,6 +208,13 @@ func (x *ClientInfo) GetLogoUrl() string { return "" } +func (x *ClientInfo) GetAllowedConnectors() []string { + if x != nil { + return x.AllowedConnectors + } + return nil +} + // GetClientReq is a request to retrieve client details. 
type GetClientReq struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -481,14 +497,15 @@ func (x *DeleteClientResp) GetNotFound() bool { // UpdateClientReq is a request to update an existing client. type UpdateClientReq struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` + TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` + AllowedConnectors []string `protobuf:"bytes,6,rep,name=allowed_connectors,json=allowedConnectors,proto3" json:"allowed_connectors,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UpdateClientReq) Reset() { @@ -556,6 +573,13 @@ func (x *UpdateClientReq) GetLogoUrl() string { return "" } +func (x *UpdateClientReq) GetAllowedConnectors() []string { + if x != nil { + return x.AllowedConnectors + } + return nil +} + // UpdateClientResp returns the response from updating a client. 
type UpdateClientResp struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1129,6 +1153,7 @@ type Connector struct { Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` Config []byte `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + GrantTypes []string `protobuf:"bytes,5,rep,name=grant_types,json=grantTypes,proto3" json:"grant_types,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1191,6 +1216,13 @@ func (x *Connector) GetConfig() []byte { return nil } +func (x *Connector) GetGrantTypes() []string { + if x != nil { + return x.GrantTypes + } + return nil +} + // CreateConnectorReq is a request to make a connector. type CreateConnectorReq struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1281,21 +1313,71 @@ func (x *CreateConnectorResp) GetAlreadyExists() bool { return false } +// GrantTypes wraps a list of grant types to distinguish between +// "not specified" (no update) and "empty list" (unrestricted). 
+type GrantTypes struct { + state protoimpl.MessageState `protogen:"open.v1"` + GrantTypes []string `protobuf:"bytes,1,rep,name=grant_types,json=grantTypes,proto3" json:"grant_types,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantTypes) Reset() { + *x = GrantTypes{} + mi := &file_api_v2_api_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantTypes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantTypes) ProtoMessage() {} + +func (x *GrantTypes) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrantTypes.ProtoReflect.Descriptor instead. +func (*GrantTypes) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{24} +} + +func (x *GrantTypes) GetGrantTypes() []string { + if x != nil { + return x.GrantTypes + } + return nil +} + // UpdateConnectorReq is a request to modify an existing connector. type UpdateConnectorReq struct { state protoimpl.MessageState `protogen:"open.v1"` // The id used to lookup the connector. 
This field cannot be modified - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - NewType string `protobuf:"bytes,2,opt,name=new_type,json=newType,proto3" json:"new_type,omitempty"` - NewName string `protobuf:"bytes,3,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` - NewConfig []byte `protobuf:"bytes,4,opt,name=new_config,json=newConfig,proto3" json:"new_config,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + NewType string `protobuf:"bytes,2,opt,name=new_type,json=newType,proto3" json:"new_type,omitempty"` + NewName string `protobuf:"bytes,3,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` + NewConfig []byte `protobuf:"bytes,4,opt,name=new_config,json=newConfig,proto3" json:"new_config,omitempty"` + // If set, updates the connector's allowed grant types. + // An empty grant_types list means unrestricted (all grant types allowed). + // If not set (null), grant types are not modified. + NewGrantTypes *GrantTypes `protobuf:"bytes,5,opt,name=new_grant_types,json=newGrantTypes,proto3" json:"new_grant_types,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *UpdateConnectorReq) Reset() { *x = UpdateConnectorReq{} - mi := &file_api_v2_api_proto_msgTypes[24] + mi := &file_api_v2_api_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1307,7 +1389,7 @@ func (x *UpdateConnectorReq) String() string { func (*UpdateConnectorReq) ProtoMessage() {} func (x *UpdateConnectorReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[24] + mi := &file_api_v2_api_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1320,7 +1402,7 @@ func (x *UpdateConnectorReq) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateConnectorReq.ProtoReflect.Descriptor instead. 
func (*UpdateConnectorReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{24} + return file_api_v2_api_proto_rawDescGZIP(), []int{25} } func (x *UpdateConnectorReq) GetId() string { @@ -1351,6 +1433,13 @@ func (x *UpdateConnectorReq) GetNewConfig() []byte { return nil } +func (x *UpdateConnectorReq) GetNewGrantTypes() *GrantTypes { + if x != nil { + return x.NewGrantTypes + } + return nil +} + // UpdateConnectorResp returns the response from modifying an existing connector. type UpdateConnectorResp struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1361,7 +1450,7 @@ type UpdateConnectorResp struct { func (x *UpdateConnectorResp) Reset() { *x = UpdateConnectorResp{} - mi := &file_api_v2_api_proto_msgTypes[25] + mi := &file_api_v2_api_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1373,7 +1462,7 @@ func (x *UpdateConnectorResp) String() string { func (*UpdateConnectorResp) ProtoMessage() {} func (x *UpdateConnectorResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[25] + mi := &file_api_v2_api_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1386,7 +1475,7 @@ func (x *UpdateConnectorResp) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateConnectorResp.ProtoReflect.Descriptor instead. 
func (*UpdateConnectorResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{25} + return file_api_v2_api_proto_rawDescGZIP(), []int{26} } func (x *UpdateConnectorResp) GetNotFound() bool { @@ -1406,7 +1495,7 @@ type DeleteConnectorReq struct { func (x *DeleteConnectorReq) Reset() { *x = DeleteConnectorReq{} - mi := &file_api_v2_api_proto_msgTypes[26] + mi := &file_api_v2_api_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1418,7 +1507,7 @@ func (x *DeleteConnectorReq) String() string { func (*DeleteConnectorReq) ProtoMessage() {} func (x *DeleteConnectorReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[26] + mi := &file_api_v2_api_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1431,7 +1520,7 @@ func (x *DeleteConnectorReq) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteConnectorReq.ProtoReflect.Descriptor instead. 
func (*DeleteConnectorReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{26} + return file_api_v2_api_proto_rawDescGZIP(), []int{27} } func (x *DeleteConnectorReq) GetId() string { @@ -1451,7 +1540,7 @@ type DeleteConnectorResp struct { func (x *DeleteConnectorResp) Reset() { *x = DeleteConnectorResp{} - mi := &file_api_v2_api_proto_msgTypes[27] + mi := &file_api_v2_api_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1463,7 +1552,7 @@ func (x *DeleteConnectorResp) String() string { func (*DeleteConnectorResp) ProtoMessage() {} func (x *DeleteConnectorResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[27] + mi := &file_api_v2_api_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1476,7 +1565,7 @@ func (x *DeleteConnectorResp) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteConnectorResp.ProtoReflect.Descriptor instead. 
func (*DeleteConnectorResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{27} + return file_api_v2_api_proto_rawDescGZIP(), []int{28} } func (x *DeleteConnectorResp) GetNotFound() bool { @@ -1495,7 +1584,7 @@ type ListConnectorReq struct { func (x *ListConnectorReq) Reset() { *x = ListConnectorReq{} - mi := &file_api_v2_api_proto_msgTypes[28] + mi := &file_api_v2_api_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1507,7 +1596,7 @@ func (x *ListConnectorReq) String() string { func (*ListConnectorReq) ProtoMessage() {} func (x *ListConnectorReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[28] + mi := &file_api_v2_api_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1520,7 +1609,7 @@ func (x *ListConnectorReq) ProtoReflect() protoreflect.Message { // Deprecated: Use ListConnectorReq.ProtoReflect.Descriptor instead. func (*ListConnectorReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{28} + return file_api_v2_api_proto_rawDescGZIP(), []int{29} } // ListConnectorResp returns a list of connectors. 
@@ -1533,7 +1622,7 @@ type ListConnectorResp struct { func (x *ListConnectorResp) Reset() { *x = ListConnectorResp{} - mi := &file_api_v2_api_proto_msgTypes[29] + mi := &file_api_v2_api_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1545,7 +1634,7 @@ func (x *ListConnectorResp) String() string { func (*ListConnectorResp) ProtoMessage() {} func (x *ListConnectorResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[29] + mi := &file_api_v2_api_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1558,7 +1647,7 @@ func (x *ListConnectorResp) ProtoReflect() protoreflect.Message { // Deprecated: Use ListConnectorResp.ProtoReflect.Descriptor instead. func (*ListConnectorResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{29} + return file_api_v2_api_proto_rawDescGZIP(), []int{30} } func (x *ListConnectorResp) GetConnectors() []*Connector { @@ -1577,7 +1666,7 @@ type VersionReq struct { func (x *VersionReq) Reset() { *x = VersionReq{} - mi := &file_api_v2_api_proto_msgTypes[30] + mi := &file_api_v2_api_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1589,7 +1678,7 @@ func (x *VersionReq) String() string { func (*VersionReq) ProtoMessage() {} func (x *VersionReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[30] + mi := &file_api_v2_api_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1602,7 +1691,7 @@ func (x *VersionReq) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionReq.ProtoReflect.Descriptor instead. 
func (*VersionReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{30} + return file_api_v2_api_proto_rawDescGZIP(), []int{31} } // VersionResp holds the version info of components. @@ -1619,7 +1708,7 @@ type VersionResp struct { func (x *VersionResp) Reset() { *x = VersionResp{} - mi := &file_api_v2_api_proto_msgTypes[31] + mi := &file_api_v2_api_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1631,7 +1720,7 @@ func (x *VersionResp) String() string { func (*VersionResp) ProtoMessage() {} func (x *VersionResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[31] + mi := &file_api_v2_api_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1644,7 +1733,7 @@ func (x *VersionResp) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionResp.ProtoReflect.Descriptor instead. func (*VersionResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{31} + return file_api_v2_api_proto_rawDescGZIP(), []int{32} } func (x *VersionResp) GetServer() string { @@ -1670,7 +1759,7 @@ type DiscoveryReq struct { func (x *DiscoveryReq) Reset() { *x = DiscoveryReq{} - mi := &file_api_v2_api_proto_msgTypes[32] + mi := &file_api_v2_api_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1682,7 +1771,7 @@ func (x *DiscoveryReq) String() string { func (*DiscoveryReq) ProtoMessage() {} func (x *DiscoveryReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[32] + mi := &file_api_v2_api_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1695,7 +1784,7 @@ func (x *DiscoveryReq) ProtoReflect() protoreflect.Message { // Deprecated: Use DiscoveryReq.ProtoReflect.Descriptor instead. 
func (*DiscoveryReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{32} + return file_api_v2_api_proto_rawDescGZIP(), []int{33} } // DiscoverResp holds the version oidc disovery info. @@ -1722,7 +1811,7 @@ type DiscoveryResp struct { func (x *DiscoveryResp) Reset() { *x = DiscoveryResp{} - mi := &file_api_v2_api_proto_msgTypes[33] + mi := &file_api_v2_api_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1734,7 +1823,7 @@ func (x *DiscoveryResp) String() string { func (*DiscoveryResp) ProtoMessage() {} func (x *DiscoveryResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[33] + mi := &file_api_v2_api_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1747,7 +1836,7 @@ func (x *DiscoveryResp) ProtoReflect() protoreflect.Message { // Deprecated: Use DiscoveryResp.ProtoReflect.Descriptor instead. func (*DiscoveryResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{33} + return file_api_v2_api_proto_rawDescGZIP(), []int{34} } func (x *DiscoveryResp) GetIssuer() string { @@ -1869,7 +1958,7 @@ type RefreshTokenRef struct { func (x *RefreshTokenRef) Reset() { *x = RefreshTokenRef{} - mi := &file_api_v2_api_proto_msgTypes[34] + mi := &file_api_v2_api_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1881,7 +1970,7 @@ func (x *RefreshTokenRef) String() string { func (*RefreshTokenRef) ProtoMessage() {} func (x *RefreshTokenRef) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[34] + mi := &file_api_v2_api_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1894,7 +1983,7 @@ func (x *RefreshTokenRef) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshTokenRef.ProtoReflect.Descriptor 
instead. func (*RefreshTokenRef) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{34} + return file_api_v2_api_proto_rawDescGZIP(), []int{35} } func (x *RefreshTokenRef) GetId() string { @@ -1936,7 +2025,7 @@ type ListRefreshReq struct { func (x *ListRefreshReq) Reset() { *x = ListRefreshReq{} - mi := &file_api_v2_api_proto_msgTypes[35] + mi := &file_api_v2_api_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1948,7 +2037,7 @@ func (x *ListRefreshReq) String() string { func (*ListRefreshReq) ProtoMessage() {} func (x *ListRefreshReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[35] + mi := &file_api_v2_api_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1961,7 +2050,7 @@ func (x *ListRefreshReq) ProtoReflect() protoreflect.Message { // Deprecated: Use ListRefreshReq.ProtoReflect.Descriptor instead. 
func (*ListRefreshReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{35} + return file_api_v2_api_proto_rawDescGZIP(), []int{36} } func (x *ListRefreshReq) GetUserId() string { @@ -1981,7 +2070,7 @@ type ListRefreshResp struct { func (x *ListRefreshResp) Reset() { *x = ListRefreshResp{} - mi := &file_api_v2_api_proto_msgTypes[36] + mi := &file_api_v2_api_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1993,7 +2082,7 @@ func (x *ListRefreshResp) String() string { func (*ListRefreshResp) ProtoMessage() {} func (x *ListRefreshResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[36] + mi := &file_api_v2_api_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2006,7 +2095,7 @@ func (x *ListRefreshResp) ProtoReflect() protoreflect.Message { // Deprecated: Use ListRefreshResp.ProtoReflect.Descriptor instead. 
func (*ListRefreshResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{36} + return file_api_v2_api_proto_rawDescGZIP(), []int{37} } func (x *ListRefreshResp) GetRefreshTokens() []*RefreshTokenRef { @@ -2028,7 +2117,7 @@ type RevokeRefreshReq struct { func (x *RevokeRefreshReq) Reset() { *x = RevokeRefreshReq{} - mi := &file_api_v2_api_proto_msgTypes[37] + mi := &file_api_v2_api_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2040,7 +2129,7 @@ func (x *RevokeRefreshReq) String() string { func (*RevokeRefreshReq) ProtoMessage() {} func (x *RevokeRefreshReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[37] + mi := &file_api_v2_api_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2053,7 +2142,7 @@ func (x *RevokeRefreshReq) ProtoReflect() protoreflect.Message { // Deprecated: Use RevokeRefreshReq.ProtoReflect.Descriptor instead. 
func (*RevokeRefreshReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{37} + return file_api_v2_api_proto_rawDescGZIP(), []int{38} } func (x *RevokeRefreshReq) GetUserId() string { @@ -2081,7 +2170,7 @@ type RevokeRefreshResp struct { func (x *RevokeRefreshResp) Reset() { *x = RevokeRefreshResp{} - mi := &file_api_v2_api_proto_msgTypes[38] + mi := &file_api_v2_api_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2093,7 +2182,7 @@ func (x *RevokeRefreshResp) String() string { func (*RevokeRefreshResp) ProtoMessage() {} func (x *RevokeRefreshResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[38] + mi := &file_api_v2_api_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2106,7 +2195,7 @@ func (x *RevokeRefreshResp) ProtoReflect() protoreflect.Message { // Deprecated: Use RevokeRefreshResp.ProtoReflect.Descriptor instead. 
func (*RevokeRefreshResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{38} + return file_api_v2_api_proto_rawDescGZIP(), []int{39} } func (x *RevokeRefreshResp) GetNotFound() bool { @@ -2126,7 +2215,7 @@ type VerifyPasswordReq struct { func (x *VerifyPasswordReq) Reset() { *x = VerifyPasswordReq{} - mi := &file_api_v2_api_proto_msgTypes[39] + mi := &file_api_v2_api_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2138,7 +2227,7 @@ func (x *VerifyPasswordReq) String() string { func (*VerifyPasswordReq) ProtoMessage() {} func (x *VerifyPasswordReq) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[39] + mi := &file_api_v2_api_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2151,7 +2240,7 @@ func (x *VerifyPasswordReq) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyPasswordReq.ProtoReflect.Descriptor instead. 
func (*VerifyPasswordReq) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{39} + return file_api_v2_api_proto_rawDescGZIP(), []int{40} } func (x *VerifyPasswordReq) GetEmail() string { @@ -2178,7 +2267,7 @@ type VerifyPasswordResp struct { func (x *VerifyPasswordResp) Reset() { *x = VerifyPasswordResp{} - mi := &file_api_v2_api_proto_msgTypes[40] + mi := &file_api_v2_api_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2190,7 +2279,7 @@ func (x *VerifyPasswordResp) String() string { func (*VerifyPasswordResp) ProtoMessage() {} func (x *VerifyPasswordResp) ProtoReflect() protoreflect.Message { - mi := &file_api_v2_api_proto_msgTypes[40] + mi := &file_api_v2_api_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2203,7 +2292,7 @@ func (x *VerifyPasswordResp) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyPasswordResp.ProtoReflect.Descriptor instead. 
func (*VerifyPasswordResp) Descriptor() ([]byte, []int) { - return file_api_v2_api_proto_rawDescGZIP(), []int{40} + return file_api_v2_api_proto_rawDescGZIP(), []int{41} } func (x *VerifyPasswordResp) GetVerified() bool { @@ -2224,7 +2313,7 @@ var File_api_v2_api_proto protoreflect.FileDescriptor var file_api_v2_api_proto_rawDesc = string([]byte{ 0x0a, 0x10, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x03, 0x61, 0x70, 0x69, 0x22, 0xc1, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, + 0x74, 0x6f, 0x12, 0x03, 0x61, 0x70, 0x69, 0x22, 0xf0, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, @@ -2236,294 +2325,311 @@ var file_api_v2_api_proto_rawDesc = string([]byte{ 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0xad, 0x01, 0x0a, 0x0a, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, - 0x65, 0x65, 0x72, 
0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0x1e, 0x0a, 0x0c, 0x47, - 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x34, 0x0a, 0x0d, 0x47, - 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x23, 0x0a, 0x06, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x22, 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, - 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, - 0x69, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x9a, 0x01, - 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, - 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, - 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0x2f, 0x0a, 0x10, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, - 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x0f, 0x0a, 0x0d, 0x4c, - 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x22, 0x3b, 0x0a, 0x0e, - 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x29, - 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 
0x32, - 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x69, 0x0a, 0x08, 0x50, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, - 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, - 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, - 0x65, 0x72, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3b, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, - 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x73, 0x22, 0x67, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x08, - 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x6e, 0x65, 0x77, 0x48, 0x61, 
0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x75, - 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x31, 0x0a, 0x12, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x29, 0x0a, - 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x31, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, + 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0xdc, 0x01, 0x0a, 0x0a, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, + 0x65, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6c, 
0x69, 0x63, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x1e, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x34, 0x0a, 0x0d, 0x47, 0x65, 0x74, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, + 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, + 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 
0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x4c, - 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x22, 0x3f, - 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x12, 0x2b, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x09, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x22, - 0x5b, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x42, 0x0a, 0x12, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x71, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 
0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x22, 0x3c, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, + 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0xc9, 0x01, 0x0a, 0x0f, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, + 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x2f, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x0f, 0x0a, 0x0d, 0x4c, 0x69, 
0x73, 0x74, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x22, 0x3b, 0x0a, 0x0e, 0x4c, 0x69, 0x73, + 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x29, 0x0a, 0x07, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x69, 0x0a, 0x08, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, + 0x64, 0x22, 0x3e, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x22, 0x3b, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x79, - 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, - 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, - 0x6e, 0x65, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x32, 0x0a, 0x13, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x24, 0x0a, - 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, - 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, - 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x22, 0x43, 0x0a, 0x11, 0x4c, - 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 
0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, - 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x22, 0x37, - 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22, 0x0e, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x22, 0xb0, 0x06, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, - 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, - 0x72, 0x12, 0x35, 0x0a, 0x16, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x15, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x19, 0x0a, 0x08, 0x6a, 0x77, 0x6b, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6a, 0x77, 0x6b, 0x73, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x73, - 0x65, 0x72, 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x75, 0x73, 0x65, 0x72, 0x69, 0x6e, 0x66, 0x6f, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x42, 0x0a, 0x1d, 0x64, 0x65, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 
0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b, - 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x16, 0x69, - 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x69, 0x6e, 0x74, - 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x13, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x12, 0x36, 0x0a, 0x17, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x15, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x4f, 0x0a, 0x25, 0x69, 0x64, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x6c, 0x67, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x69, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 
0x67, 0x41, 0x6c, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x64, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x64, 0x65, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, - 0x67, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x50, 0x0a, - 0x25, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x21, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6c, 0x61, 0x69, 0x6d, - 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x22, 0x7a, 0x0a, 0x0f, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 
0x0a, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x61, 0x73, - 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6c, 0x61, - 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, - 0x64, 0x22, 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, - 0x65, 0x66, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x73, 0x22, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x30, 0x0a, 0x11, 0x52, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x45, 0x0a, - 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x71, 
0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, - 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, - 0x75, 0x6e, 0x64, 0x32, 0x8b, 0x09, 0x0a, 0x03, 0x44, 0x65, 0x78, 0x12, 0x34, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, - 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x12, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, - 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, - 0x12, 0x3d, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, - 0x3d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 
0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, - 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x38, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x1a, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, - 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, - 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 
0x74, 0x50, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x15, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, + 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x67, + 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x77, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x75, 0x73, 0x65, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x55, + 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x31, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, + 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x29, 0x0a, 0x11, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x31, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x6f, 0x74, 0x5f, 0x66, 
0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x22, 0x3f, 0x0a, 0x10, 0x4c, + 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, + 0x2b, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x52, 0x09, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x7c, 0x0a, 0x09, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x72, 0x61, + 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, + 0x67, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x42, 0x0a, 0x12, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, + 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x3c, + 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 
0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, + 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, + 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x2d, 0x0a, 0x0a, + 0x47, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x72, + 0x61, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0a, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x12, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, + 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6e, 0x65, + 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x67, + 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x32, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x12, + 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x71, 0x22, 0x43, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x22, 0x37, 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, + 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22, 0x0e, + 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x22, 0xb0, + 0x06, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x16, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x15, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x77, 0x6b, 0x73, 0x5f, 0x75, + 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x77, 0x6b, 0x73, 0x55, 0x72, + 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x73, 0x65, 0x72, 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x75, 0x73, + 0x65, 0x72, 0x69, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x42, + 0x0a, 0x1d, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x16, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x67, 0x72, 0x61, + 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, + 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 
0x70, 0x65, 0x73, 0x5f, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x16, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, + 0x4f, 0x0a, 0x25, 0x69, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x6c, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x73, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, + 0x69, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6c, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, + 0x67, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x64, 0x65, + 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x63, 0x6f, + 0x70, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x12, 0x50, 0x0a, 0x25, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 
0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x21, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, + 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x22, 0x7a, 0x0a, 0x0f, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, + 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, + 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 
0x66, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, + 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, + 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x22, 0x30, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x45, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x32, 0x8b, 0x09, 0x0a, 0x03, 0x44, + 0x65, 0x78, 0x12, 0x34, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, + 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x52, + 0x65, 0x71, 0x1a, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, + 0x43, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x72, 0x65, 0x61, 
0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, + 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, + 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x46, + 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, - 0x46, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 
0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, + 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x46, + 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, - 0x41, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x73, 0x12, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x22, 
0x00, 0x12, 0x31, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3a, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x13, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, - 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52, 0x65, - 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, - 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, - 0x00, 0x42, 0x36, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73, 0x2e, - 0x64, 0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 
0x5a, 0x20, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x1a, + 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x47, 0x65, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x11, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x1a, + 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x12, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 
0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, + 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42, 0x36, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73, 0x2e, 0x64, 0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 0x5a, 0x20, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, + 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x70, 0x69, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( @@ -2538,7 +2644,7 @@ func file_api_v2_api_proto_rawDescGZIP() []byte { return file_api_v2_api_proto_rawDescData } -var file_api_v2_api_proto_msgTypes = make([]protoimpl.MessageInfo, 41) +var file_api_v2_api_proto_msgTypes = make([]protoimpl.MessageInfo, 42) var file_api_v2_api_proto_goTypes = []any{ (*Client)(nil), // 0: api.Client (*ClientInfo)(nil), // 1: api.ClientInfo @@ -2564,23 +2670,24 @@ var file_api_v2_api_proto_goTypes = []any{ (*Connector)(nil), // 21: api.Connector (*CreateConnectorReq)(nil), // 22: api.CreateConnectorReq (*CreateConnectorResp)(nil), // 23: api.CreateConnectorResp - (*UpdateConnectorReq)(nil), // 24: api.UpdateConnectorReq - (*UpdateConnectorResp)(nil), // 25: api.UpdateConnectorResp - (*DeleteConnectorReq)(nil), // 26: api.DeleteConnectorReq - (*DeleteConnectorResp)(nil), // 27: api.DeleteConnectorResp - (*ListConnectorReq)(nil), // 28: api.ListConnectorReq - (*ListConnectorResp)(nil), // 29: api.ListConnectorResp - (*VersionReq)(nil), // 30: api.VersionReq - (*VersionResp)(nil), // 31: 
api.VersionResp - (*DiscoveryReq)(nil), // 32: api.DiscoveryReq - (*DiscoveryResp)(nil), // 33: api.DiscoveryResp - (*RefreshTokenRef)(nil), // 34: api.RefreshTokenRef - (*ListRefreshReq)(nil), // 35: api.ListRefreshReq - (*ListRefreshResp)(nil), // 36: api.ListRefreshResp - (*RevokeRefreshReq)(nil), // 37: api.RevokeRefreshReq - (*RevokeRefreshResp)(nil), // 38: api.RevokeRefreshResp - (*VerifyPasswordReq)(nil), // 39: api.VerifyPasswordReq - (*VerifyPasswordResp)(nil), // 40: api.VerifyPasswordResp + (*GrantTypes)(nil), // 24: api.GrantTypes + (*UpdateConnectorReq)(nil), // 25: api.UpdateConnectorReq + (*UpdateConnectorResp)(nil), // 26: api.UpdateConnectorResp + (*DeleteConnectorReq)(nil), // 27: api.DeleteConnectorReq + (*DeleteConnectorResp)(nil), // 28: api.DeleteConnectorResp + (*ListConnectorReq)(nil), // 29: api.ListConnectorReq + (*ListConnectorResp)(nil), // 30: api.ListConnectorResp + (*VersionReq)(nil), // 31: api.VersionReq + (*VersionResp)(nil), // 32: api.VersionResp + (*DiscoveryReq)(nil), // 33: api.DiscoveryReq + (*DiscoveryResp)(nil), // 34: api.DiscoveryResp + (*RefreshTokenRef)(nil), // 35: api.RefreshTokenRef + (*ListRefreshReq)(nil), // 36: api.ListRefreshReq + (*ListRefreshResp)(nil), // 37: api.ListRefreshResp + (*RevokeRefreshReq)(nil), // 38: api.RevokeRefreshReq + (*RevokeRefreshResp)(nil), // 39: api.RevokeRefreshResp + (*VerifyPasswordReq)(nil), // 40: api.VerifyPasswordReq + (*VerifyPasswordResp)(nil), // 41: api.VerifyPasswordResp } var file_api_v2_api_proto_depIdxs = []int32{ 0, // 0: api.GetClientResp.client:type_name -> api.Client @@ -2590,49 +2697,50 @@ var file_api_v2_api_proto_depIdxs = []int32{ 12, // 4: api.CreatePasswordReq.password:type_name -> api.Password 12, // 5: api.ListPasswordResp.passwords:type_name -> api.Password 21, // 6: api.CreateConnectorReq.connector:type_name -> api.Connector - 21, // 7: api.ListConnectorResp.connectors:type_name -> api.Connector - 34, // 8: api.ListRefreshResp.refresh_tokens:type_name -> 
api.RefreshTokenRef - 2, // 9: api.Dex.GetClient:input_type -> api.GetClientReq - 4, // 10: api.Dex.CreateClient:input_type -> api.CreateClientReq - 8, // 11: api.Dex.UpdateClient:input_type -> api.UpdateClientReq - 6, // 12: api.Dex.DeleteClient:input_type -> api.DeleteClientReq - 10, // 13: api.Dex.ListClients:input_type -> api.ListClientReq - 13, // 14: api.Dex.CreatePassword:input_type -> api.CreatePasswordReq - 15, // 15: api.Dex.UpdatePassword:input_type -> api.UpdatePasswordReq - 17, // 16: api.Dex.DeletePassword:input_type -> api.DeletePasswordReq - 19, // 17: api.Dex.ListPasswords:input_type -> api.ListPasswordReq - 22, // 18: api.Dex.CreateConnector:input_type -> api.CreateConnectorReq - 24, // 19: api.Dex.UpdateConnector:input_type -> api.UpdateConnectorReq - 26, // 20: api.Dex.DeleteConnector:input_type -> api.DeleteConnectorReq - 28, // 21: api.Dex.ListConnectors:input_type -> api.ListConnectorReq - 30, // 22: api.Dex.GetVersion:input_type -> api.VersionReq - 32, // 23: api.Dex.GetDiscovery:input_type -> api.DiscoveryReq - 35, // 24: api.Dex.ListRefresh:input_type -> api.ListRefreshReq - 37, // 25: api.Dex.RevokeRefresh:input_type -> api.RevokeRefreshReq - 39, // 26: api.Dex.VerifyPassword:input_type -> api.VerifyPasswordReq - 3, // 27: api.Dex.GetClient:output_type -> api.GetClientResp - 5, // 28: api.Dex.CreateClient:output_type -> api.CreateClientResp - 9, // 29: api.Dex.UpdateClient:output_type -> api.UpdateClientResp - 7, // 30: api.Dex.DeleteClient:output_type -> api.DeleteClientResp - 11, // 31: api.Dex.ListClients:output_type -> api.ListClientResp - 14, // 32: api.Dex.CreatePassword:output_type -> api.CreatePasswordResp - 16, // 33: api.Dex.UpdatePassword:output_type -> api.UpdatePasswordResp - 18, // 34: api.Dex.DeletePassword:output_type -> api.DeletePasswordResp - 20, // 35: api.Dex.ListPasswords:output_type -> api.ListPasswordResp - 23, // 36: api.Dex.CreateConnector:output_type -> api.CreateConnectorResp - 25, // 37: 
api.Dex.UpdateConnector:output_type -> api.UpdateConnectorResp - 27, // 38: api.Dex.DeleteConnector:output_type -> api.DeleteConnectorResp - 29, // 39: api.Dex.ListConnectors:output_type -> api.ListConnectorResp - 31, // 40: api.Dex.GetVersion:output_type -> api.VersionResp - 33, // 41: api.Dex.GetDiscovery:output_type -> api.DiscoveryResp - 36, // 42: api.Dex.ListRefresh:output_type -> api.ListRefreshResp - 38, // 43: api.Dex.RevokeRefresh:output_type -> api.RevokeRefreshResp - 40, // 44: api.Dex.VerifyPassword:output_type -> api.VerifyPasswordResp - 27, // [27:45] is the sub-list for method output_type - 9, // [9:27] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 24, // 7: api.UpdateConnectorReq.new_grant_types:type_name -> api.GrantTypes + 21, // 8: api.ListConnectorResp.connectors:type_name -> api.Connector + 35, // 9: api.ListRefreshResp.refresh_tokens:type_name -> api.RefreshTokenRef + 2, // 10: api.Dex.GetClient:input_type -> api.GetClientReq + 4, // 11: api.Dex.CreateClient:input_type -> api.CreateClientReq + 8, // 12: api.Dex.UpdateClient:input_type -> api.UpdateClientReq + 6, // 13: api.Dex.DeleteClient:input_type -> api.DeleteClientReq + 10, // 14: api.Dex.ListClients:input_type -> api.ListClientReq + 13, // 15: api.Dex.CreatePassword:input_type -> api.CreatePasswordReq + 15, // 16: api.Dex.UpdatePassword:input_type -> api.UpdatePasswordReq + 17, // 17: api.Dex.DeletePassword:input_type -> api.DeletePasswordReq + 19, // 18: api.Dex.ListPasswords:input_type -> api.ListPasswordReq + 22, // 19: api.Dex.CreateConnector:input_type -> api.CreateConnectorReq + 25, // 20: api.Dex.UpdateConnector:input_type -> api.UpdateConnectorReq + 27, // 21: api.Dex.DeleteConnector:input_type -> api.DeleteConnectorReq + 29, // 22: api.Dex.ListConnectors:input_type -> api.ListConnectorReq + 31, // 23: 
api.Dex.GetVersion:input_type -> api.VersionReq + 33, // 24: api.Dex.GetDiscovery:input_type -> api.DiscoveryReq + 36, // 25: api.Dex.ListRefresh:input_type -> api.ListRefreshReq + 38, // 26: api.Dex.RevokeRefresh:input_type -> api.RevokeRefreshReq + 40, // 27: api.Dex.VerifyPassword:input_type -> api.VerifyPasswordReq + 3, // 28: api.Dex.GetClient:output_type -> api.GetClientResp + 5, // 29: api.Dex.CreateClient:output_type -> api.CreateClientResp + 9, // 30: api.Dex.UpdateClient:output_type -> api.UpdateClientResp + 7, // 31: api.Dex.DeleteClient:output_type -> api.DeleteClientResp + 11, // 32: api.Dex.ListClients:output_type -> api.ListClientResp + 14, // 33: api.Dex.CreatePassword:output_type -> api.CreatePasswordResp + 16, // 34: api.Dex.UpdatePassword:output_type -> api.UpdatePasswordResp + 18, // 35: api.Dex.DeletePassword:output_type -> api.DeletePasswordResp + 20, // 36: api.Dex.ListPasswords:output_type -> api.ListPasswordResp + 23, // 37: api.Dex.CreateConnector:output_type -> api.CreateConnectorResp + 26, // 38: api.Dex.UpdateConnector:output_type -> api.UpdateConnectorResp + 28, // 39: api.Dex.DeleteConnector:output_type -> api.DeleteConnectorResp + 30, // 40: api.Dex.ListConnectors:output_type -> api.ListConnectorResp + 32, // 41: api.Dex.GetVersion:output_type -> api.VersionResp + 34, // 42: api.Dex.GetDiscovery:output_type -> api.DiscoveryResp + 37, // 43: api.Dex.ListRefresh:output_type -> api.ListRefreshResp + 39, // 44: api.Dex.RevokeRefresh:output_type -> api.RevokeRefreshResp + 41, // 45: api.Dex.VerifyPassword:output_type -> api.VerifyPasswordResp + 28, // [28:46] is the sub-list for method output_type + 10, // [10:28] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name } func init() { file_api_v2_api_proto_init() } @@ -2646,7 +2754,7 @@ func file_api_v2_api_proto_init() { GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_v2_api_proto_rawDesc), len(file_api_v2_api_proto_rawDesc)), NumEnums: 0, - NumMessages: 41, + NumMessages: 42, NumExtensions: 0, NumServices: 1, }, diff --git a/api/v2/api.proto b/api/v2/api.proto index fc14913b..4d5b850e 100644 --- a/api/v2/api.proto +++ b/api/v2/api.proto @@ -14,6 +14,7 @@ message Client { bool public = 5; string name = 6; string logo_url = 7; + repeated string allowed_connectors = 8; } // ClientInfo represents an OAuth2 client without sensitive information. @@ -24,6 +25,7 @@ message ClientInfo { bool public = 4; string name = 5; string logo_url = 6; + repeated string allowed_connectors = 7; } // GetClientReq is a request to retrieve client details. @@ -66,6 +68,7 @@ message UpdateClientReq { repeated string trusted_peers = 3; string name = 4; string logo_url = 5; + repeated string allowed_connectors = 6; } // UpdateClientResp returns the response from updating a client. @@ -140,6 +143,7 @@ message Connector { string type = 2; string name = 3; bytes config = 4; + repeated string grant_types = 5; } // CreateConnectorReq is a request to make a connector. @@ -152,6 +156,12 @@ message CreateConnectorResp { bool already_exists = 1; } +// GrantTypes wraps a list of grant types to distinguish between +// "not specified" (no update) and "empty list" (unrestricted). +message GrantTypes { + repeated string grant_types = 1; +} + // UpdateConnectorReq is a request to modify an existing connector. message UpdateConnectorReq { // The id used to lookup the connector. This field cannot be modified @@ -159,6 +169,10 @@ message UpdateConnectorReq { string new_type = 2; string new_name = 3; bytes new_config = 4; + // If set, updates the connector's allowed grant types. + // An empty grant_types list means unrestricted (all grant types allowed). + // If not set (null), grant types are not modified. 
+ GrantTypes new_grant_types = 5; } // UpdateConnectorResp returns the response from modifying an existing connector. diff --git a/api/v2/go.mod b/api/v2/go.mod index 6c14b7e4..289e3b81 100644 --- a/api/v2/go.mod +++ b/api/v2/go.mod @@ -3,7 +3,7 @@ module github.com/dexidp/dex/api/v2 go 1.24.0 require ( - google.golang.org/grpc v1.79.1 + google.golang.org/grpc v1.79.2 google.golang.org/protobuf v1.36.11 ) diff --git a/api/v2/go.sum b/api/v2/go.sum index 727897fb..e9906532 100644 --- a/api/v2/go.sum +++ b/api/v2/go.sum @@ -32,7 +32,7 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= -google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= -google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= diff --git a/cmd/dex/config.go b/cmd/dex/config.go index 8861ddef..8efc3c38 100644 --- a/cmd/dex/config.go +++ b/cmd/dex/config.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "encoding/base64" "encoding/json" "fmt" @@ -23,6 +24,15 @@ import ( "github.com/dexidp/dex/storage/sql" ) +func configUnmarshaller(b []byte, v interface{}) error { + if !featureflags.ConfigDisallowUnknownFields.Enabled() { + return json.Unmarshal(b, v) + } + dec := json.NewDecoder(bytes.NewReader(b)) + dec.DisallowUnknownFields() + return dec.Decode(v) 
+} + // Config is the config format for the main application. type Config struct { Issuer string `json:"issuer"` @@ -89,6 +99,7 @@ func (c Config) Validate() error { checkErrors = append(checkErrors, check.errMsg) } } + if len(checkErrors) != 0 { return fmt.Errorf("invalid Config:\n\t-\t%s", strings.Join(checkErrors, "\n\t-\t")) } @@ -109,7 +120,7 @@ func (p *password) UnmarshalJSON(b []byte) error { HashFromEnv string `json:"hashFromEnv"` Groups []string `json:"groups"` } - if err := json.Unmarshal(b, &data); err != nil { + if err := configUnmarshaller(b, &data); err != nil { return err } *p = password(storage.Password{ @@ -161,6 +172,16 @@ type OAuth2 struct { AlwaysShowLoginScreen bool `json:"alwaysShowLoginScreen"` // This is the connector that can be used for password grant PasswordConnector string `json:"passwordConnector"` + // PKCE configuration + PKCE PKCE `json:"pkce"` +} + +// PKCE holds the PKCE (Proof Key for Code Exchange) configuration. +type PKCE struct { + // If true, PKCE is required for all authorization code flows. + Enforce bool `json:"enforce"` + // Supported code challenge methods. Defaults to ["S256", "plain"]. + CodeChallengeMethodsSupported []string `json:"codeChallengeMethodsSupported"` } // Web is the config format for the HTTP server. 
@@ -333,7 +354,7 @@ func (s *Storage) UnmarshalJSON(b []byte) error { Type string `json:"type"` Config json.RawMessage `json:"config"` } - if err := json.Unmarshal(b, &store); err != nil { + if err := configUnmarshaller(b, &store); err != nil { return fmt.Errorf("parse storage: %v", err) } f, ok := storages[store.Type] @@ -346,7 +367,7 @@ func (s *Storage) UnmarshalJSON(b []byte) error { data := []byte(store.Config) if featureflags.ExpandEnv.Enabled() { var rawMap map[string]interface{} - if err := json.Unmarshal(store.Config, &rawMap); err != nil { + if err := configUnmarshaller(store.Config, &rawMap); err != nil { return fmt.Errorf("unmarshal config for env expansion: %v", err) } @@ -363,7 +384,7 @@ func (s *Storage) UnmarshalJSON(b []byte) error { data = expandedData } - if err := json.Unmarshal(data, storageConfig); err != nil { + if err := configUnmarshaller(data, storageConfig); err != nil { return fmt.Errorf("parse storage config: %v", err) } } @@ -449,7 +470,8 @@ type Connector struct { Name string `json:"name"` ID string `json:"id"` - Config server.ConnectorConfig `json:"config"` + Config server.ConnectorConfig `json:"config"` + GrantTypes []string `json:"grantTypes"` } // UnmarshalJSON allows Connector to implement the unmarshaler interface to @@ -460,9 +482,10 @@ func (c *Connector) UnmarshalJSON(b []byte) error { Name string `json:"name"` ID string `json:"id"` - Config json.RawMessage `json:"config"` + Config json.RawMessage `json:"config"` + GrantTypes []string `json:"grantTypes"` } - if err := json.Unmarshal(b, &conn); err != nil { + if err := configUnmarshaller(b, &conn); err != nil { return fmt.Errorf("parse connector: %v", err) } f, ok := server.ConnectorsConfig[conn.Type] @@ -475,7 +498,7 @@ func (c *Connector) UnmarshalJSON(b []byte) error { data := []byte(conn.Config) if featureflags.ExpandEnv.Enabled() { var rawMap map[string]interface{} - if err := json.Unmarshal(conn.Config, &rawMap); err != nil { + if err := configUnmarshaller(conn.Config, 
&rawMap); err != nil { return fmt.Errorf("unmarshal config for env expansion: %v", err) } @@ -492,16 +515,17 @@ func (c *Connector) UnmarshalJSON(b []byte) error { data = expandedData } - if err := json.Unmarshal(data, connConfig); err != nil { + if err := configUnmarshaller(data, connConfig); err != nil { return fmt.Errorf("parse connector config: %v", err) } } *c = Connector{ - Type: conn.Type, - Name: conn.Name, - ID: conn.ID, - Config: connConfig, + Type: conn.Type, + Name: conn.Name, + ID: conn.ID, + Config: connConfig, + GrantTypes: conn.GrantTypes, } return nil } @@ -514,10 +538,11 @@ func ToStorageConnector(c Connector) (storage.Connector, error) { } return storage.Connector{ - ID: c.ID, - Type: c.Type, - Name: c.Name, - Config: data, + ID: c.ID, + Type: c.Type, + Name: c.Name, + Config: data, + GrantTypes: c.GrantTypes, }, nil } @@ -546,6 +571,12 @@ type Logger struct { // Format specifies the format to be used for logging. Format string `json:"format"` + + // ExcludeFields specifies log attribute keys that should be dropped from all + // log output. This is useful for suppressing PII fields like email, username, + // preferred_username, or groups in environments subject to GDPR or similar + // data-handling constraints. 
+ ExcludeFields []string `json:"excludeFields"` } type RefreshToken struct { diff --git a/cmd/dex/config_test.go b/cmd/dex/config_test.go index 1edca7cf..26385f56 100644 --- a/cmd/dex/config_test.go +++ b/cmd/dex/config_test.go @@ -107,6 +107,9 @@ connectors: - type: mockCallback id: mock name: Example + grantTypes: + - authorization_code + - "urn:ietf:params:oauth:grant-type:token-exchange" - type: oidc id: google name: Google @@ -202,6 +205,10 @@ additionalFeatures: [ ID: "mock", Name: "Example", Config: &mock.CallbackConfig{}, + GrantTypes: []string{ + "authorization_code", + "urn:ietf:params:oauth:grant-type:token-exchange", + }, }, { Type: "oidc", diff --git a/cmd/dex/excluding_handler.go b/cmd/dex/excluding_handler.go new file mode 100644 index 00000000..c5d03e44 --- /dev/null +++ b/cmd/dex/excluding_handler.go @@ -0,0 +1,56 @@ +package main + +import ( + "context" + "log/slog" +) + +// excludingHandler is an slog.Handler wrapper that drops log attributes +// whose keys match a configured set. This allows PII fields like email, +// username, or groups to be redacted at the logger level rather than +// requiring per-callsite suppression logic. +type excludingHandler struct { + inner slog.Handler + exclude map[string]bool +} + +func newExcludingHandler(inner slog.Handler, fields []string) slog.Handler { + if len(fields) == 0 { + return inner + } + m := make(map[string]bool, len(fields)) + for _, f := range fields { + m[f] = true + } + return &excludingHandler{inner: inner, exclude: m} +} + +func (h *excludingHandler) Enabled(ctx context.Context, level slog.Level) bool { + return h.inner.Enabled(ctx, level) +} + +func (h *excludingHandler) Handle(ctx context.Context, record slog.Record) error { + // Rebuild the record without excluded attributes. 
+ filtered := slog.NewRecord(record.Time, record.Level, record.Message, record.PC) + record.Attrs(func(a slog.Attr) bool { + if !h.exclude[a.Key] { + filtered.AddAttrs(a) + } + return true + }) + return h.inner.Handle(ctx, filtered) +} + +func (h *excludingHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + var kept []slog.Attr + for _, a := range attrs { + if !h.exclude[a.Key] { + kept = append(kept, a) + } + } + return &excludingHandler{inner: h.inner.WithAttrs(kept), exclude: h.exclude} +} + +func (h *excludingHandler) WithGroup(name string) slog.Handler { + return &excludingHandler{inner: h.inner.WithGroup(name), exclude: h.exclude} +} diff --git a/cmd/dex/excluding_handler_test.go b/cmd/dex/excluding_handler_test.go new file mode 100644 index 00000000..e0306d60 --- /dev/null +++ b/cmd/dex/excluding_handler_test.go @@ -0,0 +1,141 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "log/slog" + "testing" +) + +func TestExcludingHandler(t *testing.T) { + tests := []struct { + name string + exclude []string + logAttrs []slog.Attr + wantKeys []string + absentKeys []string + }{ + { + name: "no exclusions", + exclude: nil, + logAttrs: []slog.Attr{ + slog.String("email", "user@example.com"), + slog.String("connector_id", "github"), + }, + wantKeys: []string{"email", "connector_id"}, + }, + { + name: "exclude email", + exclude: []string{"email"}, + logAttrs: []slog.Attr{ + slog.String("email", "user@example.com"), + slog.String("connector_id", "github"), + }, + wantKeys: []string{"connector_id"}, + absentKeys: []string{"email"}, + }, + { + name: "exclude multiple fields", + exclude: []string{"email", "username", "groups"}, + logAttrs: []slog.Attr{ + slog.String("email", "user@example.com"), + slog.String("username", "johndoe"), + slog.String("connector_id", "github"), + slog.Any("groups", []string{"admin"}), + }, + wantKeys: []string{"connector_id"}, + absentKeys: []string{"email", "username", "groups"}, + }, + { + name: "exclude non-existent 
field is harmless", + exclude: []string{"nonexistent"}, + logAttrs: []slog.Attr{ + slog.String("email", "user@example.com"), + }, + wantKeys: []string{"email"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + inner := slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo}) + handler := newExcludingHandler(inner, tt.exclude) + logger := slog.New(handler) + + attrs := make([]any, 0, len(tt.logAttrs)*2) + for _, a := range tt.logAttrs { + attrs = append(attrs, a) + } + logger.Info("test message", attrs...) + + var result map[string]any + if err := json.Unmarshal(buf.Bytes(), &result); err != nil { + t.Fatalf("failed to parse log output: %v", err) + } + + for _, key := range tt.wantKeys { + if _, ok := result[key]; !ok { + t.Errorf("expected key %q in log output", key) + } + } + for _, key := range tt.absentKeys { + if _, ok := result[key]; ok { + t.Errorf("expected key %q to be absent from log output", key) + } + } + }) + } +} + +func TestExcludingHandlerWithAttrs(t *testing.T) { + var buf bytes.Buffer + inner := slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo}) + handler := newExcludingHandler(inner, []string{"email"}) + logger := slog.New(handler) + + // Pre-bind an excluded attr via With + child := logger.With("email", "user@example.com", "connector_id", "github") + child.Info("login successful") + + var result map[string]any + if err := json.Unmarshal(buf.Bytes(), &result); err != nil { + t.Fatalf("failed to parse log output: %v", err) + } + + if _, ok := result["email"]; ok { + t.Error("expected email to be excluded from WithAttrs output") + } + if _, ok := result["connector_id"]; !ok { + t.Error("expected connector_id to be present") + } +} + +func TestExcludingHandlerEnabled(t *testing.T) { + inner := slog.NewJSONHandler(&bytes.Buffer{}, &slog.HandlerOptions{Level: slog.LevelWarn}) + handler := newExcludingHandler(inner, []string{"email"}) + + if 
handler.Enabled(context.Background(), slog.LevelInfo) { + t.Error("expected Info to be disabled when handler level is Warn") + } + if !handler.Enabled(context.Background(), slog.LevelWarn) { + t.Error("expected Warn to be enabled") + } +} + +func TestExcludingHandlerNilFields(t *testing.T) { + var buf bytes.Buffer + inner := slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo}) + + // With nil/empty fields, should return the inner handler directly + handler := newExcludingHandler(inner, nil) + if _, ok := handler.(*excludingHandler); ok { + t.Error("expected nil fields to return inner handler directly, not wrap it") + } + + handler = newExcludingHandler(inner, []string{}) + if _, ok := handler.(*excludingHandler); ok { + t.Error("expected empty fields to return inner handler directly, not wrap it") + } +} diff --git a/cmd/dex/logger.go b/cmd/dex/logger.go index c1fe6b4a..fd4bd294 100644 --- a/cmd/dex/logger.go +++ b/cmd/dex/logger.go @@ -12,7 +12,7 @@ import ( var logFormats = []string{"json", "text"} -func newLogger(level slog.Level, format string) (*slog.Logger, error) { +func newLogger(level slog.Level, format string, excludeFields []string) (*slog.Logger, error) { var handler slog.Handler switch strings.ToLower(format) { case "", "text": @@ -27,6 +27,8 @@ func newLogger(level slog.Level, format string) (*slog.Logger, error) { return nil, fmt.Errorf("log format is not one of the supported values (%s): %s", strings.Join(logFormats, ", "), format) } + handler = newExcludingHandler(handler, excludeFields) + return slog.New(newRequestContextHandler(handler)), nil } diff --git a/cmd/dex/serve.go b/cmd/dex/serve.go index 54d150af..5cc0877a 100644 --- a/cmd/dex/serve.go +++ b/cmd/dex/serve.go @@ -97,13 +97,19 @@ func runServe(options serveOptions) error { } var c Config - if err := yaml.Unmarshal(configData, &c); err != nil { + + jsonConfigData, err := yaml.YAMLToJSON(configData) + if err != nil { return fmt.Errorf("error parse config file %s: %v", 
configFile, err) } + if err := configUnmarshaller(jsonConfigData, &c); err != nil { + return fmt.Errorf("error unmarshalling config file %s: %v", configFile, err) + } + applyConfigOverrides(options, &c) - logger, err := newLogger(c.Logger.Level, c.Logger.Format) + logger, err := newLogger(c.Logger.Level, c.Logger.Format, c.Logger.ExcludeFields) if err != nil { return fmt.Errorf("invalid config: %v", err) } @@ -249,6 +255,11 @@ func runServe(options serveOptions) error { if c.Config == nil { return fmt.Errorf("invalid config: no config field for connector %q", c.ID) } + for _, gt := range c.GrantTypes { + if !server.ConnectorGrantTypes[gt] { + return fmt.Errorf("invalid config: unknown grant type %q for connector %q", gt, c.ID) + } + } logger.Info("config connector", "connector_id", c.ID) // convert to a storage connector object @@ -351,11 +362,15 @@ func runServe(options serveOptions) error { } serverConfig := server.Config{ - AllowedGrantTypes: c.OAuth2.GrantTypes, - SupportedResponseTypes: c.OAuth2.ResponseTypes, - SkipApprovalScreen: c.OAuth2.SkipApprovalScreen, - AlwaysShowLoginScreen: c.OAuth2.AlwaysShowLoginScreen, - PasswordConnector: c.OAuth2.PasswordConnector, + AllowedGrantTypes: c.OAuth2.GrantTypes, + SupportedResponseTypes: c.OAuth2.ResponseTypes, + SkipApprovalScreen: c.OAuth2.SkipApprovalScreen, + AlwaysShowLoginScreen: c.OAuth2.AlwaysShowLoginScreen, + PasswordConnector: c.OAuth2.PasswordConnector, + PKCE: server.PKCEConfig{ + Enforce: c.OAuth2.PKCE.Enforce, + CodeChallengeMethodsSupported: c.OAuth2.PKCE.CodeChallengeMethodsSupported, + }, Headers: c.Web.Headers.ToHTTPHeader(), AllowedOrigins: c.Web.AllowedOrigins, AllowedHeaders: c.Web.AllowedHeaders, @@ -382,7 +397,7 @@ func runServe(options serveOptions) error { if c.Expiry.DeviceRequests != "" { deviceRequests, err := time.ParseDuration(c.Expiry.DeviceRequests) if err != nil { - return fmt.Errorf("invalid config value %q for device request expiry: %v", c.Expiry.AuthRequests, err) + return 
fmt.Errorf("invalid config value %q for device request expiry: %v", c.Expiry.DeviceRequests, err) } logger.Info("config device requests", "valid_for", deviceRequests) serverConfig.DeviceRequestsValidFor = deviceRequests @@ -556,7 +571,7 @@ func runServe(options serveOptions) error { grpcListener, err := net.Listen("tcp", c.GRPC.Addr) if err != nil { - return fmt.Errorf("listening (grcp) on %s: %w", c.GRPC.Addr, err) + return fmt.Errorf("listening (grpc) on %s: %w", c.GRPC.Addr, err) } grpcSrv := grpc.NewServer(grpcOptions...) @@ -616,6 +631,9 @@ func applyConfigOverrides(options serveOptions, config *Config) { "urn:ietf:params:oauth:grant-type:device_code", "urn:ietf:params:oauth:grant-type:token-exchange", } + if featureflags.ClientCredentialGrantEnabledByDefault.Enabled() { + config.OAuth2.GrantTypes = append(config.OAuth2.GrantTypes, "client_credentials") + } } } diff --git a/cmd/dex/serve_test.go b/cmd/dex/serve_test.go index 9e214480..12d0c0ff 100644 --- a/cmd/dex/serve_test.go +++ b/cmd/dex/serve_test.go @@ -9,19 +9,19 @@ import ( func TestNewLogger(t *testing.T) { t.Run("JSON", func(t *testing.T) { - logger, err := newLogger(slog.LevelInfo, "json") + logger, err := newLogger(slog.LevelInfo, "json", nil) require.NoError(t, err) require.NotEqual(t, (*slog.Logger)(nil), logger) }) t.Run("Text", func(t *testing.T) { - logger, err := newLogger(slog.LevelError, "text") + logger, err := newLogger(slog.LevelError, "text", nil) require.NoError(t, err) require.NotEqual(t, (*slog.Logger)(nil), logger) }) t.Run("Unknown", func(t *testing.T) { - logger, err := newLogger(slog.LevelError, "gofmt") + logger, err := newLogger(slog.LevelError, "gofmt", nil) require.Error(t, err) require.Equal(t, "log format is not one of the supported values (json, text): gofmt", err.Error()) require.Equal(t, (*slog.Logger)(nil), logger) diff --git a/config.yaml.dist b/config.yaml.dist index e570d7b2..917f8d1f 100644 --- a/config.yaml.dist +++ b/config.yaml.dist @@ -72,6 +72,8 @@ web: # 
logger: # level: "debug" # format: "text" # can also be "json" +# # Drop these attribute keys from all log output (useful for GDPR/PII suppression). +# # excludeFields: [email, username, preferred_username, groups] # gRPC API configuration # Uncomment this block to enable the gRPC API. @@ -109,6 +111,13 @@ web: # # # Uncomment to use a specific connector for password grants # passwordConnector: local +# +# # PKCE (Proof Key for Code Exchange) configuration +# pkce: +# # If true, PKCE is required for all authorization code flows (OAuth 2.1). +# enforce: false +# # Supported code challenge methods. Defaults to ["S256", "plain"]. +# codeChallengeMethodsSupported: ["S256", "plain"] # Static clients registered in Dex by default. # @@ -134,6 +143,16 @@ web: # - /device/callback # name: 'Static Client for Device Flow' # public: true +# +# # Example of a client restricted to specific connectors +# - id: restricted-client +# secret: restricted-client-secret +# redirectURIs: +# - 'https://app.example.com/callback' +# name: 'Restricted Client' +# allowedConnectors: +# - github +# - google # Connectors are used to authenticate users against upstream identity providers. 
# diff --git a/connector/atlassiancrowd/atlassiancrowd.go b/connector/atlassiancrowd/atlassiancrowd.go index d3683284..ca922147 100644 --- a/connector/atlassiancrowd/atlassiancrowd.go +++ b/connector/atlassiancrowd/atlassiancrowd.go @@ -87,16 +87,16 @@ func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, erro return &crowdConnector{Config: *c, logger: logger.With(slog.Group("connector", "type", "atlassiancrowd", "id", id))}, nil } -type crowdConnector struct { - Config - logger *slog.Logger -} - var ( _ connector.PasswordConnector = (*crowdConnector)(nil) _ connector.RefreshConnector = (*crowdConnector)(nil) ) +type crowdConnector struct { + Config + logger *slog.Logger +} + type refreshData struct { Username string `json:"username"` } diff --git a/connector/authproxy/authproxy.go b/connector/authproxy/authproxy.go index f3d87fcb..5756a0d4 100644 --- a/connector/authproxy/authproxy.go +++ b/connector/authproxy/authproxy.go @@ -68,6 +68,8 @@ func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, erro }, nil } +var _ connector.CallbackConnector = (*callback)(nil) + // Callback is a connector which returns an identity with the HTTP header // X-Remote-User as verified email. type callback struct { @@ -83,20 +85,20 @@ type callback struct { } // LoginURL returns the URL to redirect the user to login with. 
-func (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { +func (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, []byte, error) { u, err := url.Parse(callbackURL) if err != nil { - return "", fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err) + return "", nil, fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err) } u.Path += m.pathSuffix v := u.Query() v.Set("state", state) u.RawQuery = v.Encode() - return u.String(), nil + return u.String(), nil, nil } // HandleCallback parses the request and returns the user's identity -func (m *callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) { +func (m *callback) HandleCallback(s connector.Scopes, _ []byte, r *http.Request) (connector.Identity, error) { remoteUser := r.Header.Get(m.userHeader) if remoteUser == "" { return connector.Identity{}, fmt.Errorf("required HTTP header %s is not set", m.userHeader) diff --git a/connector/authproxy/authproxy_test.go b/connector/authproxy/authproxy_test.go index fbdd2a5d..bd8b4f36 100644 --- a/connector/authproxy/authproxy_test.go +++ b/connector/authproxy/authproxy_test.go @@ -36,7 +36,7 @@ func TestUser(t *testing.T) { "X-Remote-User": {testUsername}, } - ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req) + ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req) expectNil(t, err) // If not specified, the userID and email should fall back to the remote user @@ -62,7 +62,7 @@ func TestExtraHeaders(t *testing.T) { "X-Remote-User-Email": {testEmail}, } - ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req) + ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req) expectNil(t, err) expectEquals(t, ident.UserID, testUserID) @@ -85,7 +85,7 @@ func TestSingleGroup(t *testing.T) { 
"X-Remote-Group": {testGroup1}, } - ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req) + ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req) expectNil(t, err) expectEquals(t, ident.UserID, testEmail) @@ -106,7 +106,7 @@ func TestMultipleGroup(t *testing.T) { "X-Remote-Group": {testGroup1 + ", " + testGroup2 + ", " + testGroup3 + ", " + testGroup4}, } - ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req) + ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req) expectNil(t, err) expectEquals(t, ident.UserID, testEmail) @@ -132,7 +132,7 @@ func TestMultipleGroupWithCustomSeparator(t *testing.T) { "X-Remote-Group": {testGroup1 + ";" + testGroup2 + ";" + testGroup3 + ";" + testGroup4}, } - ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req) + ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req) expectNil(t, err) expectEquals(t, ident.UserID, testEmail) @@ -158,7 +158,7 @@ func TestStaticGroup(t *testing.T) { "X-Remote-Group": {testGroup1 + ", " + testGroup2 + ", " + testGroup3 + ", " + testGroup4}, } - ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, req) + ident, err := callback.HandleCallback(connector.Scopes{OfflineAccess: true, Groups: true}, nil, req) expectNil(t, err) expectEquals(t, ident.UserID, testEmail) diff --git a/connector/github/github.go b/connector/github/github.go index eb19f778..0712d3b9 100644 --- a/connector/github/github.go +++ b/connector/github/github.go @@ -28,6 +28,8 @@ const ( // GitHub requires this scope to access '/user/teams' and '/orgs' API endpoints // which are used when a client includes the 'groups' scope. scopeOrgs = "read:org" + // githubAPIVersion pins the GitHub REST API version used in requests. 
+ githubAPIVersion = "2022-11-28" ) // Pagination URL patterns @@ -462,6 +464,7 @@ func get(ctx context.Context, client *http.Client, apiURL string, v interface{}) return "", fmt.Errorf("github: new req: %v", err) } req = req.WithContext(ctx) + req.Header.Set("X-GitHub-Api-Version", githubAPIVersion) resp, err := client.Do(req) if err != nil { return "", fmt.Errorf("github: get URL %v", err) @@ -659,6 +662,7 @@ func (c *githubConnector) userInOrg(ctx context.Context, client *http.Client, us return false, fmt.Errorf("github: new req: %v", err) } req = req.WithContext(ctx) + req.Header.Set("X-GitHub-Api-Version", githubAPIVersion) resp, err := client.Do(req) if err != nil { return false, fmt.Errorf("github: get teams: %v", err) diff --git a/connector/github/github_test.go b/connector/github/github_test.go index 543cfc89..de351496 100644 --- a/connector/github/github_test.go +++ b/connector/github/github_test.go @@ -485,6 +485,21 @@ func Test_Open_PreferredDomainConfig(t *testing.T) { } } +func TestGetSendsAPIVersionHeader(t *testing.T) { + var gotHeader string + s := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotHeader = r.Header.Get("X-GitHub-Api-Version") + w.Header().Add("Content-Type", "application/json") + json.NewEncoder(w).Encode([]org{}) + })) + defer s.Close() + + var result []org + _, err := get(context.Background(), newClient(), s.URL+"/user/orgs", &result) + expectNil(t, err) + expectEquals(t, gotHeader, githubAPIVersion) +} + func newTestServer(responses map[string]testResponse) *httptest.Server { var s *httptest.Server s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/connector/gitlab/gitlab.go b/connector/gitlab/gitlab.go index a4ccf738..b9fb3bec 100644 --- a/connector/gitlab/gitlab.go +++ b/connector/gitlab/gitlab.go @@ -87,8 +87,9 @@ type connectorData struct { } var ( - _ connector.CallbackConnector = (*gitlabConnector)(nil) - _ 
connector.RefreshConnector = (*gitlabConnector)(nil) + _ connector.CallbackConnector = (*gitlabConnector)(nil) + _ connector.RefreshConnector = (*gitlabConnector)(nil) + _ connector.TokenIdentityConnector = (*gitlabConnector)(nil) ) type gitlabConnector struct { @@ -243,6 +244,34 @@ func (c *gitlabConnector) Refresh(ctx context.Context, s connector.Scopes, ident } } +// TokenIdentity is used for token exchange, verifying a GitLab access token +// and returning the associated user identity. This enables direct authentication +// with Dex using an existing GitLab token without going through the OAuth flow. +// +// Note: The connector decides whether to fetch groups based on its configuration +// (groups filter, getGroupsPermission), not on the scopes from the token exchange request. +// The server will then decide whether to include groups in the final token based on +// the requested scopes. This matches the behavior of other connectors (e.g., OIDC). +func (c *gitlabConnector) TokenIdentity(ctx context.Context, _, subjectToken string) (connector.Identity, error) { + if c.httpClient != nil { + ctx = context.WithValue(ctx, oauth2.HTTPClient, c.httpClient) + } + + token := &oauth2.Token{ + AccessToken: subjectToken, + TokenType: "Bearer", // GitLab tokens are typically Bearer tokens even if the type is not explicitly provided. + } + + // For token exchange, we determine if groups should be fetched based on connector configuration. + // If the connector has groups filter or getGroupsPermission enabled, we fetch groups. + scopes := connector.Scopes{ + // Scopes are not provided in token exchange, so we request groups every time and return only if configured. 
+ Groups: true, + } + + return c.identity(ctx, scopes, token) +} + func (c *gitlabConnector) groupsRequired(groupScope bool) bool { return len(c.groups) > 0 || groupScope } diff --git a/connector/gitlab/gitlab_test.go b/connector/gitlab/gitlab_test.go index 28ff2643..92614643 100644 --- a/connector/gitlab/gitlab_test.go +++ b/connector/gitlab/gitlab_test.go @@ -485,3 +485,88 @@ func expectEquals(t *testing.T, a interface{}, b interface{}) { t.Errorf("Expected %+v to equal %+v", a, b) } } + +func TestTokenIdentity(t *testing.T) { + // Note: These tests verify that the connector returns groups based on its configuration. + // The actual inclusion of groups in the final Dex token depends on the 'groups' scope + // in the token exchange request, which is handled by the Dex server, not the connector. + tests := []struct { + name string + userInfo userInfo + groups []string + getGroupsPermission bool + useLoginAsID bool + expectUserID string + expectGroups []string + }{ + { + name: "without groups config", + expectUserID: "12345678", + expectGroups: nil, + }, + { + name: "with groups filter", + userInfo: userInfo{ + Groups: []string{"team-1", "team-2"}, + }, + groups: []string{"team-1"}, + expectUserID: "12345678", + expectGroups: []string{"team-1"}, + }, + { + name: "with groups permission", + userInfo: userInfo{ + Groups: []string{"ops", "dev"}, + OwnerPermission: []string{"ops"}, + DeveloperPermission: []string{"dev"}, + MaintainerPermission: []string{}, + }, + getGroupsPermission: true, + expectUserID: "12345678", + expectGroups: []string{"ops", "dev", "ops:owner", "dev:developer"}, + }, + { + name: "with useLoginAsID", + useLoginAsID: true, + expectUserID: "joebloggs", + expectGroups: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + responses := map[string]interface{}{ + "/api/v4/user": gitlabUser{ + Email: "some@email.com", + ID: 12345678, + Name: "Joe Bloggs", + Username: "joebloggs", + }, + "/oauth/userinfo": tc.userInfo, + } + 
+ s := newTestServer(responses) + defer s.Close() + + c := gitlabConnector{ + baseURL: s.URL, + httpClient: newClient(), + groups: tc.groups, + getGroupsPermission: tc.getGroupsPermission, + useLoginAsID: tc.useLoginAsID, + } + + accessToken := "test-access-token" + ctx := context.Background() + identity, err := c.TokenIdentity(ctx, "urn:ietf:params:oauth:token-type:access_token", accessToken) + + expectNil(t, err) + expectEquals(t, identity.UserID, tc.expectUserID) + expectEquals(t, identity.Username, "Joe Bloggs") + expectEquals(t, identity.PreferredUsername, "joebloggs") + expectEquals(t, identity.Email, "some@email.com") + expectEquals(t, identity.EmailVerified, true) + expectEquals(t, identity.Groups, tc.expectGroups) + }) + } +} diff --git a/connector/keystone/keystone.go b/connector/keystone/keystone.go index cdfdb558..7d3084b2 100644 --- a/connector/keystone/keystone.go +++ b/connector/keystone/keystone.go @@ -15,6 +15,11 @@ import ( "github.com/dexidp/dex/connector" ) +var ( + _ connector.PasswordConnector = (*conn)(nil) + _ connector.RefreshConnector = (*conn)(nil) +) + type conn struct { Domain domainKeystone Host string @@ -103,11 +108,6 @@ type userResponse struct { } `json:"user"` } -var ( - _ connector.PasswordConnector = &conn{} - _ connector.RefreshConnector = &conn{} -) - // Open returns an authentication strategy using Keystone. 
func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, error) { _, err := uuid.Parse(c.Domain) diff --git a/connector/ldap/ldap.go b/connector/ldap/ldap.go index 9ac993c4..4cb7180e 100644 --- a/connector/ldap/ldap.go +++ b/connector/ldap/ldap.go @@ -301,6 +301,11 @@ func (c *Config) openConnector(logger *slog.Logger) (*ldapConnector, error) { return &ldapConnector{*c, userSearchScope, groupSearchScope, tlsConfig, logger}, nil } +var ( + _ connector.PasswordConnector = (*ldapConnector)(nil) + _ connector.RefreshConnector = (*ldapConnector)(nil) +) + type ldapConnector struct { Config @@ -312,11 +317,6 @@ type ldapConnector struct { logger *slog.Logger } -var ( - _ connector.PasswordConnector = (*ldapConnector)(nil) - _ connector.RefreshConnector = (*ldapConnector)(nil) -) - // do initializes a connection to the LDAP directory and passes it to the // provided function. It then performs appropriate teardown or reuse before // returning. diff --git a/connector/linkedin/linkedin.go b/connector/linkedin/linkedin.go index 0c24ff47..32e33aea 100644 --- a/connector/linkedin/linkedin.go +++ b/connector/linkedin/linkedin.go @@ -49,18 +49,16 @@ type connectorData struct { AccessToken string `json:"accessToken"` } -type linkedInConnector struct { - oauth2Config *oauth2.Config - logger *slog.Logger -} - -// LinkedIn doesn't provide refresh tokens, so refresh tokens issued by Dex -// will expire in 60 days (default LinkedIn token lifetime). 
var ( _ connector.CallbackConnector = (*linkedInConnector)(nil) _ connector.RefreshConnector = (*linkedInConnector)(nil) ) +type linkedInConnector struct { + oauth2Config *oauth2.Config + logger *slog.Logger +} + // LoginURL returns an access token request URL func (c *linkedInConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, []byte, error) { if c.oauth2Config.RedirectURL != callbackURL { diff --git a/connector/mock/connectortest.go b/connector/mock/connectortest.go index be44bfd1..4d9e9e27 100644 --- a/connector/mock/connectortest.go +++ b/connector/mock/connectortest.go @@ -29,10 +29,9 @@ func NewCallbackConnector(logger *slog.Logger) connector.Connector { } var ( - _ connector.CallbackConnector = &Callback{} - - _ connector.PasswordConnector = passwordConnector{} - _ connector.RefreshConnector = passwordConnector{} + _ connector.CallbackConnector = &Callback{} + _ connector.RefreshConnector = &Callback{} + _ connector.TokenIdentityConnector = &Callback{} ) // Callback is a connector that requires no user interaction and always returns the same identity. 
@@ -97,6 +96,11 @@ func (c *PasswordConfig) Open(id string, logger *slog.Logger) (connector.Connect return &passwordConnector{c.Username, c.Password, logger}, nil } +var ( + _ connector.PasswordConnector = passwordConnector{} + _ connector.RefreshConnector = passwordConnector{} +) + type passwordConnector struct { username string password string diff --git a/connector/oauth/oauth.go b/connector/oauth/oauth.go index 413a813a..2ae13a69 100644 --- a/connector/oauth/oauth.go +++ b/connector/oauth/oauth.go @@ -16,6 +16,8 @@ import ( "github.com/dexidp/dex/pkg/httpclient" ) +var _ connector.CallbackConnector = (*oauthConnector)(nil) + type oauthConnector struct { clientID string clientSecret string @@ -116,9 +118,9 @@ func (c *Config) Open(id string, logger *slog.Logger) (connector.Connector, erro return oauthConn, err } -func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { +func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, []byte, error) { if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) + return "", nil, fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) } oauth2Config := &oauth2.Config{ @@ -129,10 +131,10 @@ func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state st Scopes: c.scopes, } - return oauth2Config.AuthCodeURL(state), nil + return oauth2Config.AuthCodeURL(state), nil, nil } -func (c *oauthConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { +func (c *oauthConnector) HandleCallback(s connector.Scopes, _ []byte, r *http.Request) (identity connector.Identity, err error) { q := r.URL.Query() if errType := q.Get("error"); errType != "" { return identity, errors.New(q.Get("error_description")) diff --git a/connector/oauth/oauth_test.go 
b/connector/oauth/oauth_test.go index 2f6b0b95..cdd2d3c6 100644 --- a/connector/oauth/oauth_test.go +++ b/connector/oauth/oauth_test.go @@ -50,7 +50,7 @@ func TestLoginURL(t *testing.T) { conn := newConnector(t, testServer.URL) - loginURL, err := conn.LoginURL(connector.Scopes{}, conn.redirectURI, "some-state") + loginURL, _, err := conn.LoginURL(connector.Scopes{}, conn.redirectURI, "some-state") assert.Equal(t, err, nil) expectedURL, err := url.Parse(testServer.URL + "/authorize") @@ -86,7 +86,7 @@ func TestHandleCallBackForGroupsInUserInfo(t *testing.T) { conn := newConnector(t, testServer.URL) req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallBackForGroupsInUserInfo") - identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req) + identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req) assert.Equal(t, err, nil) sort.Strings(identity.Groups) @@ -122,7 +122,7 @@ func TestHandleCallBackForGroupMapsInUserInfo(t *testing.T) { conn := newConnector(t, testServer.URL) req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallBackForGroupMapsInUserInfo") - identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req) + identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req) assert.Equal(t, err, nil) sort.Strings(identity.Groups) @@ -156,7 +156,7 @@ func TestHandleCallBackForGroupsInToken(t *testing.T) { conn := newConnector(t, testServer.URL) req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallBackForGroupsInToken") - identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, req) + identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req) assert.Equal(t, err, nil) assert.Equal(t, len(identity.Groups), 1) @@ -186,7 +186,7 @@ func TestHandleCallbackForNumericUserID(t *testing.T) { conn := newConnector(t, testServer.URL) req := newRequestWithAuthCode(t, testServer.URL, "TestHandleCallbackForNumericUserID") - identity, err := 
conn.HandleCallback(connector.Scopes{Groups: true}, req) + identity, err := conn.HandleCallback(connector.Scopes{Groups: true}, nil, req) assert.Equal(t, err, nil) assert.Equal(t, identity.UserID, "1000") diff --git a/connector/oidc/oidc.go b/connector/oidc/oidc.go index 879d82d7..8e1fe724 100644 --- a/connector/oidc/oidc.go +++ b/connector/oidc/oidc.go @@ -379,8 +379,9 @@ func (c *Config) Open(id string, logger *slog.Logger) (conn connector.Connector, } var ( - _ connector.CallbackConnector = (*oidcConnector)(nil) - _ connector.RefreshConnector = (*oidcConnector)(nil) + _ connector.CallbackConnector = (*oidcConnector)(nil) + _ connector.RefreshConnector = (*oidcConnector)(nil) + _ connector.TokenIdentityConnector = (*oidcConnector)(nil) ) type oidcConnector struct { diff --git a/connector/saml/saml.go b/connector/saml/saml.go index 3e44b477..8ef434b6 100644 --- a/connector/saml/saml.go +++ b/connector/saml/saml.go @@ -3,8 +3,10 @@ package saml import ( "bytes" + "context" "crypto/x509" "encoding/base64" + "encoding/json" "encoding/pem" "encoding/xml" "fmt" @@ -230,6 +232,11 @@ func (c *Config) openConnector(logger *slog.Logger) (*provider, error) { return p, nil } +var ( + _ connector.SAMLConnector = (*provider)(nil) + _ connector.RefreshConnector = (*provider)(nil) +) + type provider struct { entityIssuer string ssoIssuer string @@ -255,6 +262,36 @@ type provider struct { logger *slog.Logger } +// cachedIdentity stores the identity from SAML assertion for refresh token support. +// Since SAML has no native refresh mechanism, we cache the identity obtained during +// the initial authentication and return it on subsequent refresh requests. 
+type cachedIdentity struct { + UserID string `json:"userId"` + Username string `json:"username"` + PreferredUsername string `json:"preferredUsername"` + Email string `json:"email"` + EmailVerified bool `json:"emailVerified"` + Groups []string `json:"groups,omitempty"` +} + +// marshalCachedIdentity serializes the identity into ConnectorData for refresh token support. +func marshalCachedIdentity(ident connector.Identity) (connector.Identity, error) { + ci := cachedIdentity{ + UserID: ident.UserID, + Username: ident.Username, + PreferredUsername: ident.PreferredUsername, + Email: ident.Email, + EmailVerified: ident.EmailVerified, + Groups: ident.Groups, + } + connectorData, err := json.Marshal(ci) + if err != nil { + return ident, fmt.Errorf("saml: failed to marshal cached identity: %v", err) + } + ident.ConnectorData = connectorData + return ident, nil +} + func (p *provider) POSTData(s connector.Scopes, id string) (action, value string, err error) { r := &authnRequest{ ProtocolBinding: bindingPOST, @@ -405,7 +442,7 @@ func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo str if len(p.allowedGroups) == 0 && (!s.Groups || p.groupsAttr == "") { // Groups not requested or not configured. We're done. - return ident, nil + return marshalCachedIdentity(ident) } if len(p.allowedGroups) > 0 && (!s.Groups || p.groupsAttr == "") { @@ -431,7 +468,7 @@ func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo str if len(p.allowedGroups) == 0 { // No allowed groups set, just return the ident - return ident, nil + return marshalCachedIdentity(ident) } // Look for membership in one of the allowed groups @@ -447,6 +484,35 @@ func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo str } // Otherwise, we're good + return marshalCachedIdentity(ident) +} + +// Refresh implements connector.RefreshConnector. 
+// Since SAML has no native refresh mechanism, this method returns the cached +// identity from the initial SAML assertion stored in ConnectorData. +func (p *provider) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { + if len(ident.ConnectorData) == 0 { + return ident, fmt.Errorf("saml: no connector data available for refresh") + } + + var ci cachedIdentity + if err := json.Unmarshal(ident.ConnectorData, &ci); err != nil { + return ident, fmt.Errorf("saml: failed to unmarshal cached identity: %v", err) + } + + ident.UserID = ci.UserID + ident.Username = ci.Username + ident.PreferredUsername = ci.PreferredUsername + ident.Email = ci.Email + ident.EmailVerified = ci.EmailVerified + + // Only populate groups if the client requested the groups scope. + if s.Groups { + ident.Groups = ci.Groups + } else { + ident.Groups = nil + } + return ident, nil } diff --git a/connector/saml/saml_test.go b/connector/saml/saml_test.go index 03e891fe..3eba5cf8 100644 --- a/connector/saml/saml_test.go +++ b/connector/saml/saml_test.go @@ -1,8 +1,10 @@ package saml import ( + "context" "crypto/x509" "encoding/base64" + "encoding/json" "encoding/pem" "errors" "log/slog" @@ -448,6 +450,24 @@ func (r responseTest) run(t *testing.T) { } sort.Strings(ident.Groups) sort.Strings(r.wantIdent.Groups) + + // Verify ConnectorData contains valid cached identity, then clear it + // for the main identity comparison (ConnectorData is an implementation + // detail of refresh token support). 
+ if len(ident.ConnectorData) > 0 { + var ci cachedIdentity + if err := json.Unmarshal(ident.ConnectorData, &ci); err != nil { + t.Fatalf("failed to unmarshal ConnectorData: %v", err) + } + if ci.UserID != ident.UserID { + t.Errorf("cached identity UserID mismatch: got %q, want %q", ci.UserID, ident.UserID) + } + if ci.Email != ident.Email { + t.Errorf("cached identity Email mismatch: got %q, want %q", ci.Email, ident.Email) + } + } + ident.ConnectorData = nil + if diff := pretty.Compare(ident, r.wantIdent); diff != "" { t.Error(diff) } @@ -589,3 +609,310 @@ func TestVerifySignedMessageAndSignedAssertion(t *testing.T) { func TestVerifyUnsignedMessageAndUnsignedAssertion(t *testing.T) { runVerify(t, "testdata/idp-cert.pem", "testdata/idp-resp.xml", false) } + +func TestSAMLRefresh(t *testing.T) { + // Create a provider using the same pattern as existing tests. + c := Config{ + CA: "testdata/ca.crt", + UsernameAttr: "Name", + EmailAttr: "email", + GroupsAttr: "groups", + RedirectURI: "http://127.0.0.1:5556/dex/callback", + SSOURL: "http://foo.bar/", + } + + conn, err := c.openConnector(slog.New(slog.DiscardHandler)) + if err != nil { + t.Fatal(err) + } + + t.Run("SuccessfulRefresh", func(t *testing.T) { + ci := cachedIdentity{ + UserID: "test-user-id", + Username: "testuser", + PreferredUsername: "testuser", + Email: "test@example.com", + EmailVerified: true, + Groups: []string{"group1", "group2"}, + } + connectorData, err := json.Marshal(ci) + if err != nil { + t.Fatal(err) + } + + ident := connector.Identity{ + UserID: "old-id", + Username: "old-name", + ConnectorData: connectorData, + } + + refreshed, err := conn.Refresh(context.Background(), connector.Scopes{Groups: true}, ident) + if err != nil { + t.Fatalf("Refresh failed: %v", err) + } + + if refreshed.UserID != "test-user-id" { + t.Errorf("expected UserID %q, got %q", "test-user-id", refreshed.UserID) + } + if refreshed.Username != "testuser" { + t.Errorf("expected Username %q, got %q", "testuser", 
refreshed.Username) + } + if refreshed.PreferredUsername != "testuser" { + t.Errorf("expected PreferredUsername %q, got %q", "testuser", refreshed.PreferredUsername) + } + if refreshed.Email != "test@example.com" { + t.Errorf("expected Email %q, got %q", "test@example.com", refreshed.Email) + } + if !refreshed.EmailVerified { + t.Error("expected EmailVerified to be true") + } + if len(refreshed.Groups) != 2 || refreshed.Groups[0] != "group1" || refreshed.Groups[1] != "group2" { + t.Errorf("expected groups [group1, group2], got %v", refreshed.Groups) + } + // ConnectorData should be preserved through refresh + if len(refreshed.ConnectorData) == 0 { + t.Error("expected ConnectorData to be preserved") + } + }) + + t.Run("RefreshPreservesConnectorData", func(t *testing.T) { + ci := cachedIdentity{ + UserID: "user-123", + Username: "alice", + Email: "alice@example.com", + EmailVerified: true, + } + connectorData, err := json.Marshal(ci) + if err != nil { + t.Fatal(err) + } + + ident := connector.Identity{ + UserID: "old-id", + ConnectorData: connectorData, + } + + refreshed, err := conn.Refresh(context.Background(), connector.Scopes{}, ident) + if err != nil { + t.Fatalf("Refresh failed: %v", err) + } + + // Verify the refreshed identity can be refreshed again (round-trip) + var roundTrip cachedIdentity + if err := json.Unmarshal(refreshed.ConnectorData, &roundTrip); err != nil { + t.Fatalf("failed to unmarshal ConnectorData after refresh: %v", err) + } + if roundTrip.UserID != "user-123" { + t.Errorf("round-trip UserID mismatch: got %q, want %q", roundTrip.UserID, "user-123") + } + }) + + t.Run("EmptyConnectorData", func(t *testing.T) { + ident := connector.Identity{ + UserID: "test-id", + ConnectorData: nil, + } + _, err := conn.Refresh(context.Background(), connector.Scopes{}, ident) + if err == nil { + t.Error("expected error for empty ConnectorData") + } + }) + + t.Run("InvalidJSON", func(t *testing.T) { + ident := connector.Identity{ + UserID: "test-id", + 
ConnectorData: []byte("not-json"), + } + _, err := conn.Refresh(context.Background(), connector.Scopes{}, ident) + if err == nil { + t.Error("expected error for invalid JSON") + } + }) + + t.Run("HandlePOSTThenRefresh", func(t *testing.T) { + // Full integration: HandlePOST → get ConnectorData → Refresh → verify identity + now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z") + if err != nil { + t.Fatal(err) + } + conn.now = func() time.Time { return now } + + resp, err := os.ReadFile("testdata/good-resp.xml") + if err != nil { + t.Fatal(err) + } + samlResp := base64.StdEncoding.EncodeToString(resp) + + scopes := connector.Scopes{ + OfflineAccess: true, + Groups: true, + } + ident, err := conn.HandlePOST(scopes, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m") + if err != nil { + t.Fatalf("HandlePOST failed: %v", err) + } + + if len(ident.ConnectorData) == 0 { + t.Fatal("expected ConnectorData to be set after HandlePOST") + } + + // Now refresh using the ConnectorData from HandlePOST + refreshed, err := conn.Refresh(context.Background(), scopes, ident) + if err != nil { + t.Fatalf("Refresh failed: %v", err) + } + + if refreshed.UserID != ident.UserID { + t.Errorf("UserID mismatch: got %q, want %q", refreshed.UserID, ident.UserID) + } + if refreshed.Username != ident.Username { + t.Errorf("Username mismatch: got %q, want %q", refreshed.Username, ident.Username) + } + if refreshed.Email != ident.Email { + t.Errorf("Email mismatch: got %q, want %q", refreshed.Email, ident.Email) + } + if refreshed.EmailVerified != ident.EmailVerified { + t.Errorf("EmailVerified mismatch: got %v, want %v", refreshed.EmailVerified, ident.EmailVerified) + } + sort.Strings(refreshed.Groups) + sort.Strings(ident.Groups) + if len(refreshed.Groups) != len(ident.Groups) { + t.Errorf("Groups length mismatch: got %d, want %d", len(refreshed.Groups), len(ident.Groups)) + } + for i := range ident.Groups { + if i < len(refreshed.Groups) && refreshed.Groups[i] != ident.Groups[i] { + 
t.Errorf("Groups[%d] mismatch: got %q, want %q", i, refreshed.Groups[i], ident.Groups[i]) + } + } + }) + + t.Run("HandlePOSTThenDoubleRefresh", func(t *testing.T) { + // Verify that refresh tokens can be chained: HandlePOST → Refresh → Refresh + now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z") + if err != nil { + t.Fatal(err) + } + conn.now = func() time.Time { return now } + + resp, err := os.ReadFile("testdata/good-resp.xml") + if err != nil { + t.Fatal(err) + } + samlResp := base64.StdEncoding.EncodeToString(resp) + + scopes := connector.Scopes{OfflineAccess: true, Groups: true} + ident, err := conn.HandlePOST(scopes, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m") + if err != nil { + t.Fatalf("HandlePOST failed: %v", err) + } + + // First refresh + refreshed1, err := conn.Refresh(context.Background(), scopes, ident) + if err != nil { + t.Fatalf("first Refresh failed: %v", err) + } + if len(refreshed1.ConnectorData) == 0 { + t.Fatal("expected ConnectorData after first refresh") + } + + // Second refresh using output of first refresh + refreshed2, err := conn.Refresh(context.Background(), scopes, refreshed1) + if err != nil { + t.Fatalf("second Refresh failed: %v", err) + } + + // All fields should match original + if refreshed2.UserID != ident.UserID { + t.Errorf("UserID mismatch after double refresh: got %q, want %q", refreshed2.UserID, ident.UserID) + } + if refreshed2.Email != ident.Email { + t.Errorf("Email mismatch after double refresh: got %q, want %q", refreshed2.Email, ident.Email) + } + if refreshed2.Username != ident.Username { + t.Errorf("Username mismatch after double refresh: got %q, want %q", refreshed2.Username, ident.Username) + } + }) + + t.Run("HandlePOSTWithAssertionSignedThenRefresh", func(t *testing.T) { + // Test with assertion-signed.xml (signature on assertion, not response) + now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z") + if err != nil { + t.Fatal(err) + } + conn.now = func() time.Time { return now } + + resp, err 
:= os.ReadFile("testdata/assertion-signed.xml") + if err != nil { + t.Fatal(err) + } + samlResp := base64.StdEncoding.EncodeToString(resp) + + scopes := connector.Scopes{OfflineAccess: true, Groups: true} + ident, err := conn.HandlePOST(scopes, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m") + if err != nil { + t.Fatalf("HandlePOST with assertion-signed failed: %v", err) + } + + if len(ident.ConnectorData) == 0 { + t.Fatal("expected ConnectorData after HandlePOST with assertion-signed") + } + + refreshed, err := conn.Refresh(context.Background(), scopes, ident) + if err != nil { + t.Fatalf("Refresh after assertion-signed HandlePOST failed: %v", err) + } + + if refreshed.Email != ident.Email { + t.Errorf("Email mismatch: got %q, want %q", refreshed.Email, ident.Email) + } + if refreshed.Username != ident.Username { + t.Errorf("Username mismatch: got %q, want %q", refreshed.Username, ident.Username) + } + }) + + t.Run("HandlePOSTRefreshWithoutGroupsScope", func(t *testing.T) { + // Verify that groups are NOT returned when groups scope is not requested during refresh + now, err := time.Parse(timeFormat, "2017-04-04T04:34:59.330Z") + if err != nil { + t.Fatal(err) + } + conn.now = func() time.Time { return now } + + resp, err := os.ReadFile("testdata/good-resp.xml") + if err != nil { + t.Fatal(err) + } + samlResp := base64.StdEncoding.EncodeToString(resp) + + // Initial auth WITH groups + scopesWithGroups := connector.Scopes{OfflineAccess: true, Groups: true} + ident, err := conn.HandlePOST(scopesWithGroups, samlResp, "6zmm5mguyebwvajyf2sdwwcw6m") + if err != nil { + t.Fatalf("HandlePOST failed: %v", err) + } + if len(ident.Groups) == 0 { + t.Fatal("expected groups in initial identity") + } + + // Refresh WITHOUT groups scope + scopesNoGroups := connector.Scopes{OfflineAccess: true, Groups: false} + refreshed, err := conn.Refresh(context.Background(), scopesNoGroups, ident) + if err != nil { + t.Fatalf("Refresh failed: %v", err) + } + + if len(refreshed.Groups) != 0 { + 
t.Errorf("expected no groups when groups scope not requested, got %v", refreshed.Groups) + } + + // Refresh WITH groups scope — groups should be back + refreshedWithGroups, err := conn.Refresh(context.Background(), scopesWithGroups, ident) + if err != nil { + t.Fatalf("Refresh with groups failed: %v", err) + } + + if len(refreshedWithGroups.Groups) == 0 { + t.Error("expected groups when groups scope is requested") + } + }) +} diff --git a/docker-compose.override.yaml.dist b/docker-compose.override.yaml.dist index b9eefac5..30591add 100644 --- a/docker-compose.override.yaml.dist +++ b/docker-compose.override.yaml.dist @@ -5,6 +5,10 @@ services: ports: - "127.0.0.1:3306:3306" + mysql8: + ports: + - "127.0.0.1:3307:3306" + postgres: ports: - "127.0.0.1:5432:5432" diff --git a/docker-compose.yaml b/docker-compose.yaml index cfaf739c..6c5a052a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -17,6 +17,15 @@ services: MYSQL_PASSWORD: mysql MYSQL_ROOT_PASSWORD: root + mysql8: + image: mysql:8.0 + command: --default-authentication-plugin=mysql_native_password + environment: + MYSQL_DATABASE: dex + MYSQL_USER: mysql + MYSQL_PASSWORD: mysql + MYSQL_ROOT_PASSWORD: root + postgres: image: postgres:10.15 environment: diff --git a/docs/enhancements/auth-sessions-2026-02-18.md b/docs/enhancements/auth-sessions-2026-02-18.md new file mode 100644 index 00000000..28f5208c --- /dev/null +++ b/docs/enhancements/auth-sessions-2026-02-18.md @@ -0,0 +1,1505 @@ +# Dex Enhancement Proposal (DEP 4560) - 2026-02-18 - Auth Sessions + +## Table of Contents + +- [Summary](#summary) +- [Motivation](#motivation) + - [Goals/Pain](#goalspain) + - [Non-Goals](#non-goals) +- [Proposal](#proposal) + - [User Experience](#user-experience) + - [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints) + - [Risks and Mitigations](#risks-and-mitigations) + - [Alternatives](#alternatives) +- [Future Improvements](#future-improvements) + +## Summary + +This DEP 
introduces **auth sessions** - a persistent authentication state that enables Dex to track logged-in users across browser sessions. Currently, Dex relies entirely on refresh tokens for session management, which prevents proper implementation of OIDC conformance features like `prompt=none`, `prompt=login`, `id_token_hint`, SSO across clients, and proper logout. User Sessions will be stored server-side with a browser cookie reference, enabling these features while maintaining Dex's simplicity and compatibility with all storage backends (SQL, etcd, Kubernetes CRDs). + +## Context + +- [OIDC Core 1.0 - Authentication Request](https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest) - `prompt` parameter specification +- [OIDC Core 1.0 - ID Token Hint](https://openid.net/specs/openid-connect-core-1_0.html#IDToken) - `id_token_hint` specification +- [OIDC Session Management 1.0](https://openid.net/specs/openid-connect-session-1_0.html) - Session management specification +- [OIDC RP-Initiated Logout 1.0](https://openid.net/specs/openid-connect-rpinitiated-1_0.html) - Logout specification +- [OIDC Front-Channel Logout 1.0](https://openid.net/specs/openid-connect-frontchannel-1_0.html) - Front-channel logout +- [Keycloak Sessions](https://www.keycloak.org/docs/latest/server_admin/#_sessions) - Reference implementation +- [Ory Hydra Login & Consent Flow](https://www.ory.sh/docs/hydra/concepts/login) - Reference implementation + +Current limitations: +- No support for `prompt=none` (silent authentication) +- No support for `prompt=login` (force re-authentication) +- No support for `max_age` parameter +- No support for `id_token_hint` validation +- No SSO between clients (each client requires separate login) +- No proper logout (only refresh token revocation) +- No consent persistence (user must approve every time if not skipped globally) +- No 2FA enrollment storage +- No "Remember Me" functionality + +## Motivation + +### Goals/Pain + +1. 
**OIDC Conformance** - Enable proper `prompt=none`, `prompt=login`, `max_age`, and `id_token_hint` support +2. **SSO (Single Sign-On)** - Allow users to authenticate once and access multiple clients without re-login +3. **Remember Me** - Allow users to choose persistent vs session-based authentication +4. **Consent Persistence** - Store user consent decisions per client/scope combination within session +5. **Proper Logout** - Enable session termination with optional front-channel logout +6. **Foundation for 2FA** - Enable future TOTP/WebAuthn enrollment storage + +### Non-Goals + +- **2FA Implementation** - This DEP only provides storage foundation; 2FA flow is a separate DEP +- **Back-Channel Logout** - Server-to-server logout notifications are out of scope +- **Session Clustering/Replication** - Storage backends handle this +- **Admin Session Management UI** - API only, no admin UI +- **Per-connector Session Policies** - Single global session policy initially +- **Identity Refresh During Session** - Deferred to future DEP; initially identity is refreshed only at session termination (like Keycloak) +- **Upstream Connector Logout** - Terminating sessions at upstream IDPs is deferred + +## Proposal + +### User Experience + +#### Configuration + +Sessions are controlled by a feature flag and configuration: + +```yaml +# Feature flag (environment variable) +# DEX_SESSIONS_ENABLED=true + +# config.yaml +sessions: + # Session cookie name (default: "dex_session") + # Other cookie settings (Secure, HttpOnly, SameSite=Lax) are not configurable + # and are set to secure defaults automatically + cookieName: "dex_session" + + # Session lifetime settings (matches refresh token expiry naming) + absoluteLifetime: "24h" # Maximum session lifetime, default: 24h + validIfNotUsedFor: "1h" # Session expires if not used, default: 1h + + # Default SSO sharing policy for clients without explicit ssoSharedWith config + # Options: + # "all" - clients without ssoSharedWith share sessions 
with all other clients (Keycloak-like) + # "none" - clients without ssoSharedWith don't share sessions (default) + ssoSharedWithDefault: "none" + + # Whether "Remember Me" checkbox is checked by default in login/approval forms + # When true: checkbox is pre-checked, user can uncheck + # When false: checkbox is unchecked, user must check to persist session (default) + rememberMeCheckedByDefault: false +``` + +**ssoSharedWithDefault** controls the default SSO behavior: +- `"none"` (default): Clients without explicit `ssoSharedWith` config don't participate in SSO +- `"all"`: Clients without explicit `ssoSharedWith` config share sessions with all other clients (realm-wide SSO like Keycloak) + +Clients with explicit `ssoSharedWith` configuration always use their configured value. + +**Note**: The `ssoSharedWith` option is separate from the existing `trustedPeers` option. `trustedPeers` controls which clients can issue tokens on behalf of this client (existing behavior), while `ssoSharedWith` controls which clients can reuse this client's authentication session (new behavior). These can be configured independently based on different security requirements. + +**rememberMeCheckedByDefault** controls the initial checkbox state in templates. +This value is passed to templates as `.RememberMeChecked` boolean. + +**SSO via ssoSharedWith**: SSO between clients is controlled by the new `ssoSharedWith` configuration on clients. The `ssoSharedWith` setting defines **which clients can USE this client's session**, not which clients this client can use. + +If client B is listed in client A's `ssoSharedWith`: +1. If user logged in via client A, client B can reuse that session + +This is intentionally separate from `trustedPeers` (which controls token issuance on behalf of another client). Organizations may want different policies for session sharing vs token delegation: +- **ssoSharedWith**: "Can this client's login be reused by another client?" 
+- **trustedPeers**: "Can another client issue tokens claiming to be this client?" + +**Wildcard Support**: `ssoSharedWith: ["*"]` enables SSO with all clients. This is similar to Keycloak's default behavior where all clients in a realm share sessions. + +**SSO Direction**: SSO sharing is **unidirectional**. Client A sharing with client B does NOT mean client B shares with client A. + +```yaml +staticClients: + # Public app - allows any client to reuse its sessions + - id: public-app + name: Public App + ssoSharedWith: ["*"] + # trustedPeers can be configured separately for token delegation + # ... + + # Admin app - only specific apps can reuse its sessions + - id: admin-app + name: Admin App + ssoSharedWith: ["monitoring-app"] # Only monitoring can SSO from admin sessions + # ... + + # Secret internal service - NO other clients can reuse its sessions + - id: secret-service + name: Secret Service + ssoSharedWith: [] # Empty = no SSO allowed from this client's sessions + # But this client CAN use sessions from other clients that share with it! + # ... + + # Monitoring app - can SSO from admin-app (because admin-app shares with it) + - id: monitoring-app + name: Monitoring App + ssoSharedWith: ["admin-app"] # Bidirectional sharing with admin-app + # ... +``` + +**Example Scenarios:** + +| User logged in via | Accessing | SSO works? | Why | +|-------------------|-----------|------------|-----| +| public-app | admin-app | ✅ Yes | public-app has `ssoSharedWith: ["*"]` | +| admin-app | public-app | ❌ No | admin-app only shares with monitoring-app | +| admin-app | monitoring-app | ✅ Yes | admin-app shares with monitoring-app | +| secret-service | any client | ❌ No | secret-service has `ssoSharedWith: []` | +| public-app | secret-service | ✅ Yes | public-app has `ssoSharedWith: ["*"]` | + +**Key Insight**: A "secret" client that doesn't want others to SSO into it simply doesn't list them in `ssoSharedWith`. 
But it can still BENEFIT from SSO by being listed in OTHER clients' `ssoSharedWith`. + +**Comparison with Keycloak**: In Keycloak, SSO is realm-wide by default - all clients in a realm share sessions. Dex's approach is more granular: SSO is opt-in per client via `ssoSharedWith`. Use `["*"]` to achieve Keycloak-like behavior. + +**Comparison with trustedPeers**: The `trustedPeers` option continues to control cross-client token issuance (e.g., client B issuing tokens for client A). This is a separate security concern from session sharing. Organizations can configure these independently: +- High SSO sharing, restricted token delegation +- Restricted SSO sharing, high token delegation +- Or any combination based on their security model + +**Cookie Security**: The session cookie is always set with secure defaults: +- `HttpOnly: true` - Not accessible via JavaScript +- `Secure: (issuerURL.Scheme == "https")` - Only sent over HTTPS; for `http` (commonly used on localhost in dev) this is disabled +- `SameSite: Lax` - CSRF protection +- `Path: ` - Derived from issuer URL (e.g., `/dex` for `https://example.com/dex`) + +These settings are not configurable to prevent security misconfigurations. + +#### Authentication Flow with Sessions + +``` +┌─────────┐ ┌─────────┐ ┌───────────┐ ┌───────────┐ +│ Browser │ │ Dex │ │ Storage │ │ Connector │ +└────┬────┘ └────┬────┘ └─────┬─────┘ └─────┬─────┘ + │ │ │ │ + │ GET /auth │ │ │ + │ (no session) │ │ │ + ├────────────────>│ │ │ + │ │ │ │ + │ │ Check session │ │ + │ │ cookie │ │ + │ ├─────────────────>│ │ + │ │ (not found) │ │ + │ │<─────────────────│ │ + │ │ │ │ + │ Redirect to │ │ │ + │ connector │ │ │ + │<────────────────│ │ │ + │ │ │ │ + │ ... connector auth flow ... 
│ │ + │ │ │ │ + │ Callback with │ │ │ + │ identity │ │ │ + ├────────────────>│ │ │ + │ │ │ │ + │ │ Create/update │ │ + │ │ AuthSession │ │ + │ │ (ALWAYS) │ │ + │ ├─────────────────>│ │ + │ │ │ │ + │ Set-Cookie: │ │ │ + │ - Session cookie (no MaxAge) │ │ + │ if Remember Me unchecked │ │ + │ - Persistent cookie (with MaxAge) │ │ + │ if Remember Me checked │ │ + │ + redirect to /approval │ │ + │<────────────────│ │ │ + │ │ │ │ +``` + +**Key Point**: AuthSession is always created on successful authentication. The "Remember Me" checkbox only controls whether the cookie is a session cookie (deleted on browser close) or a persistent cookie (survives browser restart). This is consistent with Keycloak's behavior. + +#### SSO Flow (Returning User) + +``` +┌─────────┐ ┌─────────┐ ┌───────────┐ +│ Browser │ │ Dex │ │ Storage │ +└────┬────┘ └────┬────┘ └─────┬─────┘ + │ │ │ + │ GET /auth │ │ + │ (with cookie) │ │ + │ client_id=B │ │ + ├────────────────>│ │ + │ │ │ + │ │ Get session │ + │ ├─────────────────>│ + │ │ (valid session) │ + │ │<─────────────────│ + │ │ │ + │ │ Check SSO │ + │ │ policy for │ + │ │ client B │ + │ │ │ + │ │ Check consent │ + │ │ for client B │ + │ ├─────────────────>│ + │ │ │ + │ If consented: │ │ + │ redirect with │ │ + │ code │ │ + │<────────────────│ │ + │ │ │ + │ If not: │ │ + │ show approval │ │ + │<────────────────│ │ + │ │ │ +``` + +#### prompt=none Flow + +``` +┌─────────┐ ┌─────────┐ ┌───────────┐ +│ Browser │ │ Dex │ │ Storage │ +└────┬────┘ └────┬────┘ └─────┬─────┘ + │ │ │ + │ GET /auth │ │ + │ prompt=none │ │ + ├────────────────>│ │ + │ │ │ + │ │ Get session │ + │ ├─────────────────>│ + │ │ │ + │ If valid session + consent: │ + │ redirect with code │ + │<────────────────│ │ + │ │ │ + │ If no session or no consent: │ + │ redirect with error=login_required│ + │ or error=consent_required │ + │<────────────────│ │ + │ │ │ +``` + +#### Logout Flow + +``` +┌─────────┐ ┌─────────┐ ┌───────────┐ +│ Browser │ │ Dex │ │ Storage │ +└────┬────┘ 
└────┬────┘ └─────┬─────┘ + │ │ │ + │ GET /logout │ │ + │ id_token_hint= │ │ + ├────────────────>│ │ + │ │ │ + │ │ Validate │ + │ │ id_token_hint │ + │ │ │ + │ │ Get identity │ + │ │ by session ID │ + │ ├─────────────────>│ + │ │ │ + │ │ Deactivate │ + │ │ (Active=false) │ + │ ├─────────────────>│ + │ │ │ + │ │ Revoke refresh │ + │ │ tokens │ + │ ├─────────────────>│ + │ │ │ + │ Clear cookie + │ │ + │ redirect or │ │ + │ show logout │ │ + │ confirmation │ │ + │<────────────────│ │ + │ │ │ +``` + +### Implementation Details/Notes/Constraints + +#### Feature Flag + +```go +// pkg/featureflags/set.go +var ( + // ...existing flags... + + // SessionsEnabled enables user sessions feature + SessionsEnabled = newFlag("sessions_enabled", false) +) +``` + +#### New Storage Entities + + +Two entities are required to properly handle the case where a user might be logged into different clients as different identities in the same browser: + +###### AuthSession + +```go +// storage/storage.go + +// AuthSession represents a browser's authentication state. +// One per browser, referenced by session cookie. +// Key: SessionID (random 32-byte string, stored in cookie) +type AuthSession struct { + // ID is the session identifier stored in cookie + ID string + + // ClientStates maps clientID → authentication state for that client + // Allows different users/identities per client in same browser + // + // Design note: This map-based approach is consistent with how OfflineSessions + // stores refresh tokens per client (OfflineSessions.Refresh map). Given that + // the number of OAuth clients in a typical deployment is bounded and relatively + // small (tens to hundreds, not thousands), the serialized size of this map + // will not exceed practical storage limits for any supported backend. 
+ ClientStates map[string]*ClientAuthState + + // CreatedAt is when this browser session started + CreatedAt time.Time + + // LastActivity is when any client was last accessed + LastActivity time.Time + + // IPAddress at session creation (for audit) + IPAddress string + + // UserAgent at session creation (for audit) + UserAgent string +} + +// ClientAuthState represents authentication state for a specific client within an auth session. +// Expiration follows OIDC conventions with both absolute and idle timeout: +// - ExpiresAt enforces absolute lifetime (sessions.absoluteLifetime) +// - LastActivity + sessions.validIfNotUsedFor enforces idle timeout +// A client state is considered expired if EITHER condition is met. +type ClientAuthState struct { + // UserID + ConnectorID identify which UserIdentity is authenticated for this client + UserID string + ConnectorID string + + // Active indicates if authentication is active for this client + Active bool + + // ExpiresAt is the absolute expiration time for this client session. + // Set to time.Now() + absoluteLifetime at session creation. + // Cannot be extended - hard upper bound on session duration. + ExpiresAt time.Time + + // LastActivity is when this client session was last used (token issued, SSO check, etc.) + // Used with validIfNotUsedFor to enforce idle timeout. + // Updated on each request that touches this client state. + LastActivity time.Time + + // LastTokenIssuedAt is when a token was last issued for this client. + // Used for logout notifications and audit. + LastTokenIssuedAt time.Time +} +``` + +###### UserIdentity + +```go +// storage/storage.go + +// UserIdentity represents a user's persistent identity data. 
+// Stores data that persists across sessions: +// - Consent decisions +// - Future: 2FA enrollment +// +// Key: composite of UserID + ConnectorID (one per user per connector) +type UserIdentity struct { + // UserID is the subject identifier from the connector + UserID string + + // ConnectorID is the connector that authenticated the user + ConnectorID string + + // Claims holds the user's identity claims + // Updated on: + // 1. Each login (from connector callback) + // 2. Each refresh token usage (from RefreshConnector.Refresh) + // This ensures claims stay in sync with OfflineSessions and upstream IDP + Claims Claims + + // Consents stores user consent per client: map[clientID][]scopes + // Persists across sessions so user doesn't need to re-consent + Consents map[string][]string + + // CreatedAt is when this identity was first created + CreatedAt time.Time + + // LastLogin is when the user last authenticated (used for auth_time claim) + LastLogin time.Time + + // BlockedUntil is set when user is blocked from logging in + BlockedUntil time.Time + + // Future: 2FA fields + // TOTPSecret string + // WebAuthnCredentials []WebAuthnCredential +} +``` + +**Two-Entity Design Rationale** + +| Entity | Purpose | Lifecycle | Key | +|--------|---------|-----------|-----| +| AuthSession | Browser binding, per-client auth state | Short-lived (session timeout) | SessionID (cookie) | +| UserIdentity | User data, consents, 2FA | Long-lived (persists) | UserID + ConnectorID | + +**How It Works: Different Users in Different Clients** + +``` +Auth Session (cookie: dex_session=abc123) +├── ClientStates["client-A"]: +│ └── UserID: "alice", ConnectorID: "google", Active: true +├── ClientStates["client-B"]: +│ └── UserID: "bob", ConnectorID: "ldap", Active: true +└── ClientStates["client-C"]: + └── (empty - never authenticated) + +UserIdentity (alice + google): +├── Claims: {email: alice@example.com, ...} +├── Consents: {"client-A": ["openid", "email"]} +└── LastLogin: 2024-01-01 + 
+UserIdentity (bob + ldap): +├── Claims: {email: bob@corp.com, ...} +├── Consents: {"client-B": ["openid", "groups"]} +└── LastLogin: 2024-01-02 +``` + +**How SSO Works** + +When user accesses client-B with existing session: + +1. Get `AuthSession` by cookie +2. Check `ClientStates["client-B"]`: + - If exists and active → user already authenticated for this client +3. If not, check SSO: + - Find any `ClientStates[X]` where client-X has `ssoSharedWith` containing "client-B" + - If found → SSO! Copy auth state to `ClientStates["client-B"]` + - If not found → require authentication + +**SSO Session Lookup Algorithm** + +```go +// findSSOSession searches for a valid SSO source session for the target client +func (s *Server) findSSOSession(authSession *AuthSession, targetClientID string) (*ClientAuthState, *UserIdentity) { + targetClient, err := s.storage.GetClient(ctx, targetClientID) + if err != nil { + return nil, nil + } + + // Iterate through all active client states in this browser session + for sourceClientID, state := range authSession.ClientStates { + // Skip inactive or expired states + if !state.Active || time.Now().After(state.ExpiresAt) { + continue + } + + // Get the source client configuration + sourceClient, err := s.storage.GetClient(ctx, sourceClientID) + if err != nil { + continue + } + + // Check if source client shares its session with the target client + // SSO is allowed if: + // 1. Source client has ssoSharedWith: ["*"] (shares with everyone) + // 2. Source client has targetClientID in its ssoSharedWith list + if !s.clientSharesSessionWith(sourceClient, targetClientID) { + continue + } + + // Found a valid SSO source! 
Get the user identity + identity, err := s.storage.GetUserIdentity(ctx, state.UserID, state.ConnectorID) + if err != nil { + continue + } + + // Check if user is not blocked + if identity.BlockedUntil.After(time.Now()) { + continue + } + + return state, identity + } + + return nil, nil +} + +// clientSharesSessionWith checks if sourceClient shares its session with targetClientID +func (s *Server) clientSharesSessionWith(sourceClient Client, targetClientID string) bool { + for _, peer := range sourceClient.SSOSharedWith { + if peer == "*" || peer == targetClientID { + return true + } + } + return false +} +``` + +**SSO Lookup Flow Diagram** + +``` +User accesses client-B with existing session + │ + ▼ +┌─────────────────────────────────┐ +│ Get AuthSession from cookie │ +└─────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────┐ +│ Check ClientStates["client-B"] │ +│ exists and active? │ +└─────────────────────────────────┘ + │ │ + Yes No + │ │ + ▼ ▼ +┌──────────────┐ ┌─────────────────────────────────┐ +│ Use existing │ │ For each ClientStates[X]: │ +│ session │ │ - Is state active? │ +└──────────────┘ │ - Get client-X config │ + │ - Does client-X share with B? │ + │ (X.ssoSharedWith has B or *)│ + └─────────────────────────────────┘ + │ │ + Found match No match + │ │ + ▼ ▼ + ┌──────────────┐ ┌──────────────┐ + │ SSO! Copy │ │ Require │ + │ state to B │ │ authentication│ + └──────────────┘ └──────────────┘ +``` + +**Example: SSO Flow** + +``` +1. User logs into client-A as alice + AuthSession.ClientStates["client-A"] = {UserID: "alice", Active: true} + +2. User accesses client-B + - client-A.ssoSharedWith includes "client-B" ✓ + - SSO! Copy: ClientStates["client-B"] = {UserID: "alice", Active: true} + - Issue tokens for alice to client-B +``` + +**Example: No SSO, Different User** + +``` +1. User logged into client-A as alice + AuthSession.ClientStates["client-A"] = {UserID: "alice", Active: true} + +2. 
User accesses client-B (client-A does NOT share with client-B) + - No SSO available + - Redirect to connector for authentication + +3. User logs in as bob (different account) + AuthSession.ClientStates["client-B"] = {UserID: "bob", Active: true} + +Same browser, two different users, no conflict! +``` + +**Claims Synchronization with Refresh Tokens** + +When a refresh token is used: +1. `RefreshConnector.Refresh()` returns updated claims +2. Update `OfflineSessions.ConnectorData` (existing behavior) +3. **NEW**: Also update `UserIdentity.Claims`: + +```go +// In refresh token handler +func (s *Server) handleRefreshToken(...) { + // ...existing refresh logic... + + newIdentity, err := refreshConn.Refresh(ctx, scopes, oldIdentity) + if err != nil { + // Handle refresh failure + } + + // Update OfflineSessions (existing) + s.storage.UpdateOfflineSessions(...) + + // Update UserIdentity claims (NEW) + if s.sessionsEnabled { + s.storage.UpdateUserIdentity(ctx, newIdentity.UserID, connectorID, + func(u UserIdentity) (UserIdentity, error) { + u.Claims = storage.Claims{ + UserID: newIdentity.UserID, + Username: newIdentity.Username, + Email: newIdentity.Email, + Groups: newIdentity.Groups, + // ... + } + return u, nil + }) + } +} +``` + +This ensures `UserIdentity.Claims` stays synchronized with: +- Connector's current user data +- `OfflineSessions.ConnectorData` +- Actual refresh token claims + +**Why UserIdentity instead of AuthSession?** + +The name `UserIdentity` is chosen because this entity stores more than just session state: +1. **Persistent data**: Consent decisions survive session expiration +2. **Future 2FA**: TOTP secrets and WebAuthn credentials will be stored here +3. 
**One per user/connector**: Unlike sessions which could be per-browser, this is per-identity + +**Session ID Regeneration** + +The `AuthSession.ID` is regenerated when: +- User logs in from a new browser (new session created) +- Security concern requires new session (e.g., after password change) + +Individual `ClientStates` can be invalidated without changing the auth session ID. + +**Multiple Users in Same Browser** + +With the two-entity design: +- `AuthSession` tracks which user is authenticated for which client +- Different clients can have different users (if no SSO trust) +- Same user can be authenticated for multiple clients (SSO or separate logins) + +**SSO and Different Users** + +With SSO enabled between clients, the same user is used for all sharing clients: +- User logs in to client-A as "alice@example.com" +- User accesses client-B (client-A shares with client-B) → automatically authenticated as "alice@example.com" +- SSO reuses the identity from the sharing client + +If user needs to login as different identity to a sharing client: +- Use `prompt=login` to force re-authentication +- This creates new ClientState for that client with potentially different user + +Without SSO, user can be different identities in different clients (see examples above). + +#### Storage Interface Extensions + +Two new entities require CRUD operations: + +```go +// storage/storage.go + +type Storage interface { + // ...existing methods... 
+ + // AuthSession management + CreateAuthSession(ctx context.Context, s AuthSession) error + GetAuthSession(ctx context.Context, sessionID string) (AuthSession, error) + UpdateAuthSession(ctx context.Context, sessionID string, updater func(s AuthSession) (AuthSession, error)) error + DeleteAuthSession(ctx context.Context, sessionID string) error + + // UserIdentity management + CreateUserIdentity(ctx context.Context, u UserIdentity) error + GetUserIdentity(ctx context.Context, userID, connectorID string) (UserIdentity, error) + UpdateUserIdentity(ctx context.Context, userID, connectorID string, updater func(u UserIdentity) (UserIdentity, error)) error + DeleteUserIdentity(ctx context.Context, userID, connectorID string) error + + // List for admin API + ListUserIdentities(ctx context.Context) ([]UserIdentity, error) +} +``` + +**Garbage Collection** + +```go +type GCResult struct { + // ...existing fields... + AuthSessions int64 // NEW: expired auth sessions cleaned up +} +``` + +`AuthSession` objects are garbage collected when: +- `LastActivity + validIfNotUsedFor` exceeded (inactivity) +- All `ClientStates` have expired + +`UserIdentity` objects are NOT garbage collected (preserve consents, future 2FA). + +#### Session Expiration + +**AuthSession expiration:** +- Entire session expires when `LastActivity + validIfNotUsedFor` is reached (idle timeout) +- On expiration, `AuthSession` is deleted by GC +- User must re-authenticate for all clients + +**ClientAuthState expiration (per-client within AuthSession):** + +Each client state enforces **both** absolute lifetime and idle timeout, consistent with standard OIDC session semantics: + +```go +func (s *Server) isClientStateValid(state *ClientAuthState) bool { + now := time.Now() + + // 1. Check absolute lifetime - hard upper bound, cannot be extended + if now.After(state.ExpiresAt) { + return false + } + + // 2. 
Check idle timeout - session unused for too long + if now.After(state.LastActivity.Add(s.sessionsConfig.validIfNotUsedFor)) { + return false + } + + // 3. Check explicit deactivation (admin revoked) + if !state.Active { + return false + } + + return true +} +``` + +When a client state expires: +- Other clients in same auth session remain active +- User must re-authenticate only for the expired client +- On successful re-authentication, a new `ClientAuthState` is created with fresh `ExpiresAt` + +**Admin can force re-authentication:** +- Delete `AuthSession` → user must re-auth for all clients +- Set `ClientStates[clientID].Active = false` → user must re-auth for that client only + +#### Deletion Risks + +**Deleting AuthSession:** +- User must re-authenticate for all clients +- No data loss (consents preserved in UserIdentity) +- Safe operation for logout + +**Deleting UserIdentity:** + +| What's Lost | Impact | +|-------------|--------| +| Consent decisions | User must re-approve scopes for all clients | +| Future: 2FA enrollment | User must re-enroll TOTP/WebAuthn | + +**When to delete UserIdentity:** +- User explicitly requests account deletion (GDPR) +- Admin cleanup of stale identities +- User removed from upstream identity provider + +**When NOT to delete (delete AuthSession instead):** +- Regular logout - delete AuthSession or set ClientState.Active = false +- Session expiration - GC handles AuthSession cleanup +- Security concern - delete AuthSession to force re-auth + +#### Session Cookie Format + +The session cookie contains only the session ID (not the session data): + +``` +Cookie: dex_session=<session-id>; Path=<issuer-path>; Secure; HttpOnly; SameSite=Lax +``` + +**Cookie Path**: Derived from the issuer URL path (`issuerURL.Path`). For example: +- Issuer: `https://dex.example.com/` → `Path=/` +- Issuer: `https://example.com/dex` → `Path=/dex` + +This is consistent with how Dex already handles routing - all endpoints are prefixed with the issuer path. 
+ +**Session Creation vs Cookie Persistence (Keycloak-like behavior)** + +Unlike some implementations where "Remember Me" controls session creation, we follow Keycloak's approach: + +- **AuthSession is ALWAYS created** on successful authentication +- **"Remember Me" controls cookie persistence**: + - Unchecked: Session cookie (expires when browser closes) + - Checked: Persistent cookie (expires at `absoluteLifetime`) + +This approach is better because: +1. SSO works within a browser session even without "Remember Me" +2. Consent decisions are preserved during the browser session +3. `prompt=none` works correctly within browser session +4. More intuitive: "Remember Me" = "remember me after I close the browser" + +```go +func (s *Server) setSessionCookie(w http.ResponseWriter, sessionID string, rememberMe bool) { + cookie := &http.Cookie{ + Name: s.sessionsConfig.CookieName, + Value: sessionID, + Path: s.issuerURL.Path, + HttpOnly: true, + Secure: s.issuerURL.Scheme == "https", + SameSite: http.SameSiteLaxMode, + } + + if rememberMe { + // Persistent cookie - survives browser restart + cookie.MaxAge = int(s.sessionsConfig.absoluteLifetime.Seconds()) + } + // else: Session cookie - no MaxAge, browser deletes on close + + http.SetCookie(w, cookie) +} +``` + +Session ID generation: +```go +func NewSessionID() string { + return newSecureID(32) // 256-bit random value +} +``` + +#### Client Configuration Extension + +A new client configuration field is introduced for SSO control: + +```go +// storage/storage.go + +type Client struct { + // ...existing fields... + + // TrustedPeers are a list of peers which can issue tokens on this client's behalf. + // This is used for cross-client token issuance (existing behavior). + TrustedPeers []string `json:"trustedPeers" yaml:"trustedPeers"` + + // SSOSharedWith defines which other clients can reuse this client's authentication session. + // When a user is authenticated for this client, clients listed here can skip authentication. 
+ // This is separate from TrustedPeers - organizations may want different policies for + // session sharing vs token delegation. + // Special value "*" means share with all clients (Keycloak-like realm-wide SSO). + // nil means use ssoSharedWithDefault from sessions config. + // Empty slice [] means explicitly share with no one. + SSOSharedWith []string `json:"ssoSharedWith,omitempty" yaml:"ssoSharedWith,omitempty"` +} +``` + +#### Connector Logout (Future) + +Logout URLs should be configured on connectors, not clients. A new connector interface will be added: + +```go +// connector/connector.go + +// LogoutConnector is an optional interface for connectors that support +// terminating upstream sessions on logout. +type LogoutConnector interface { + // Logout terminates the user's session at the upstream identity provider. + // Returns a URL to redirect the user to for upstream logout, or empty string + // if no redirect is needed. + Logout(ctx context.Context, connectorData []byte) (logoutURL string, err error) +} +``` + +Connectors that implement this interface (e.g., OIDC with `end_session_endpoint`, SAML with SLO): +- Are called during Dex logout flow +- Can redirect user to upstream for complete logout +- Implementation details are connector-specific + +This is tracked as a future improvement. 
+ +#### Server Configuration Extension + +```go +// cmd/dex/config.go + +type Sessions struct { + // CookieName is the session cookie name (default: "dex_session") + CookieName string `json:"cookieName"` + + // AbsoluteLifetime is the maximum session lifetime (default: "24h") + AbsoluteLifetime string `json:"absoluteLifetime"` + + // ValidIfNotUsedFor is the inactivity timeout (default: "1h") + ValidIfNotUsedFor string `json:"validIfNotUsedFor"` + + // SSOSharedWithDefault is the default SSO sharing policy + // "all" = share with all clients, "none" = share with no one (default: "none") + SSOSharedWithDefault string `json:"ssoSharedWithDefault"` + + // RememberMeCheckedByDefault controls the initial checkbox state in templates + // true = pre-checked, false = unchecked (default: false) + RememberMeCheckedByDefault bool `json:"rememberMeCheckedByDefault"` +} +``` + +**Using ssoSharedWithDefault in SSO logic:** + +```go +func (s *Server) clientSharesSessionWith(sourceClient Client, targetClientID string) bool { + ssoSharedWith := sourceClient.SSOSharedWith + + // If client has no explicit ssoSharedWith, use default + if ssoSharedWith == nil { + switch s.sessionsConfig.SSOSharedWithDefault { + case "all": + return true // Share with everyone by default + default: // "none" + return false // Share with no one by default + } + } + + // Explicit configuration: empty slice means explicitly share with no one + // This is different from nil (not configured) + if len(ssoSharedWith) == 0 { + return false + } + + // Check explicit sharing list + for _, peer := range ssoSharedWith { + if peer == "*" || peer == targetClientID { + return true + } + } + return false +} +``` + +**Three states for ssoSharedWith:** +1. `nil` (not configured) → use `ssoSharedWithDefault` +2. `[]` (empty slice) → explicitly share with no one +3. 
`["client-a", ...]` or `["*"]` → explicit sharing list + +#### Prompt Parameter Handling + +Dex will support the following `prompt` values per OIDC Core specification: +- `none` - Silent authentication, no UI displayed +- `login` - Force re-authentication +- `consent` - Force consent screen +- Empty (default) - Normal flow with session reuse + +The `select_account` value is not supported initially (would require account linking feature). + +```go +func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { + // ...existing parsing... + + prompt := r.Form.Get("prompt") + maxAge := r.Form.Get("max_age") + idTokenHint := r.Form.Get("id_token_hint") + clientID := r.Form.Get("client_id") + + // Get auth session from cookie + authSession, err := s.getAuthSessionFromCookie(r) + + // Get client auth state for this specific client + var clientState *ClientAuthState + var userIdentity *UserIdentity + if authSession != nil { + clientState = authSession.ClientStates[clientID] + if clientState != nil && clientState.Active { + userIdentity, _ = s.storage.GetUserIdentity(ctx, clientState.UserID, clientState.ConnectorID) + } + } + + // Handle max_age parameter (OIDC Core 3.1.2.1) + if maxAge != "" && userIdentity != nil { + maxAgeSeconds, err := strconv.Atoi(maxAge) + if err == nil && maxAgeSeconds >= 0 { + authAge := time.Since(userIdentity.LastLogin) + if authAge > time.Duration(maxAgeSeconds)*time.Second { + // Session is too old, force re-authentication + clientState = nil + userIdentity = nil + } + } + } + + switch prompt { + case "none": + // Silent authentication - must have valid session and consent + if clientState == nil || userIdentity == nil { + s.authErr(w, r, redirectURI, "login_required", state) + return + } + // Check consent in identity + consentedScopes, hasConsent := userIdentity.Consents[clientID] + if !hasConsent || !s.scopesCovered(consentedScopes, requestedScopes) { + s.authErr(w, r, redirectURI, "consent_required", state) + return + } + 
// Issue tokens without UI + + case "login": + // Force re-authentication - ignore existing session for this client + clientState = nil + userIdentity = nil + // Continue to connector login + + case "consent": + // Force consent screen even if previously consented + // Continue but don't check consent + + default: // "" - normal flow + // Check for SSO from trusted clients if no direct session + if clientState == nil && authSession != nil { + clientState, userIdentity = s.findSSOSession(authSession, clientID) + } + } + + // Validate id_token_hint if provided + if idTokenHint != "" { + claims, err := s.validateIDTokenHint(idTokenHint) + if err != nil { + s.authErr(w, r, redirectURI, "invalid_request", state) + return + } + if userIdentity != nil && userIdentity.UserID != claims.Subject { + // Identity user doesn't match hint + if prompt == "none" { + s.authErr(w, r, redirectURI, "login_required", state) + return + } + // Force re-login for different user + clientState = nil + userIdentity = nil + } + } + + // ...continue with flow... 
+} + +// findSSOSession looks for a valid SSO session from a sharing client +func (s *Server) findSSOSession(authSession *AuthSession, targetClientID string) (*ClientAuthState, *UserIdentity) { + for sourceClientID, state := range authSession.ClientStates { + if !state.Active { + continue + } + sourceClient, _ := s.storage.GetClient(ctx, sourceClientID) + if sourceClient == nil { + continue + } + // Check if source client shares its session with target client + if s.clientSharesSessionWith(sourceClient, targetClientID) { + identity, _ := s.storage.GetUserIdentity(ctx, state.UserID, state.ConnectorID) + if identity != nil { + return state, identity + } + } + } + return nil, nil +} +``` + +**max_age Parameter** + +The `max_age` parameter is supported per OIDC Core specification: +- Specifies the maximum authentication age in seconds +- If the identity's last authentication time (`LastLogin`) exceeds `max_age`, force re-authentication +- When `max_age` is used, the `auth_time` claim MUST be included in the ID token + +#### New Endpoints + +``` +POST /logout +GET /logout +``` + +Logout endpoint following the OpenID RP-Initiated Logout specification ([OpenID spec](https://openid.net/specs/openid-connect-rpinitiated-1_0.html)): + +```go +func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) { + idTokenHint := r.FormValue("id_token_hint") + postLogoutRedirectURI := r.FormValue("post_logout_redirect_uri") + state := r.FormValue("state") + clientID := r.FormValue("client_id") // Optional: logout from specific client + + // Get auth session from cookie + authSession, _ := s.getAuthSessionFromCookie(r) + + // Validate id_token_hint if provided + var hintUserID, hintConnectorID string + if idTokenHint != "" { + claims, err := s.validateIDTokenHint(idTokenHint) + if err == nil { + hintUserID = claims.Subject + // Extract connector from token if possible + } + } + + if authSession != nil { + if clientID != "" { + // Logout from specific client only + 
delete(authSession.ClientStates, clientID) + s.storage.UpdateAuthSession(ctx, authSession.ID, ...) + } else { + // Logout from all clients - delete entire auth session + s.storage.DeleteAuthSession(ctx, authSession.ID) + } + + // Revoke refresh tokens for logged-out clients + // ... + } + + // Clear cookie and redirect + s.clearSessionCookie(w) + + // Show logout confirmation or redirect + if postLogoutRedirectURI != "" && s.isValidPostLogoutURI(postLogoutRedirectURI, idTokenHint) { + u, _ := url.Parse(postLogoutRedirectURI) + if state != "" { + q := u.Query() + q.Set("state", state) + u.RawQuery = q.Encode() + } + http.Redirect(w, r, u.String(), http.StatusFound) + return + } + + // Show logout confirmation page + s.templates.logout(w, r) +} +``` + +**Future: Upstream Connector Logout** + +For CallbackConnectors (OIDC, OAuth, SAML), the upstream identity provider may also have an active session. Future work should include: +- Implement `LogoutConnector` interface (see above) +- OIDC connectors use `end_session_endpoint` from discovery +- SAML connectors use Single Logout (SLO) +- Redirect user to upstream after Dex logout + +This is tracked as a future improvement. + +#### Discovery Updates + +```go +func (s *Server) constructDiscovery(ctx context.Context) discovery { + d := discovery{ + // ...existing fields... + } + + if s.sessionsEnabled { + d.EndSessionEndpoint = s.absURL("/logout") + } + + return d +} +``` + +#### Login Template Updates + +When sessions are enabled, add "Remember Me" checkbox to authentication flow. + +**Template Data** + +The server passes these values to templates: + +```go +type templateData struct { + // ...existing fields... + + // SessionsEnabled indicates if sessions feature is active + SessionsEnabled bool + + // RememberMeChecked is the default checkbox state + // Set from config: sessions.rememberMeCheckedByDefault + RememberMeChecked bool +} +``` + +**For PasswordConnector (login form exists in Dex):** + +```html + +
+ + + {{ if .SessionsEnabled }} +
+ + +
+ {{ end }} + + +
+``` + +**For CallbackConnector (no login form in Dex):** + +For OAuth/OIDC/SAML connectors, the user is redirected to upstream IDP and there's no Dex login form. + +**Show on Approval Page** (recommended): Add "Remember Me" checkbox to the approval/consent page. User sees it after returning from upstream IDP, before granting consent. + +```html + +
+ + + {{ if .SessionsEnabled }} +
+ + +
+ {{ end }} + + +
+``` + +**When skipApprovalScreen is true**: If approval screen is skipped, the `rememberMeCheckedByDefault` config determines cookie persistence: +- `false` (default): Session cookie (deleted on browser close) +- `true`: Persistent cookie (survives browser restart) + +**Remember Me Behavior** (Keycloak-like): +- **AuthSession is ALWAYS created** on successful authentication regardless of checkbox +- **Checkbox controls cookie persistence only**: + - **Unchecked**: Session cookie - expires when browser closes. SSO works within browser session. + - **Checked**: Persistent cookie - survives browser restart until `absoluteLifetime` expires. + +#### Connector Type Considerations + +**CallbackConnector** (OIDC, OAuth, SAML, GitHub, etc.): +- Session created after successful callback +- Upstream tokens stored in refresh token's ConnectorData (not in session) +- Identity refresh via RefreshConnector when refresh token is used + +**PasswordConnector** (LDAP, local passwords): +- Session created after successful password verification +- No upstream tokens +- Identity refresh re-validates against password backend when refresh token is used + +Both types work the same way with sessions - the connector type only affects: +1. Initial authentication flow (redirect vs password form) +2. How identity refresh works (via refresh tokens, not sessions) + +#### Connector Configuration Changes + +Sessions reference a `ConnectorID`, but connector configuration may change after session creation (e.g., OIDC issuer URL changes, LDAP server replaced, connector removed entirely). + +**Behavior**: Dex does NOT automatically invalidate sessions when connector configuration changes. This is by design - Dex has no mechanism to detect configuration changes at runtime, and connectors are typically reconfigured during planned maintenance. 
+ +**Administrator responsibility**: When connector configuration changes in a way that invalidates existing user identities (e.g., connector removed, upstream IdP replaced), administrators should: +1. Terminate affected sessions via gRPC admin API (future: `DexSessions.TerminateByConnector(connectorID)`) +2. Or wait for sessions to expire naturally +3. Or restart Dex with `DEX_SESSIONS_ENABLED=false` temporarily to force re-authentication + +If a session references a connector that no longer exists, the session will fail gracefully at the next use: `GetConnector()` will return an error, and the user will be redirected to authenticate again. + +### Risks and Mitigations + +#### Security Risks + +| Risk | Mitigation | +|------|------------| +| Session hijacking | Secure cookie flags (HttpOnly, Secure, SameSite), short idle timeout | +| Session fixation | Generate new session ID after authentication (see below) | +| CSRF on logout | GET shows confirmation page, POST performs logout | +| Cookie theft | Bind session to fingerprint (IP range, partial user agent) - optional | +| Storage exposure | Session IDs are random 256-bit values, no sensitive data in cookie | + +**Session Fixation Protection** + +Session fixation attacks occur when an attacker sets a known session ID in a victim's browser before authentication, then hijacks the session after the victim logs in. + +References: +- [OWASP Session Fixation](https://owasp.org/www-community/attacks/Session_fixation) +- [OWASP Session Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html) + +**Mitigations implemented:** + +1. **Regenerate session ID on authentication**: When a user successfully authenticates, ALWAYS generate a new `AuthSession.ID` even if a session already exists. Never reuse a pre-authentication session ID. + +```go +// This is not the real method signature, but the implementation example of a specific behavior. 
+func (s *Server) onSuccessfulAuthentication(w http.ResponseWriter, userID, connectorID, clientID string, rememberMe bool) { + // ALWAYS generate new session ID - prevents session fixation + newSessionID := NewSessionID() + + // Create or update AuthSession with NEW ID + authSession := &AuthSession{ + ID: newSessionID, // Always new, never reuse + ClientStates: make(map[string]*ClientAuthState), + CreatedAt: time.Now(), + // ... + } + + // Set cookie with new session ID + s.setSessionCookie(w, newSessionID, rememberMe) +} +``` + +2. **Don't accept session IDs from URL parameters**: Session IDs are ONLY accepted from cookies, never from query parameters or POST data. + +3. **Strict cookie settings**: `HttpOnly`, `Secure`, `SameSite=Lax` prevent common session theft vectors. + +4. **Session binding (optional future enhancement)**: Bind session to client characteristics (IP range, user agent) to detect stolen cookies. + +**Handling existing sessions during authentication:** + +When a user authenticates and an existing `AuthSession` is found: +1. Generate a completely new session ID +2. Copy relevant state from old session to new session (if any) +3. Delete the old `AuthSession` from storage +4. Set cookie with new session ID + +This ensures that even if an attacker set a session cookie before authentication, they cannot use it after the victim logs in. + +#### Operational Risks + +| Risk | Mitigation | +|------|------------| +| Storage growth | AuthSessions are GC'd on inactivity; UserIdentities are per-user like OfflineSessions; admin API allows cleanup | +| Storage performance | Additional read per request to resolve session cookie. Impact depends on backend — see note below | +| Migration complexity | Feature flag allows gradual rollout, no breaking changes | + +**Storage Performance Note** + +Enabling sessions introduces an additional storage read on each authorization request (to resolve the session cookie to an `AuthSession`). 
The actual performance impact depends on the storage backend:
+
+- **SQL (Postgres, MySQL, SQLite)**: Session lookup by primary key is a single indexed read — negligible overhead
+- **etcd**: Single key-value lookup — negligible overhead
+- **Kubernetes CRDs**: GET by resource name — slightly higher latency than SQL/etcd but still within acceptable bounds (may require [priority&fairness](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/) tuning)
+- **Memory**: In-process map lookup — no overhead
+
+At this stage, we do not have production metrics to quantify the exact impact. The storage access pattern is identical to existing `OfflineSessions` lookups (single record by key), which are already proven in production. It is recommended to monitor storage latency after enabling sessions and to adjust `validIfNotUsedFor` if the GC frequency needs tuning.
+
+#### Breaking Changes
+
+**None** - Sessions are opt-in via feature flag and configuration. Existing deployments continue to work without changes.
+
+#### Rollback Plan
+
+Sessions are fully controlled by the `DEX_SESSIONS_ENABLED` feature flag. Rollback is straightforward:
+
+1. **Disable feature flag**: Set `DEX_SESSIONS_ENABLED=false` (or remove it)
+2. **Immediate effect**: Dex stops creating, reading, and validating sessions. All authorization requests proceed as before sessions were introduced — connector authentication on every request, no SSO, no session cookies
+3. **Cookie cleanup**: Existing session cookies in browsers become inert — Dex ignores them when sessions are disabled. They expire naturally per their MaxAge or when the browser is closed
+4. **Storage cleanup**: `AuthSession` and `UserIdentity` records remain in storage but are unused. They can be cleaned up manually or left in place — they will not grow further while sessions are disabled
+5. 
**No downtime required**: Feature flag can be toggled without restart if environment variable reload is supported; otherwise, a rolling restart is sufficient + +**Key guarantee**: Disabling the feature flag returns Dex to its pre-sessions behavior with zero side effects. No existing functionality (refresh tokens, connector authentication, token issuance) depends on sessions. Additional tables in the database cost nothing when the feature flag is disabled: they remain unused schema objects and can be deleted later if desired. + +#### Migration Path + +1. Deploy new Dex version - storage migrations create `AuthSession` and `UserIdentity` tables/resources automatically (no feature flag needed for schema) +2. Enable feature flag `DEX_SESSIONS_ENABLED=true` when ready to use sessions +3. Add `sessions:` configuration block +4. Sessions start being created for all new logins; "Remember Me" controls cookie persistence (session vs persistent cookie) +5. Existing refresh tokens continue to work + +**Note**: Storage schema changes (new tables/CRDs) are applied on startup regardless of feature flag. The feature flag only controls whether sessions are actually created and used. This simplifies deployment - you can deploy the new version, then enable sessions later without another deployment. + +### Alternatives + +#### 1. Stateless Sessions (JWT in Cookie) + +**Approach**: Store session data directly in a signed/encrypted JWT cookie. + +**Pros**: +- No server-side storage required +- Scales horizontally without shared state + +**Cons**: +- Cannot revoke sessions without blocklist +- Cookie size limits (~4KB) +- Cannot store consent history or client tracking for logout +- No server-side session list for logout + +**Decision**: Rejected. Server-side sessions are required for proper logout and SSO. + +#### 2. Extend OfflineSessions + +**Approach**: Add session data to existing OfflineSessions entity. 
+ +**Pros**: +- Reuses existing storage +- Simpler migration + +**Cons**: +- OfflineSessions are per-connector, not per-browser +- Different lifecycle (refresh token vs browser session) +- Would complicate existing OfflineSessions logic + +**Decision**: Rejected. Clean separation is better for maintainability. + +#### 3. External Session Store (Redis) + +**Approach**: Use Redis for session storage instead of existing backends. + +**Pros**: +- Built-in TTL support +- Fast reads/writes +- Proven session store + +**Cons**: +- Adds infrastructure dependency +- Against Dex's simplicity philosophy +- Doesn't work with Kubernetes CRD backend + +**Decision**: Rejected. Must work with existing storage backends. + +#### 4. Do Nothing + +**Approach**: Keep using refresh tokens as implicit sessions. + +**Cons**: +- Cannot implement OIDC conformance features +- No proper SSO +- No proper logout +- Blocks future features (2FA, etc.) + +**Decision**: Rejected. These features are essential for enterprise adoption. + +## Future Improvements + +1. **Identity Refresh for Long-Lived Sessions** + - Periodic refresh of user identity from connector during active session + - Configurable refresh interval + - Refresh on token request option + - Handle connector revocation (terminate session) + +2. **Upstream Connector Logout** + - Redirect to upstream IDP logout endpoint after Dex logout + - Support RP-Initiated Logout towards upstream OIDC providers + - SAML Single Logout (SLO) support + - Configurable per-connector logout URLs + +3. **Session Introspection Endpoint** + - Implement session check endpoint similar to [RFC 7662 Token Introspection](https://datatracker.ietf.org/doc/html/rfc7662) + - Could enable replacing OAuth2 Proxy in some deployments + - Endpoint: `GET /session/introspect` or similar + - Returns session validity and user claims + - Useful for reverse proxies to validate session cookies directly + +4. 
**Front-Channel Logout**
+   - Implement [OIDC Front-Channel Logout 1.0](https://openid.net/specs/openid-connect-frontchannel-1_0.html)
+   - Notify client applications via iframes when the user logs out
+   - Requires client `logoutURL` configuration
+
+5. **2FA/MFA Support**
+   - Store TOTP secrets in user profile
+   - Add MFA enrollment flow
+   - Step-up authentication for sensitive operations
+   - WebAuthn/Passkey support
+
+6. **Session Management API**
+   - List active sessions via gRPC API
+   - Revoke sessions via gRPC API
+   - Session activity audit log
+
+7. **Back-Channel Logout**
+   - Implement [OIDC Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html)
+   - Server-to-server logout notifications
+
+8. **Account Linking**
+   - Link multiple connector identities to a single user
+   - Switch between linked identities
+
+9. **Device/Session Fingerprinting**
+   - Optional session binding to client characteristics
+   - Anomaly detection for session theft
+
+10. **Per-Connector Session Policies**
+    - Different session lifetimes per connector
+    - Different SSO policies per connector
+
+11. **Session Impersonation for Admin**
+    - Admin can impersonate user sessions for debugging
+    - Audit logging for impersonation
+
+12. 
**Consent Management UI** + - User-facing page to view/revoke consents + - GDPR compliance features + diff --git a/docs/enhancements/cel-expressions-2026-02-28.md b/docs/enhancements/cel-expressions-2026-02-28.md new file mode 100644 index 00000000..efd2831f --- /dev/null +++ b/docs/enhancements/cel-expressions-2026-02-28.md @@ -0,0 +1,732 @@ +# Dex Enhancement Proposal (DEP) - 2026-02-28 - CEL (Common Expression Language) Integration + +## Table of Contents + +- [Summary](#summary) +- [Context](#context) +- [Motivation](#motivation) + - [Goals/Pain](#goalspain) + - [Non-Goals](#non-goals) +- [Proposal](#proposal) + - [User Experience](#user-experience) + - [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints) + - [Phase 1: pkg/cel - Core CEL Library](#phase-1-pkgcel---core-cel-library) + - [Phase 2: Authentication Policies](#phase-2-authentication-policies) + - [Phase 3: Token Policies](#phase-3-token-policies) + - [Phase 4: OIDC Connector Claim Mapping](#phase-4-oidc-connector-claim-mapping) + - [Policy Application Flow](#policy-application-flow) + - [Risks and Mitigations](#risks-and-mitigations) + - [Alternatives](#alternatives) +- [Future Improvements](#future-improvements) + +## Summary + +This DEP proposes integrating [CEL (Common Expression Language)][cel-spec] into Dex as a first-class +expression engine for policy evaluation, claim mapping, and token customization. A new reusable +`pkg/cel` package will provide a safe, sandboxed CEL environment with Kubernetes-grade compatibility +guarantees, cost budgets, and a curated set of extension libraries. Subsequent phases will leverage +this package to implement authentication policies, token policies, advanced claim mapping in +connectors, and per-client/global access rules — replacing the need for ad-hoc configuration fields +and external policy engines. 
+ +[cel-spec]: https://github.com/google/cel-spec + +## Context + +- [#1583 Add allowedGroups option for clients config][#1583] — a long-standing request for a + configuration option to allow a client to specify a list of allowed groups. +- [#1635 Connector Middleware][#1635] — long-standing request for a policy/middleware layer between + connectors and the server for claim transformations and access control. +- [#1052 Allow restricting connectors per client][#1052] — frequently requested feature to restrict + which connectors are available to specific OAuth2 clients. +- [#2178 Custom claims in ID tokens][#2178] — requests for including additional payload in issued tokens. +- [#2812 Token Exchange DEP][dep-token-exchange] — mentions CEL/Rego as future improvement for + policy-based assertions on exchanged tokens. +- The OIDC connector already has a growing set of ad-hoc claim mutation options + (`ClaimMapping`, `ClaimMutations.NewGroupFromClaims`, `FilterGroupClaims`, `ModifyGroupNames`) + that would benefit from a unified expression language. +- Previous community discussions explored OPA/Rego and JMESPath, but CEL offers a better fit + (see [Alternatives](#alternatives)). + +[#1583]: https://github.com/dexidp/dex/pull/1583 +[#1635]: https://github.com/dexidp/dex/issues/1635 +[#1052]: https://github.com/dexidp/dex/issues/1052 +[#2178]: https://github.com/dexidp/dex/issues/2178 +[dep-token-exchange]: /docs/enhancements/token-exchange-2023-02-03-%232812.md + +## Motivation + +### Goals/Pain + +1. **Complex query/filter capabilities** — Dex needs a way to express complex validations and + mutations in multiple places (authentication flow, token issuance, claim mapping). Today each + feature requires new Go code, new config fields, and a new release cycle. CEL allows operators + to express these rules declaratively without code changes. + +2. 
**Authentication policies** — Operators want to control _who_ can log in based on rich + conditions: restrict specific connectors to specific clients, require group membership for + certain clients, deny login based on email domain, enforce MFA claims, etc. Currently there is + no unified mechanism; users rely on downstream applications or external proxies. + +3. **Token policies** — Operators want to customize issued tokens: add extra claims to ID tokens, + restrict scopes per client, modify `aud` claims, include upstream connector metadata, etc. + Today this requires forking Dex or using a reverse proxy. + +4. **Claim mapping in OIDC connector** — The OIDC connector has accumulated multiple ad-hoc config + options for claim mapping and group mutations (`ClaimMapping`, `NewGroupFromClaims`, + `FilterGroupClaims`, `ModifyGroupNames`). A single CEL expression field would replace all of + these with a more powerful and composable approach. + +5. **Per-client and global policies** — One of the most frequent requests is allowing different + connectors for different clients and restricting group-based access per client. CEL policies at + the global and per-client level address this cleanly. + +6. 
**CNCF ecosystem alignment** — CEL has massive adoption across the CNCF ecosystem: + + | Project | CEL Usage | Evidence | + |---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| + | **Kubernetes** | ValidatingAdmissionPolicy, CRD validation rules (`x-kubernetes-validations`), AuthorizationPolicy, field selectors, CEL-based match conditions in webhooks | [KEP-3488][k8s-cel-kep], [CRD Validation Rules][k8s-crd-cel], [AuthorizationPolicy KEP-3221][k8s-authz-cel] | + | **Kyverno** | CEL expressions in validation/mutation policies (v1.12+), preconditions | [Kyverno CEL docs][kyverno-cel] | + | **OPA Gatekeeper** | Partially added support for CEL in constraint templates | [Gatekeeper CEL][gatekeeper-cel] | + | **Istio** | AuthorizationPolicy conditions, request routing, telemetry | [Istio CEL docs][istio-cel] | + | **Envoy / Envoy Gateway** | RBAC filter, ext_authz, rate limiting, route matching, access logging | [Envoy CEL docs][envoy-cel] | + | **Tekton** | Pipeline when expressions, CEL custom tasks | [Tekton CEL Interceptor][tekton-cel] | + | **Knative** | Trigger filters using CEL expressions | [Knative CEL filters][knative-cel] | + | **Google Cloud** | IAM Conditions, Cloud Deploy, Security Command Center | [Google IAM CEL][gcp-cel] | + | **Cert-Manager** | CertificateRequestPolicy approval using CEL | [cert-manager approver-policy CEL][cert-manager-cel] | + | **Cilium** | Hubble CEL filter logic | [Cilium CEL docs][cilium-cel] | + | **Crossplane** | Composition functions with CEL-based patch transforms | [Crossplane CEL transforms][crossplane-cel] | + | **Kube-OVN** | Network policy extensions using CEL | [Kube-OVN CEL][kube-ovn-cel] | + + [k8s-cel-kep]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3488-cel-admission-control + [k8s-crd-cel]: 
https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules + [k8s-authz-cel]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/3221-structured-authorization-configuration + [kyverno-cel]: https://kyverno.io/docs/writing-policies/cel/ + [gatekeeper-cel]: https://open-policy-agent.github.io/gatekeeper/website/docs/validating-admission-policy/#policy-updates-to-add-vap-cel + [istio-cel]: https://istio.io/latest/docs/reference/config/security/conditions/ + [envoy-cel]: https://www.envoyproxy.io/docs/envoy/latest/xds/type/v3/cel.proto + [tekton-cel]: https://tekton.dev/docs/triggers/cel_expressions/ + [knative-cel]: https://github.com/knative/eventing/blob/main/docs/broker/filtering.md#add-cel-expression-filter + [gcp-cel]: https://cloud.google.com/iam/docs/conditions-overview + [cert-manager-cel]: https://cert-manager.io/docs/policy/approval/approver-policy/#validations + [cilium-cel]: https://docs.cilium.io/en/stable/_api/v1/flow/README/#flowfilter-experimental + [crossplane-cel]: https://github.com/crossplane-contrib/function-cel-filter + [kube-ovn-cel]: https://kubeovn.github.io/docs/stable/en/advance/cel-expression/ + + By choosing CEL, Dex operators who already use Kubernetes or other CNCF tools can reuse their + existing knowledge of the expression language. + +### Non-Goals + +- **Full policy engine** — This DEP does not aim to replace dedicated external policy engines + (OPA, Kyverno). CEL in Dex is scoped to identity and token operations. +- **Breaking changes to existing configuration** — All existing config fields (`ClaimMapping`, + `ClaimMutations`, etc.) will continue to work. CEL expressions are additive/opt-in. +- **Authorization (beyond Dex scope)** — Dex is an identity provider; downstream authorization + decisions remain the responsibility of relying parties. CEL policies in Dex are limited to + authentication and token issuance concerns. 
+- **Multi-phase CEL in a single DEP** — Only Phase 1 (`pkg/cel` package) is targeted for + immediate implementation. Phases 2-4 are included here for design context and will have their + own implementation PRs. +- **Multi-step logic** — CEL in Dex is scoped to single-expression evaluation. Each expression + is a standalone, stateless computation with no intermediate variables, chaining, or + multi-step transformations. If a use case requires sequential logic or conditionally chained + expressions, it belongs outside Dex (e.g. in an external policy engine or middleware). + This boundary protects the design from scope creep that pushes CEL beyond what it's good at. + +## Proposal + +### User Experience + +#### Authentication Policy (Phase 2) + +Operators can define global and per-client authentication policies in the Dex config: + +```yaml +# Global authentication policy — each expression evaluates to bool. +# If true — the request is denied. Evaluated in order; first match wins. +authPolicy: + - expression: "!identity.email.endsWith('@example.com')" + message: "'Login restricted to example.com domain'" + - expression: "!identity.email_verified" + message: "'Email must be verified'" + +staticClients: + - id: admin-app + name: Admin Application + secret: ... + redirectURIs: [...] + # Per-client policy — same structure as global + authPolicy: + - expression: "!(request.connector_id in ['okta', 'ldap'])" + message: "'This application requires Okta or LDAP login'" + - expression: "!('admin' in identity.groups)" + message: "'Admin group membership required'" +``` + +#### Token Policy (Phase 3) + +Operators can add extra claims or mutate token contents: + +```yaml +tokenPolicy: + # Global mutations applied to all ID tokens + claims: + # Add a custom claim based on group membership + - key: "'role'" + value: "identity.groups.exists(g, g == 'admin') ? 
'admin' : 'user'" + # Include connector ID as a claim + - key: "'idp'" + value: "request.connector_id" + # Add department from upstream claims (only if present) + - key: "'department'" + value: "identity.extra['department']" + condition: "'department' in identity.extra" + +staticClients: + - id: internal-api + name: Internal API + secret: ... + redirectURIs: [...] + tokenPolicy: + claims: + - key: "'custom-claim.company.com/team'" + value: "identity.extra['team'].orValue('engineering')" + # Only add on-call claim for ops group members + - key: "'on_call'" + value: "true" + condition: "identity.groups.exists(g, g == 'ops')" + # Restrict scopes + filter: + expression: "request.scopes.all(s, s in ['openid', 'email', 'profile'])" + message: "'Unsupported scope requested'" +``` + +#### OIDC Connector Claim Mapping (Phase 4) + +Replace ad-hoc claim mapping with CEL: + +```yaml +connectors: + - type: oidc + id: corporate-idp + name: Corporate IdP + config: + issuer: https://idp.example.com + clientID: dex-client + clientSecret: ... + # CEL-based claim mapping — replaces claimMapping and claimModifications + claimMappingExpressions: + username: "claims.preferred_username.orValue(claims.email)" + email: "claims.email" + groups: > + claims.groups + .filter(g, g.startsWith('dex:')) + .map(g, g.trimPrefix('dex:')) + emailVerified: "claims.email_verified.orValue(true)" + # Extra claims to pass through to token policies + extra: + department: "claims.department.orValue('unknown')" + cost_center: "claims.cost_center.orValue('')" +``` + +### Implementation Details/Notes/Constraints + +### Phase 1: `pkg/cel` — Core CEL Library + +This is the foundation that all subsequent phases build upon. The package provides a safe, +reusable CEL environment with Kubernetes-grade guarantees. + +#### Package Structure + +``` +pkg/ + cel/ + cel.go # Core Environment, compilation, evaluation + types.go # CEL type declarations (Identity, Request, etc.) 
+ cost.go # Cost estimation and budgeting + doc.go # Package documentation + library/ + email.go # Email-related CEL functions + groups.go # Group-related CEL functions +``` + +#### Dependencies + +``` +github.com/google/cel-go v0.27.0 +``` + +The `cel-go` library is the canonical Go implementation maintained by Google, used by Kubernetes +and all major CNCF projects. It follows semantic versioning and provides strong backward +compatibility guarantees. + +#### Core API Design + +**Public types:** + +```go +// CompilationResult holds a compiled CEL program ready for evaluation. +type CompilationResult struct { + Program cel.Program + OutputType *cel.Type + Expression string +} + +// Compiler compiles CEL expressions against a specific environment. +type Compiler struct { /* ... */ } + +// CompilerOption configures a Compiler. +type CompilerOption func(*compilerConfig) +``` + +**Compilation pipeline:** + +Each `Compile*` call performs these steps sequentially: +1. Reject expressions exceeding `MaxExpressionLength` (10,240 chars). +2. Compile and type-check the expression via `cel-go`. +3. Validate output type matches the expected type (for typed variants). +4. Estimate cost using `defaultCostEstimator` with size hints — reject if estimated max cost + exceeds the cost budget. +5. Create an optimized `cel.Program` with runtime cost limit. + +Presence tests (`has(field)`, `'key' in map`) have zero cost, matching Kubernetes CEL behavior. + +#### Variable Declarations + +Variables are declared via `VariableDeclaration{Name, Type}` and registered with `NewCompiler`. 
+Helper constructors provide pre-defined variable sets: + +**`IdentityVariables()`** — the `identity` variable (from `connector.Identity`), +typed as `cel.ObjectType`: + +| Field | CEL Type | Source | +|-------|----------|--------| +| `identity.user_id` | `string` | `connector.Identity.UserID` | +| `identity.username` | `string` | `connector.Identity.Username` | +| `identity.preferred_username` | `string` | `connector.Identity.PreferredUsername` | +| `identity.email` | `string` | `connector.Identity.Email` | +| `identity.email_verified` | `bool` | `connector.Identity.EmailVerified` | +| `identity.groups` | `list(string)` | `connector.Identity.Groups` | + +**`RequestVariables()`** — the `request` variable (from `RequestContext`), +typed as `cel.ObjectType`: + +| Field | CEL Type | +|-------|----------| +| `request.client_id` | `string` | +| `request.connector_id` | `string` | +| `request.scopes` | `list(string)` | +| `request.redirect_uri` | `string` | + +**`ClaimsVariable()`** — the `claims` variable for raw upstream claims as `map(string, dyn)`. + +**Typing strategy:** + +`identity` and `request` use `cel.ObjectType` with explicitly declared fields. This gives +compile-time type checking: a typo like `identity.emial` is rejected at config load time +rather than silently evaluating to null in production — critical for an auth system where a +misconfigured policy could lock users out. + +`claims` remains `map(string, dyn)` because its shape is genuinely unknown — it carries +arbitrary upstream IdP data. + +#### Compatibility Guarantees + +Following the Kubernetes CEL compatibility model +([KEP-3488: CEL for Admission Control][kep-3488], [Kubernetes CEL Migration Guide][k8s-cel-compat]): + +1. **Environment versioning** — The CEL environment is versioned. When new functions or variables + are added, they are introduced under a new environment version. Existing expressions compiled + against an older version continue to work. 
+ + ```go + // EnvironmentVersion represents the version of the CEL environment. + // New variables, functions, or libraries are introduced in new versions. + type EnvironmentVersion uint32 + + const ( + // EnvironmentV1 is the initial CEL environment. + EnvironmentV1 EnvironmentVersion = 1 + ) + + // WithVersion sets the target environment version for the compiler. + func WithVersion(v EnvironmentVersion) CompilerOption + ``` + + This is directly modeled on `k8s.io/apiserver/pkg/cel/environment`. + +2. **Library stability** — Custom functions in the `pkg/cel/library` subpackage follow these rules: + - Functions MUST NOT be removed once released. + - Function signatures MUST NOT change once released. + - New functions MUST be added under a new `EnvironmentVersion`. + - If a function needs to be replaced, the old one is deprecated but kept forever. + +3. **Type stability** — CEL types (`Identity`, `Request`, `Claims`) follow the same rules: + - Fields MUST NOT be removed. + - Field types MUST NOT change. + - New fields are added in a new `EnvironmentVersion`. + +4. **Semantic versioning of `cel-go`** — The `cel-go` dependency follows semver. Dex pins to a + minor version range and updates are tested for behavioral changes. This is exactly the approach + Kubernetes takes: `k8s.io/apiextensions-apiserver` pins `cel-go` and gates new features behind + environment versions. + +5. **Feature gates** — New CEL-powered features are gated behind Dex feature flags (using the + existing `pkg/featureflags` mechanism) during their alpha phase. + +[kep-3488]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3488-cel-admission-control +[k8s-cel-compat]: https://kubernetes.io/docs/reference/using-api/cel/ + +#### Cost Estimation and Budgets + +Like Kubernetes, Dex CEL expressions must be bounded to prevent denial-of-service. 
+ +**Constants:** + +| Constant | Value | Description | +|----------|-------|-------------| +| `DefaultCostBudget` | `10_000_000` | Max cost units per evaluation (aligned with Kubernetes) | +| `MaxExpressionLength` | `10_240` | Max expression string length in characters | +| `DefaultStringMaxLength` | `256` | Estimated max string size for cost estimation | +| `DefaultListMaxLength` | `100` | Estimated max list size for cost estimation | + +**How it works:** + +A `defaultCostEstimator` (implementing `checker.CostEstimator`) provides size hints for known +variables (`identity`, `request`, `claims`) so the `cel-go` cost estimator doesn't assume +unbounded sizes. It also provides call cost estimates for custom Dex functions +(`dex.emailDomain`, `dex.emailLocalPart`, `dex.groupMatches`, `dex.groupFilter`). + +Expressions are validated at three levels: +1. **Length check** — reject expressions exceeding `MaxExpressionLength`. +2. **Compile-time cost estimation** — reject expressions whose estimated max cost exceeds + the cost budget. +3. **Runtime cost limit** — abort evaluation if actual cost exceeds the budget. 
+ +#### Extension Libraries + +The `pkg/cel` environment includes these cel-go standard extensions (same set as Kubernetes): + +| Library | Description | Examples | +|---------|-------------|---------| +| `ext.Strings()` | Extended string functions | `"hello".upperAscii()`, `"foo:bar".split(':')`, `s.trim()`, `s.replace('a','b')` | +| `ext.Encoders()` | Base64 encoding/decoding | `base64.encode(bytes)`, `base64.decode(str)` | +| `ext.Lists()` | Extended list functions | `list.slice(1, 3)`, `list.flatten()` | +| `ext.Sets()` | Set operations on lists | `sets.contains(a, b)`, `sets.intersects(a, b)`, `sets.equivalent(a, b)` | +| `ext.Math()` | Math functions | `math.greatest(a, b)`, `math.least(a, b)` | + +Plus custom Dex libraries in the `pkg/cel/library` subpackage, each implementing the +`cel.Library` interface: + +**`library.Email`** — email-related helpers: + +| Function | Signature | Description | +|----------|-----------|-------------| +| `dex.emailDomain` | `(string) -> string` | Returns the domain portion of an email address. `dex.emailDomain("user@example.com") == "example.com"` | +| `dex.emailLocalPart` | `(string) -> string` | Returns the local part of an email address. `dex.emailLocalPart("user@example.com") == "user"` | + +**`library.Groups`** — group-related helpers: + +| Function | Signature | Description | +|----------|-----------|-------------| +| `dex.groupMatches` | `(list(string), string) -> list(string)` | Returns groups matching a glob pattern. `dex.groupMatches(identity.groups, "team:*")` | +| `dex.groupFilter` | `(list(string), list(string)) -> list(string)` | Returns only groups present in the allowed list. `dex.groupFilter(identity.groups, ["admin", "ops"])` | + +#### Example: Compile and Evaluate + +```go +// 1. Create a compiler with identity and request variables +compiler, _ := cel.NewCompiler( + append(cel.IdentityVariables(), cel.RequestVariables()...), +) + +// 2. 
Compile a policy expression (type-checked, cost-estimated) +prog, _ := compiler.CompileBool( + `identity.email.endsWith('@example.com') && 'admin' in identity.groups`, +) + +// 3. Evaluate against real data +result, _ := cel.EvalBool(ctx, prog, map[string]any{ + "identity": cel.IdentityFromConnector(connectorIdentity), + "request": cel.RequestFromContext(cel.RequestContext{...}), +}) +// result == true +``` + +### Phase 2: Authentication Policies + +**Config Model:** + +```go +// AuthPolicy is a list of deny expressions evaluated after a user +// authenticates with a connector. Each expression evaluates to bool. +// If true — the request is denied. Evaluated in order; first match wins. +type AuthPolicy []PolicyExpression + +// PolicyExpression is a CEL expression with an optional human-readable message. +type PolicyExpression struct { + // Expression is a CEL expression that evaluates to bool. + Expression string `json:"expression"` + // Message is a CEL expression that evaluates to string (displayed to the user on deny). + // If empty, a generic message is shown. + Message string `json:"message,omitempty"` +} +``` + +**Evaluation point:** After `connector.CallbackConnector.HandleCallback()` or +`connector.PasswordConnector.Login()` returns an identity, and before the auth request is +finalized. Implemented in `server/handlers.go` at `handleConnectorCallback`. + +**Available CEL variables:** `identity` (from connector), `request` (client_id, connector_id, +scopes, redirect_uri). + +**Compilation:** All policy expressions are compiled once at config load time (in +`cmd/dex/serve.go`) and stored in the `Server` struct. This ensures: +- Syntax/type errors are caught at startup, not at runtime. +- No compilation overhead per request. +- Cost estimation can warn operators about expensive expressions at startup. 
+ +**Evaluation flow:** + +``` +User authenticates via connector + │ + v +connector.HandleCallback() returns Identity + │ + v +Evaluate global authPolicy (in order) + - For each expression: evaluate → bool + - If true → deny with message, HTTP 403 + │ + v +Evaluate per-client authPolicy (in order) + - Same logic as global + │ + v +Continue normal flow (approval screen or redirect) +``` + +### Phase 3: Token Policies + +**Config Model:** + +```go +// TokenPolicy defines policies for token issuance. +type TokenPolicy struct { + // Claims adds or overrides claims in the issued ID token. + Claims []ClaimExpression `json:"claims,omitempty"` + // Filter validates the token request. If expression evaluates to false, + // the request is denied. + Filter *PolicyExpression `json:"filter,omitempty"` +} + +type ClaimExpression struct { + // Key is a CEL expression evaluating to string — the claim name. + Key string `json:"key"` + // Value is a CEL expression evaluating to dyn — the claim value. + Value string `json:"value"` + // Condition is an optional CEL expression evaluating to bool. + // When set, the claim is only included in the token if the condition + // evaluates to true. If omitted, the claim is always included. + Condition string `json:"condition,omitempty"` +} +``` + +**Evaluation point:** In `server/oauth2.go` during ID token construction, after standard +claims are built but before JWT signing. + +**Available CEL variables:** `identity`, `request`, `existing_claims` (the standard claims already +computed as `map(string, dyn)`). + +**Claim merge order:** +1. Standard Dex claims (sub, iss, aud, email, groups, etc.) +2. Global `tokenPolicy.claims` evaluated and merged +3. Per-client `tokenPolicy.claims` evaluated and merged (overrides global) + +**Reserved (forbidden) claim names:** + +Certain claim names are reserved and MUST NOT be set or overridden by CEL token policy +expressions. 
Attempting to use a reserved claim key will result in a config validation error at +startup. This prevents operators from accidentally breaking the OIDC/OAuth2 contract or +undermining Dex's security guarantees. + +```go +// ReservedClaimNames is the set of claim names that CEL token policy +// expressions are forbidden from setting. These are core OIDC/OAuth2 claims +// managed exclusively by Dex. +var ReservedClaimNames = map[string]struct{}{ + "iss": {}, // Issuer — always set by Dex to its own issuer URL + "sub": {}, // Subject — derived from connector identity, must not be spoofed + "aud": {}, // Audience — determined by the OAuth2 client, not policy + "exp": {}, // Expiration — controlled by Dex token TTL configuration + "iat": {}, // Issued At — set by Dex at signing time + "nbf": {}, // Not Before — set by Dex at signing time + "jti": {}, // JWT ID — generated by Dex for token revocation/uniqueness + "auth_time": {}, // Authentication Time — set by Dex from the auth session + "nonce": {}, // Nonce — echoed from the client's authorization request + "at_hash": {}, // Access Token Hash — computed by Dex from the access token + "c_hash": {}, // Code Hash — computed by Dex from the authorization code +} +``` + +The reserved list is enforced in two places: +1. **Config load time** — When compiling token policy `ClaimExpression` entries, Dex statically + evaluates the `Key` expression (which must be a string literal or constant-foldable) and rejects + it if the result is in `ReservedClaimNames`. +2. **Runtime (defense in depth)** — Before merging evaluated claims into the ID token, Dex checks + each key against `ReservedClaimNames` and logs a warning + skips the claim if it matches. This + guards against dynamic key expressions that couldn't be statically checked. + +### Phase 4: OIDC Connector Claim Mapping + +**Config Model:** + +In `connector/oidc/oidc.go`: + +```go +type Config struct { + // ... existing fields ... 
+ + // ClaimMappingExpressions provides CEL-based claim mapping. + // When set, these take precedence over ClaimMapping and ClaimMutations. + ClaimMappingExpressions *ClaimMappingExpression `json:"claimMappingExpressions,omitempty"` +} + +type ClaimMappingExpression struct { + // Username is a CEL expression evaluating to string. + // Available variable: 'claims' (map of upstream claims). + Username string `json:"username,omitempty"` + // Email is a CEL expression evaluating to string. + Email string `json:"email,omitempty"` + // Groups is a CEL expression evaluating to list(string). + Groups string `json:"groups,omitempty"` + // EmailVerified is a CEL expression evaluating to bool. + EmailVerified string `json:"emailVerified,omitempty"` + // Extra is a map of claim names to CEL expressions evaluating to dyn. + // These are carried through to token policies. + Extra map[string]string `json:"extra,omitempty"` +} +``` + +**Available CEL variable:** `claims` — `map(string, dyn)` containing all raw upstream claims from +the ID token and/or UserInfo endpoint. + +This replaces the need for `ClaimMapping`, `NewGroupFromClaims`, `FilterGroupClaims`, and +`ModifyGroupNames` with a single, more powerful mechanism. + +**Backward compatibility:** When `claimMappingExpressions` is nil, the existing `ClaimMapping` and +`ClaimMutations` logic is used unchanged. When `claimMappingExpressions` is set, a startup warning is +logged if legacy mapping fields are also configured. + +### Policy Application Flow + +The following diagram shows the order in which CEL policies are applied. +Each step is optional — if not configured, it is skipped. 
+ +``` +Connector Authentication + │ + │ upstream claims → connector.Identity + │ + v +Authentication Policies + │ + │ Global authPolicy + │ Per-client authPolicy + │ + v +Token Issuance + │ + │ Global tokenPolicy.filter + │ Per-client tokenPolicy.filter + │ + │ Global tokenPolicy.claims + │ Per-client tokenPolicy.claims + │ + │ Sign JWT + │ + v +Token Response +``` + +| Step | Policy | Scope | Action on match | +|------|--------|-------|-----------------| +| 2 | `authPolicy` (global) | Global | Expression → `true` = DENY login | +| 3 | `authPolicy` (per-client) | Per-client | Expression → `true` = DENY login | +| 4 | `tokenPolicy.filter` (global) | Global | Expression → `false` = DENY token | +| 5 | `tokenPolicy.filter` (per-client) | Per-client | Expression → `false` = DENY token | +| 6 | `tokenPolicy.claims` (global) | Global | Adds/overrides claims (with optional condition) | +| 7 | `tokenPolicy.claims` (per-client) | Per-client | Adds/overrides claims (overrides global) | + +### Risks and Mitigations + +| Risk | Mitigation | +|------|------------| +| **CEL expression complexity / DoS** | Cost budgets with configurable limits (default aligned with Kubernetes). Expressions are validated at config load time. Runtime evaluation is aborted if cost exceeds budget. | +| **Learning curve for operators** | CEL has excellent documentation, playground ([cel.dev](https://cel.dev)), and massive CNCF adoption. Dex docs will include a dedicated CEL guide with examples. Most operators already know CEL from Kubernetes. | +| **`cel-go` dependency size** | `cel-go` adds ~5MB to binary. This is acceptable for the functionality provided. Kubernetes, Istio, Envoy all accept this trade-off. | +| **Breaking changes in `cel-go`** | Pin to semver minor range. Environment versioning ensures existing expressions continue to work across upgrades. | +| **Security: CEL expression injection** | CEL expressions are defined by operators in the server config, not by end users. 
No CEL expression is ever constructed from user input at runtime. | +| **Config migration** | Old config fields (`ClaimMapping`, `ClaimMutations`) continue to work. CEL expressions are opt-in. If both are specified, CEL takes precedence with a config-time warning. | +| **Error messages exposing internals** | CEL deny `message` expressions are controlled by the operator. Default messages are generic. Evaluation errors are logged server-side, not exposed to end users. | +| **Performance** | Expressions are compiled once at startup. Evaluation is sub-millisecond for typical identity operations. Cost budgets prevent pathological cases. Benchmarks will be included in `pkg/cel` tests. | + +### Alternatives + +#### OPA/Rego + +OPA was previously considered ([#1635], token exchange DEP). While powerful, it has significant +drawbacks for Dex: + +- **Separate daemon** — OPA typically runs as a sidecar or daemon; adds operational complexity. + Even the embedded Go library (`github.com/open-policy-agent/opa/rego`) is significantly + heavier than `cel-go`. +- **Rego learning curve** — Rego is a Datalog-derived language unfamiliar to most developers. + CEL syntax is closer to C/Java/Go and is immediately readable. +- **Overkill** — Dex needs simple expression evaluation, not a full policy engine with data + loading, bundles, and partial evaluation. +- **No inline expressions** — Rego policies are typically separate files, not inline config + expressions. This makes the config harder to understand and deploy. +- **Smaller CNCF footprint for embedding** — While OPA is a graduated CNCF project, CEL has + broader adoption as an _embedded_ language (Kubernetes, Istio, Envoy, Kyverno, etc.). + +#### JMESPath + +JMESPath was proposed for claim mapping. Drawbacks: + +- **Query-only** — JMESPath is a JSON query language. It cannot express boolean conditions, + mutations, or string operations naturally. +- **Limited type system** — No type checking at compile time. 
Errors are only caught at runtime. +- **Small ecosystem** — Limited adoption compared to CEL. No CNCF projects use JMESPath for + policy evaluation. +- **No cost estimation** — No way to bound execution time. + +#### Hardcoded Go Logic + +The current approach: each feature requires new Go structs, config fields, and code. This is +unsustainable: +- `ClaimMapping`, `NewGroupFromClaims`, `FilterGroupClaims`, `ModifyGroupNames` are each separate + features that could be one CEL expression. +- Every new policy need requires a Dex code change and release. +- Combinatorial explosion of config options. + +#### No Change + +Without CEL or an equivalent: +- Operators continue to request per-client connector restrictions, custom claims, claim + transformations, and access policies — issues remain open indefinitely. +- Dex accumulates more ad-hoc config fields, increasing maintenance burden. +- Complex use cases require external reverse proxies, forking Dex, or middleware. + +## Future Improvements + +- **CEL in other connectors** — Extend CEL claim mapping beyond OIDC to LDAP (attribute mapping), + SAML (assertion mapping), and other connectors with complex attribute mapping needs. +- **Policy testing framework** — Unit test framework for operators to validate their CEL + expressions against fixture data before deployment. +- **Connector selection via CEL** — Replace the static connector-per-client mapping with a CEL + expression that dynamically determines which connectors to show based on request attributes. + + diff --git a/examples/config-dev.yaml b/examples/config-dev.yaml index 94a40bff..0e8bb575 100644 --- a/examples/config-dev.yaml +++ b/examples/config-dev.yaml @@ -68,6 +68,7 @@ web: # issuer: dex # logoURL: theme/logo.png # dir: web/ +# Allowed values: light, dark # theme: light # Configuration for telemetry @@ -101,26 +102,33 @@ telemetry: # Default values shown below # oauth2: - # grantTypes determines the allowed set of authorization flows. 
+# # grantTypes determines the allowed set of authorization flows. # grantTypes: # - "authorization_code" +# - "client_credentials" # - "refresh_token" # - "implicit" # - "password" # - "urn:ietf:params:oauth:grant-type:device_code" # - "urn:ietf:params:oauth:grant-type:token-exchange" - # responseTypes determines the allowed response contents of a successful authorization flow. - # use ["code", "token", "id_token"] to enable implicit flow for web-only clients. +# # responseTypes determines the allowed response contents of a successful authorization flow. +# # use ["code", "token", "id_token"] to enable implicit flow for web-only clients. # responseTypes: [ "code" ] # also allowed are "token" and "id_token" - # By default, Dex will ask for approval to share data with application - # (approval for sharing data from connected IdP to Dex is separate process on IdP) +# # By default, Dex will ask for approval to share data with application +# # (approval for sharing data from connected IdP to Dex is separate process on IdP) # skipApprovalScreen: false - # If only one authentication method is enabled, the default behavior is to - # go directly to it. For connected IdPs, this redirects the browser away - # from application to upstream provider such as the Google login page +# # If only one authentication method is enabled, the default behavior is to +# # go directly to it. For connected IdPs, this redirects the browser away +# # from application to upstream provider such as the Google login page # alwaysShowLoginScreen: false - # Uncomment the passwordConnector to use a specific connector for password grants +# # Uncomment the passwordConnector to use a specific connector for password grants # passwordConnector: local +# # PKCE (Proof Key for Code Exchange) configuration +# pkce: +# # If true, PKCE is required for all authorization code flows (OAuth 2.1). +# enforce: false +# # Supported code challenge methods. Defaults to ["S256", "plain"]. 
+# codeChallengeMethodsSupported: ["S256", "plain"] # Instead of reading from an external storage, use this list of clients. # @@ -132,6 +140,10 @@ staticClients: - '/dex/device/callback' name: 'Example App' secret: ZXhhbXBsZS1hcHAtc2VjcmV0 + # Optional: restrict which connectors this client can use for authentication. + # If omitted or empty, all connectors are allowed. + # allowedConnectors: + # - mock # Example using environment variables # Set DEX_CLIENT_ID and DEX_SECURE_CLIENT_SECRET before starting Dex @@ -146,10 +158,23 @@ staticClients: # - /device/callback # name: 'Static Client for Device Flow' # public: true + connectors: - type: mockCallback id: mock name: Example + # grantTypes restricts which grant types can use this connector. + # If not specified, all grant types are allowed. + # Supported values: + # - "authorization_code" + # - "implicit" + # - "refresh_token" + # - "password" + # - "urn:ietf:params:oauth:grant-type:device_code" + # - "urn:ietf:params:oauth:grant-type:token-exchange" +# grantTypes: +# - "authorization_code" +# - "refresh_token" # - type: google # id: google # name: Google diff --git a/examples/go.mod b/examples/go.mod index 39e20ea4..1a6756f6 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -1,13 +1,13 @@ module github.com/dexidp/dex/examples -go 1.24.0 +go 1.25.0 require ( github.com/coreos/go-oidc/v3 v3.17.0 github.com/dexidp/dex/api/v2 v2.4.0 github.com/spf13/cobra v1.10.2 - golang.org/x/oauth2 v0.35.0 - google.golang.org/grpc v1.79.1 + golang.org/x/oauth2 v0.36.0 + google.golang.org/grpc v1.79.2 ) require ( diff --git a/examples/go.sum b/examples/go.sum index 1221aafd..c0aa9ceb 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -39,8 +39,8 @@ go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4Etq go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod 
h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= -golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= -golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= @@ -49,8 +49,8 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= -google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/go.mod b/go.mod index f19abaa9..ab315866 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/go-ldap/ldap/v3 v3.4.12 github.com/go-sql-driver/mysql v1.9.3 github.com/golang-jwt/jwt/v5 v5.3.1 + github.com/google/cel-go v0.27.0 github.com/google/uuid v1.6.0 github.com/gorilla/handlers v1.5.2 github.com/gorilla/mux v1.8.1 
@@ -34,17 +35,18 @@ require ( github.com/stretchr/testify v1.11.1 go.etcd.io/etcd/client/pkg/v3 v3.6.8 go.etcd.io/etcd/client/v3 v3.6.8 - golang.org/x/crypto v0.48.0 - golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741 - golang.org/x/net v0.50.0 - golang.org/x/oauth2 v0.35.0 - google.golang.org/api v0.269.0 - google.golang.org/grpc v1.79.1 + golang.org/x/crypto v0.49.0 + golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 + golang.org/x/net v0.52.0 + golang.org/x/oauth2 v0.36.0 + google.golang.org/api v0.271.0 + google.golang.org/grpc v1.79.2 google.golang.org/protobuf v1.36.11 ) require ( ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect + cel.dev/expr v0.25.1 // indirect cloud.google.com/go/auth v0.18.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect dario.cat/mergo v1.0.1 // indirect @@ -53,6 +55,7 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bmatcuk/doublestar v1.3.4 // indirect @@ -71,7 +74,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.12 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect github.com/googleapis/gax-go/v2 v2.17.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -112,15 +115,15 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/mod v0.32.0 // indirect - golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.41.0 // indirect - golang.org/x/text v0.34.0 // indirect - 
golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.41.0 // indirect + golang.org/x/mod v0.33.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.42.0 // indirect + golang.org/x/text v0.35.0 // indirect + golang.org/x/time v0.15.0 // indirect + golang.org/x/tools v0.42.0 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index ae39ff09..918a9d8b 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 h1:E0wvcUXTkgyN4wy4LGtNzMNGMytJN8afmIWXJVMi4cc= ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= @@ -30,6 +32,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 
v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= @@ -91,14 +95,16 @@ github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63Y github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= +github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.12 h1:Fg+zsqzYEs1ZnvmcztTYxhgCBsx3eEhEwQ1W/lHq/sQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.12/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc= github.com/googleapis/gax-go/v2 v2.17.0/go.mod 
h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -266,50 +272,51 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= -golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741 h1:fGZugkZk2UgYBxtpKmvub51Yno1LJDeEsRp2xGD+0gY= -golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= -golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= 
+golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= -golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= -golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= -golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= -golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -320,16 +327,16 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.269.0 h1:qDrTOxKUQ/P0MveH6a7vZ+DNHxJQjtGm/uvdbdGXCQg= -google.golang.org/api v0.269.0/go.mod h1:N8Wpcu23Tlccl0zSHEkcAZQKDLdquxK+l9r2LkwAauE= +google.golang.org/api v0.271.0 h1:cIPN4qcUc61jlh7oXu6pwOQqbJW2GqYh5PS6rB2C/JY= +google.golang.org/api v0.271.0/go.mod h1:CGT29bhwkbF+i11qkRUJb2KMKqcJ1hdFceEIRd9u64Q= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d h1:t/LOSXPJ9R0B6fnZNyALBRfZBH0Uy0gT+uR+SJ6syqQ= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= -google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= -google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/cel/cel.go b/pkg/cel/cel.go new file mode 100644 index 00000000..8dd686ba --- /dev/null +++ b/pkg/cel/cel.go @@ -0,0 +1,232 @@ +package cel + +import ( + "context" + "fmt" + "reflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/ext" + + "github.com/dexidp/dex/pkg/cel/library" +) + +// EnvironmentVersion represents the version of the CEL environment. +// New variables, functions, or libraries are introduced in new versions. +type EnvironmentVersion uint32 + +const ( + // EnvironmentV1 is the initial CEL environment. + EnvironmentV1 EnvironmentVersion = 1 +) + +// CompilationResult holds a compiled CEL program ready for evaluation. +type CompilationResult struct { + Program cel.Program + OutputType *cel.Type + Expression string + + ast *cel.Ast +} + +// CompilerOption configures a Compiler. 
+type CompilerOption func(*compilerConfig) + +type compilerConfig struct { + costBudget uint64 + version EnvironmentVersion +} + +func defaultCompilerConfig() *compilerConfig { + return &compilerConfig{ + costBudget: DefaultCostBudget, + version: EnvironmentV1, + } +} + +// WithCostBudget sets a custom cost budget for expression evaluation. +func WithCostBudget(budget uint64) CompilerOption { + return func(cfg *compilerConfig) { + cfg.costBudget = budget + } +} + +// WithVersion sets the target environment version for the compiler. +// Defaults to the latest version. Specifying an older version ensures +// that only functions/types available at that version are used. +func WithVersion(v EnvironmentVersion) CompilerOption { + return func(cfg *compilerConfig) { + cfg.version = v + } +} + +// Compiler compiles CEL expressions against a specific environment. +type Compiler struct { + env *cel.Env + cfg *compilerConfig +} + +// NewCompiler creates a new CEL compiler with the specified variable +// declarations and options. +// +// All custom Dex libraries are automatically included. +// The environment is configured with cost limits and safe defaults. +func NewCompiler(variables []VariableDeclaration, opts ...CompilerOption) (*Compiler, error) { + cfg := defaultCompilerConfig() + for _, opt := range opts { + opt(cfg) + } + + envOpts := make([]cel.EnvOption, 0, 8+len(variables)) + envOpts = append(envOpts, + cel.DefaultUTCTimeZone(true), + + // Standard extension libraries (same set as Kubernetes) + ext.Strings(), + ext.Encoders(), + ext.Lists(), + ext.Sets(), + ext.Math(), + + // Native Go types for typed variable access. + // This gives compile-time field checking: identity.emial → error at config load. 
+ ext.NativeTypes( + ext.ParseStructTags(true), + reflect.TypeOf(IdentityVal{}), + reflect.TypeOf(RequestVal{}), + ), + + // Custom Dex libraries + cel.Lib(&library.Email{}), + cel.Lib(&library.Groups{}), + + // Presence tests like has(field) and 'key' in map are O(1) hash + // lookups on map(string, dyn) variables, so they should not count + // toward the cost budget. Without this, expressions with multiple + // 'in' checks (e.g. "'admin' in identity.groups") would accumulate + // inflated cost estimates. This matches Kubernetes CEL behavior + // where presence tests are free for CRD validation rules. + cel.CostEstimatorOptions( + checker.PresenceTestHasCost(false), + ), + ) + + for _, v := range variables { + envOpts = append(envOpts, cel.Variable(v.Name, v.Type)) + } + + env, err := cel.NewEnv(envOpts...) + if err != nil { + return nil, fmt.Errorf("failed to create CEL environment: %w", err) + } + + return &Compiler{env: env, cfg: cfg}, nil +} + +// CompileBool compiles a CEL expression that must evaluate to bool. +func (c *Compiler) CompileBool(expression string) (*CompilationResult, error) { + return c.compile(expression, cel.BoolType) +} + +// CompileString compiles a CEL expression that must evaluate to string. +func (c *Compiler) CompileString(expression string) (*CompilationResult, error) { + return c.compile(expression, cel.StringType) +} + +// CompileStringList compiles a CEL expression that must evaluate to list(string). +func (c *Compiler) CompileStringList(expression string) (*CompilationResult, error) { + return c.compile(expression, cel.ListType(cel.StringType)) +} + +// Compile compiles a CEL expression with any output type. 
+func (c *Compiler) Compile(expression string) (*CompilationResult, error) { + return c.compile(expression, nil) +} + +func (c *Compiler) compile(expression string, expectedType *cel.Type) (*CompilationResult, error) { + if len(expression) > MaxExpressionLength { + return nil, fmt.Errorf("expression exceeds maximum length of %d characters", MaxExpressionLength) + } + + ast, issues := c.env.Compile(expression) + if issues != nil && issues.Err() != nil { + return nil, fmt.Errorf("CEL compilation failed: %w", issues.Err()) + } + + if expectedType != nil && !ast.OutputType().IsEquivalentType(expectedType) { + return nil, fmt.Errorf( + "expected expression output type %s, got %s", + expectedType, ast.OutputType(), + ) + } + + // Estimate cost at compile time and reject expressions that are too expensive. + costEst, err := c.env.EstimateCost(ast, &defaultCostEstimator{}) + if err != nil { + return nil, fmt.Errorf("CEL cost estimation failed: %w", err) + } + + if costEst.Max > c.cfg.costBudget { + return nil, fmt.Errorf( + "CEL expression estimated cost %d exceeds budget %d", + costEst.Max, c.cfg.costBudget, + ) + } + + prog, err := c.env.Program(ast, + cel.EvalOptions(cel.OptOptimize), + cel.CostLimit(c.cfg.costBudget), + ) + if err != nil { + return nil, fmt.Errorf("CEL program creation failed: %w", err) + } + + return &CompilationResult{ + Program: prog, + OutputType: ast.OutputType(), + Expression: expression, + ast: ast, + }, nil +} + +// Eval evaluates a compiled program against the given variables. +func Eval(ctx context.Context, result *CompilationResult, variables map[string]any) (ref.Val, error) { + out, _, err := result.Program.ContextEval(ctx, variables) + if err != nil { + return nil, fmt.Errorf("CEL evaluation failed: %w", err) + } + + return out, nil +} + +// EvalBool is a convenience function that evaluates and asserts bool output. 
+func EvalBool(ctx context.Context, result *CompilationResult, variables map[string]any) (bool, error) { + out, err := Eval(ctx, result, variables) + if err != nil { + return false, err + } + + v, ok := out.Value().(bool) + if !ok { + return false, fmt.Errorf("expected bool result, got %T", out.Value()) + } + + return v, nil +} + +// EvalString is a convenience function that evaluates and asserts string output. +func EvalString(ctx context.Context, result *CompilationResult, variables map[string]any) (string, error) { + out, err := Eval(ctx, result, variables) + if err != nil { + return "", err + } + + v, ok := out.Value().(string) + if !ok { + return "", fmt.Errorf("expected string result, got %T", out.Value()) + } + + return v, nil +} diff --git a/pkg/cel/cel_test.go b/pkg/cel/cel_test.go new file mode 100644 index 00000000..b211f344 --- /dev/null +++ b/pkg/cel/cel_test.go @@ -0,0 +1,280 @@ +package cel_test + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/dexidp/dex/connector" + dexcel "github.com/dexidp/dex/pkg/cel" +) + +func TestCompileBool(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + tests := map[string]struct { + expr string + wantErr bool + }{ + "true literal": { + expr: "true", + }, + "comparison": { + expr: "1 == 1", + }, + "string type mismatch": { + expr: "'hello'", + wantErr: true, + }, + "int type mismatch": { + expr: "42", + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := compiler.CompileBool(tc.expr) + if tc.wantErr { + assert.Error(t, err) + assert.Nil(t, result) + } else { + assert.NoError(t, err) + assert.NotNil(t, result) + } + }) + } +} + +func TestCompileString(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + tests := map[string]struct { + expr string + wantErr bool + }{ + "string literal": { + expr: 
"'hello'", + }, + "string concatenation": { + expr: "'hello' + ' ' + 'world'", + }, + "bool type mismatch": { + expr: "true", + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := compiler.CompileString(tc.expr) + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, result) + } + }) + } +} + +func TestCompileStringList(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + result, err := compiler.CompileStringList("['a', 'b', 'c']") + assert.NoError(t, err) + assert.NotNil(t, result) + + _, err = compiler.CompileStringList("'not a list'") + assert.Error(t, err) +} + +func TestCompile(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + // Compile accepts any type + result, err := compiler.Compile("true") + assert.NoError(t, err) + assert.NotNil(t, result) + + result, err = compiler.Compile("'hello'") + assert.NoError(t, err) + assert.NotNil(t, result) + + result, err = compiler.Compile("42") + assert.NoError(t, err) + assert.NotNil(t, result) +} + +func TestCompileErrors(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + tests := map[string]struct { + expr string + }{ + "syntax error": { + expr: "1 +", + }, + "undefined variable": { + expr: "undefined_var", + }, + "undefined function": { + expr: "undefinedFunc()", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + _, err := compiler.Compile(tc.expr) + assert.Error(t, err) + }) + } +} + +func TestCompileRejectsUnknownFields(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + // Typo in field name: should fail at compile time with ObjectType + _, err = compiler.CompileBool("identity.emial == 'test@example.com'") + assert.Error(t, err) + assert.Contains(t, err.Error(), "compilation failed") + + // Type mismatch: 
comparing string field to int should fail at compile time + _, err = compiler.CompileBool("identity.email == 123") + assert.Error(t, err) + assert.Contains(t, err.Error(), "compilation failed") + + // Valid field: should compile fine + _, err = compiler.CompileBool("identity.email == 'test@example.com'") + assert.NoError(t, err) +} + +func TestMaxExpressionLength(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + longExpr := "'" + strings.Repeat("a", dexcel.MaxExpressionLength) + "'" + _, err = compiler.Compile(longExpr) + assert.Error(t, err) + assert.Contains(t, err.Error(), "maximum length") +} + +func TestEvalBool(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + tests := map[string]struct { + expr string + identity dexcel.IdentityVal + want bool + }{ + "email endsWith": { + expr: "identity.email.endsWith('@example.com')", + identity: dexcel.IdentityVal{Email: "user@example.com"}, + want: true, + }, + "email endsWith false": { + expr: "identity.email.endsWith('@example.com')", + identity: dexcel.IdentityVal{Email: "user@other.com"}, + want: false, + }, + "email_verified": { + expr: "identity.email_verified == true", + identity: dexcel.IdentityVal{EmailVerified: true}, + want: true, + }, + "group membership": { + expr: "identity.groups.exists(g, g == 'admin')", + identity: dexcel.IdentityVal{Groups: []string{"admin", "dev"}}, + want: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + prog, err := compiler.CompileBool(tc.expr) + require.NoError(t, err) + + result, err := dexcel.EvalBool(context.Background(), prog, map[string]any{ + "identity": tc.identity, + }) + require.NoError(t, err) + assert.Equal(t, tc.want, result) + }) + } +} + +func TestEvalString(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + // With ObjectType, identity.email 
is typed as string, so CompileString works. + prog, err := compiler.CompileString("identity.email") + require.NoError(t, err) + + result, err := dexcel.EvalString(context.Background(), prog, map[string]any{ + "identity": dexcel.IdentityVal{Email: "user@example.com"}, + }) + require.NoError(t, err) + assert.Equal(t, "user@example.com", result) +} + +func TestEvalWithIdentityAndRequest(t *testing.T) { + vars := append(dexcel.IdentityVariables(), dexcel.RequestVariables()...) + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + prog, err := compiler.CompileBool( + `identity.email.endsWith('@example.com') && 'admin' in identity.groups && request.connector_id == 'okta'`, + ) + require.NoError(t, err) + + identity := dexcel.IdentityFromConnector(connector.Identity{ + UserID: "123", + Username: "john", + Email: "john@example.com", + Groups: []string{"admin", "dev"}, + }) + request := dexcel.RequestFromContext(dexcel.RequestContext{ + ClientID: "my-app", + ConnectorID: "okta", + Scopes: []string{"openid", "email"}, + }) + + result, err := dexcel.EvalBool(context.Background(), prog, map[string]any{ + "identity": identity, + "request": request, + }) + require.NoError(t, err) + assert.True(t, result) +} + +func TestNewCompilerWithVariables(t *testing.T) { + // Claims variable — remains map(string, dyn) + compiler, err := dexcel.NewCompiler(dexcel.ClaimsVariable()) + require.NoError(t, err) + + // claims.email returns dyn from map access, use Compile (not CompileString) + prog, err := compiler.Compile("claims.email") + require.NoError(t, err) + + result, err := dexcel.EvalString(context.Background(), prog, map[string]any{ + "claims": map[string]any{ + "email": "test@example.com", + }, + }) + require.NoError(t, err) + assert.Equal(t, "test@example.com", result) +} diff --git a/pkg/cel/cost.go b/pkg/cel/cost.go new file mode 100644 index 00000000..d7a09102 --- /dev/null +++ b/pkg/cel/cost.go @@ -0,0 +1,105 @@ +package cel + +import ( + "fmt" + + 
"github.com/google/cel-go/checker" +) + +// DefaultCostBudget is the default cost budget for a single expression +// evaluation. Aligned with Kubernetes defaults: enough for typical identity +// operations but prevents runaway expressions. +const DefaultCostBudget uint64 = 10_000_000 + +// MaxExpressionLength is the maximum length of a CEL expression string. +const MaxExpressionLength = 10_240 + +// DefaultStringMaxLength is the estimated max length of string values +// (emails, usernames, group names, etc.) used for compile-time cost estimation. +const DefaultStringMaxLength = 256 + +// DefaultListMaxLength is the estimated max length of list values +// (groups, scopes) used for compile-time cost estimation. +const DefaultListMaxLength = 100 + +// CostEstimate holds the estimated cost range for a compiled expression. +type CostEstimate struct { + Min uint64 + Max uint64 +} + +// EstimateCost returns the estimated cost range for a compiled expression. +// This is computed statically at compile time without evaluating the expression. +func (c *Compiler) EstimateCost(result *CompilationResult) (CostEstimate, error) { + costEst, err := c.env.EstimateCost(result.ast, &defaultCostEstimator{}) + if err != nil { + return CostEstimate{}, fmt.Errorf("CEL cost estimation failed: %w", err) + } + + return CostEstimate{Min: costEst.Min, Max: costEst.Max}, nil +} + +// defaultCostEstimator provides size hints for compile-time cost estimation. +// Without these hints, the CEL cost estimator assumes unbounded sizes for +// variables, leading to wildly overestimated max costs. +type defaultCostEstimator struct{} + +func (defaultCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + // Provide size hints for map(string, dyn) variables: identity, request, claims. + // Without these, the estimator assumes lists/strings can be infinitely large. 
+ if element.Path() == nil { + return nil + } + + path := element.Path() + if len(path) == 0 { + return nil + } + + root := path[0] + + switch root { + case "identity", "request", "claims": + // Nested field access (e.g. identity.email, identity.groups) + if len(path) >= 2 { + field := path[1] + switch field { + case "groups", "scopes": + // list(string) fields + return &checker.SizeEstimate{Min: 0, Max: DefaultListMaxLength} + case "email_verified": + // bool field — size is always 1 + return &checker.SizeEstimate{Min: 1, Max: 1} + default: + // string fields (email, username, user_id, client_id, etc.) + return &checker.SizeEstimate{Min: 0, Max: DefaultStringMaxLength} + } + } + // The map itself: number of keys + return &checker.SizeEstimate{Min: 0, Max: 20} + } + + return nil +} + +func (defaultCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + switch function { + case "dex.emailDomain", "dex.emailLocalPart": + // Simple string split — O(n) where n is string length, bounded. + return &checker.CallEstimate{ + CostEstimate: checker.CostEstimate{Min: 1, Max: 2}, + } + case "dex.groupMatches": + // Iterates over groups list and matches each against a pattern. + return &checker.CallEstimate{ + CostEstimate: checker.CostEstimate{Min: 1, Max: DefaultListMaxLength}, + } + case "dex.groupFilter": + // Builds a set from allowed list, then iterates groups. 
+ return &checker.CallEstimate{ + CostEstimate: checker.CostEstimate{Min: 1, Max: 2 * DefaultListMaxLength}, + } + } + + return nil +} diff --git a/pkg/cel/cost_test.go b/pkg/cel/cost_test.go new file mode 100644 index 00000000..9a068be4 --- /dev/null +++ b/pkg/cel/cost_test.go @@ -0,0 +1,137 @@ +package cel_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dexcel "github.com/dexidp/dex/pkg/cel" +) + +func TestEstimateCost(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + tests := map[string]struct { + expr string + }{ + "simple bool": { + expr: "true", + }, + "string comparison": { + expr: "identity.email == 'test@example.com'", + }, + "group membership": { + expr: "identity.groups.exists(g, g == 'admin')", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + prog, err := compiler.Compile(tc.expr) + require.NoError(t, err) + + est, err := compiler.EstimateCost(prog) + require.NoError(t, err) + assert.True(t, est.Max >= est.Min, "max cost should be >= min cost") + assert.True(t, est.Max <= dexcel.DefaultCostBudget, + "estimated max cost %d should be within default budget %d", est.Max, dexcel.DefaultCostBudget) + }) + } +} + +func TestCompileTimeCostAcceptsSimpleExpressions(t *testing.T) { + vars := append(dexcel.IdentityVariables(), dexcel.RequestVariables()...) 
+ compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + tests := map[string]string{ + "literal": "true", + "email endsWith": "identity.email.endsWith('@example.com')", + "group check": "'admin' in identity.groups", + "emailDomain": `dex.emailDomain(identity.email)`, + "groupMatches": `dex.groupMatches(identity.groups, "team:*")`, + "groupFilter": `dex.groupFilter(identity.groups, ["admin", "dev"])`, + "combined policy": `identity.email.endsWith('@example.com') && 'admin' in identity.groups`, + "complex policy": `identity.email.endsWith('@example.com') && + identity.groups.exists(g, g == 'admin') && + request.connector_id == 'okta' && + request.scopes.exists(s, s == 'openid')`, + "filter+map chain": `identity.groups + .filter(g, g.startsWith('team:')) + .map(g, g.replace('team:', '')) + .size() > 0`, + } + + for name, expr := range tests { + t.Run(name, func(t *testing.T) { + _, err := compiler.Compile(expr) + assert.NoError(t, err, "expression should compile within default budget") + }) + } +} + +func TestCompileTimeCostRejection(t *testing.T) { + vars := append(dexcel.IdentityVariables(), dexcel.RequestVariables()...) 
+ + tests := map[string]struct { + budget uint64 + expr string + }{ + "simple exists exceeds tiny budget": { + budget: 1, + expr: "identity.groups.exists(g, g == 'admin')", + }, + "endsWith exceeds tiny budget": { + budget: 2, + expr: "identity.email.endsWith('@example.com')", + }, + "nested comprehension over groups exceeds moderate budget": { + // Two nested iterations over groups: O(n^2) where n=100 → ~280K + budget: 10_000, + expr: `identity.groups.exists(g1, + identity.groups.exists(g2, + g1 != g2 && g1.startsWith(g2) + ) + )`, + }, + "cross-variable comprehension exceeds moderate budget": { + // filter groups then check each against scopes: O(n*m) → ~162K + budget: 10_000, + expr: `identity.groups + .filter(g, g.startsWith('team:')) + .exists(g, request.scopes.exists(s, s == g))`, + }, + "chained filter+map+filter+map exceeds small budget": { + budget: 1000, + expr: `identity.groups + .filter(g, g.startsWith('team:')) + .map(g, g.replace('team:', '')) + .filter(g, g.size() > 3) + .map(g, g.upperAscii()) + .size() > 0`, + }, + "many independent exists exceeds small budget": { + budget: 5000, + expr: `identity.groups.exists(g, g.contains('a')) && + identity.groups.exists(g, g.contains('b')) && + identity.groups.exists(g, g.contains('c')) && + identity.groups.exists(g, g.contains('d')) && + identity.groups.exists(g, g.contains('e'))`, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + compiler, err := dexcel.NewCompiler(vars, dexcel.WithCostBudget(tc.budget)) + require.NoError(t, err) + + _, err = compiler.Compile(tc.expr) + assert.Error(t, err) + assert.Contains(t, err.Error(), "estimated cost") + assert.Contains(t, err.Error(), "exceeds budget") + }) + } +} diff --git a/pkg/cel/doc.go b/pkg/cel/doc.go new file mode 100644 index 00000000..64c1dbd3 --- /dev/null +++ b/pkg/cel/doc.go @@ -0,0 +1,5 @@ +// Package cel provides a safe, sandboxed CEL (Common Expression Language) +// environment for policy evaluation, claim mapping, and 
token customization +// in Dex. It includes cost budgets, Kubernetes-grade compatibility guarantees, +// and a curated set of extension libraries. +package cel diff --git a/pkg/cel/library/doc.go b/pkg/cel/library/doc.go new file mode 100644 index 00000000..1452d2b9 --- /dev/null +++ b/pkg/cel/library/doc.go @@ -0,0 +1,4 @@ +// Package library provides custom CEL function libraries for Dex. +// Each library implements the cel.Library interface and can be registered +// in a CEL environment. +package library diff --git a/pkg/cel/library/email.go b/pkg/cel/library/email.go new file mode 100644 index 00000000..38fe0dee --- /dev/null +++ b/pkg/cel/library/email.go @@ -0,0 +1,73 @@ +package library + +import ( + "strings" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// Email provides email-related CEL functions. +// +// Functions (V1): +// +// dex.emailDomain(email: string) -> string +// Returns the domain portion of an email address. +// Example: dex.emailDomain("user@example.com") == "example.com" +// +// dex.emailLocalPart(email: string) -> string +// Returns the local part of an email address. 
+// Example: dex.emailLocalPart("user@example.com") == "user" +type Email struct{} + +func (Email) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("dex.emailDomain", + cel.Overload("dex_email_domain_string", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(emailDomainImpl), + ), + ), + cel.Function("dex.emailLocalPart", + cel.Overload("dex_email_local_part_string", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(emailLocalPartImpl), + ), + ), + } +} + +func (Email) ProgramOptions() []cel.ProgramOption { + return nil +} + +func emailDomainImpl(arg ref.Val) ref.Val { + email, ok := arg.Value().(string) + if !ok { + return types.NewErr("dex.emailDomain: expected string argument") + } + + _, domain, found := strings.Cut(email, "@") + if !found { + return types.String("") + } + + return types.String(domain) +} + +func emailLocalPartImpl(arg ref.Val) ref.Val { + email, ok := arg.Value().(string) + if !ok { + return types.NewErr("dex.emailLocalPart: expected string argument") + } + + localPart, _, found := strings.Cut(email, "@") + if !found { + return types.String(email) + } + + return types.String(localPart) +} diff --git a/pkg/cel/library/email_test.go b/pkg/cel/library/email_test.go new file mode 100644 index 00000000..d13e73a1 --- /dev/null +++ b/pkg/cel/library/email_test.go @@ -0,0 +1,106 @@ +package library_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dexcel "github.com/dexidp/dex/pkg/cel" +) + +func TestEmailDomain(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + tests := map[string]struct { + expr string + want string + }{ + "standard email": { + expr: `dex.emailDomain("user@example.com")`, + want: "example.com", + }, + "subdomain": { + expr: `dex.emailDomain("admin@sub.domain.org")`, + want: "sub.domain.org", + }, + "no at sign": { + expr: `dex.emailDomain("nodomain")`, + 
want: "", + }, + "empty string": { + expr: `dex.emailDomain("")`, + want: "", + }, + "multiple at signs": { + expr: `dex.emailDomain("user@name@example.com")`, + want: "name@example.com", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + prog, err := compiler.CompileString(tc.expr) + require.NoError(t, err) + + result, err := dexcel.EvalString(context.Background(), prog, map[string]any{}) + require.NoError(t, err) + assert.Equal(t, tc.want, result) + }) + } +} + +func TestEmailLocalPart(t *testing.T) { + compiler, err := dexcel.NewCompiler(nil) + require.NoError(t, err) + + tests := map[string]struct { + expr string + want string + }{ + "standard email": { + expr: `dex.emailLocalPart("user@example.com")`, + want: "user", + }, + "no at sign": { + expr: `dex.emailLocalPart("justuser")`, + want: "justuser", + }, + "empty string": { + expr: `dex.emailLocalPart("")`, + want: "", + }, + "multiple at signs": { + expr: `dex.emailLocalPart("user@name@example.com")`, + want: "user", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + prog, err := compiler.CompileString(tc.expr) + require.NoError(t, err) + + result, err := dexcel.EvalString(context.Background(), prog, map[string]any{}) + require.NoError(t, err) + assert.Equal(t, tc.want, result) + }) + } +} + +func TestEmailDomainWithIdentityVariable(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + prog, err := compiler.CompileString(`dex.emailDomain(identity.email)`) + require.NoError(t, err) + + result, err := dexcel.EvalString(context.Background(), prog, map[string]any{ + "identity": dexcel.IdentityVal{Email: "admin@corp.example.com"}, + }) + require.NoError(t, err) + assert.Equal(t, "corp.example.com", result) +} diff --git a/pkg/cel/library/groups.go b/pkg/cel/library/groups.go new file mode 100644 index 00000000..fd7f3603 --- /dev/null +++ b/pkg/cel/library/groups.go @@ -0,0 +1,123 @@ 
+package library + +import ( + "path" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// Groups provides group-related CEL functions. +// +// Functions (V1): +// +// dex.groupMatches(groups: list(string), pattern: string) -> list(string) +// Returns groups matching a glob pattern. +// Example: dex.groupMatches(["team:dev", "team:ops", "admin"], "team:*") +// +// dex.groupFilter(groups: list(string), allowed: list(string)) -> list(string) +// Returns only groups present in the allowed list. +// Example: dex.groupFilter(["admin", "dev", "ops"], ["admin", "ops"]) +type Groups struct{} + +func (Groups) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("dex.groupMatches", + cel.Overload("dex_group_matches_list_string", + []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, + cel.ListType(cel.StringType), + cel.BinaryBinding(groupMatchesImpl), + ), + ), + cel.Function("dex.groupFilter", + cel.Overload("dex_group_filter_list_list", + []*cel.Type{cel.ListType(cel.StringType), cel.ListType(cel.StringType)}, + cel.ListType(cel.StringType), + cel.BinaryBinding(groupFilterImpl), + ), + ), + } +} + +func (Groups) ProgramOptions() []cel.ProgramOption { + return nil +} + +func groupMatchesImpl(lhs, rhs ref.Val) ref.Val { + groupList, ok := lhs.(traits.Lister) + if !ok { + return types.NewErr("dex.groupMatches: expected list(string) as first argument") + } + + pattern, ok := rhs.Value().(string) + if !ok { + return types.NewErr("dex.groupMatches: expected string pattern as second argument") + } + + iter := groupList.Iterator() + var matched []ref.Val + + for iter.HasNext() == types.True { + item := iter.Next() + + group, ok := item.Value().(string) + if !ok { + continue + } + + ok, err := path.Match(pattern, group) + if err != nil { + return types.NewErr("dex.groupMatches: invalid pattern %q: %v", pattern, err) + } + if 
ok { + matched = append(matched, types.String(group)) + } + } + + return types.NewRefValList(types.DefaultTypeAdapter, matched) +} + +func groupFilterImpl(lhs, rhs ref.Val) ref.Val { + groupList, ok := lhs.(traits.Lister) + if !ok { + return types.NewErr("dex.groupFilter: expected list(string) as first argument") + } + + allowedList, ok := rhs.(traits.Lister) + if !ok { + return types.NewErr("dex.groupFilter: expected list(string) as second argument") + } + + allowed := make(map[string]struct{}) + iter := allowedList.Iterator() + for iter.HasNext() == types.True { + item := iter.Next() + + s, ok := item.Value().(string) + if !ok { + continue + } + + allowed[s] = struct{}{} + } + + var filtered []ref.Val + iter = groupList.Iterator() + + for iter.HasNext() == types.True { + item := iter.Next() + + group, ok := item.Value().(string) + if !ok { + continue + } + + if _, exists := allowed[group]; exists { + filtered = append(filtered, types.String(group)) + } + } + + return types.NewRefValList(types.DefaultTypeAdapter, filtered) +} diff --git a/pkg/cel/library/groups_test.go b/pkg/cel/library/groups_test.go new file mode 100644 index 00000000..70a68fb2 --- /dev/null +++ b/pkg/cel/library/groups_test.go @@ -0,0 +1,141 @@ +package library_test + +import ( + "context" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dexcel "github.com/dexidp/dex/pkg/cel" +) + +func TestGroupMatches(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + tests := map[string]struct { + expr string + groups []string + want []string + }{ + "wildcard pattern": { + expr: `dex.groupMatches(identity.groups, "team:*")`, + groups: []string{"team:dev", "team:ops", "admin"}, + want: []string{"team:dev", "team:ops"}, + }, + "exact match": { + expr: `dex.groupMatches(identity.groups, "admin")`, + groups: []string{"team:dev", "admin", "user"}, + want: []string{"admin"}, + }, 
+ "no matches": { + expr: `dex.groupMatches(identity.groups, "nonexistent")`, + groups: []string{"team:dev", "admin"}, + want: []string{}, + }, + "question mark pattern": { + expr: `dex.groupMatches(identity.groups, "team?")`, + groups: []string{"teamA", "teamB", "teams-long"}, + want: []string{"teamA", "teamB"}, + }, + "match all": { + expr: `dex.groupMatches(identity.groups, "*")`, + groups: []string{"a", "b", "c"}, + want: []string{"a", "b", "c"}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + prog, err := compiler.CompileStringList(tc.expr) + require.NoError(t, err) + + out, err := dexcel.Eval(context.Background(), prog, map[string]any{ + "identity": dexcel.IdentityVal{Groups: tc.groups}, + }) + require.NoError(t, err) + + nativeVal, err := out.ConvertToNative(reflect.TypeOf([]string{})) + require.NoError(t, err) + + got, ok := nativeVal.([]string) + require.True(t, ok, "expected []string, got %T", nativeVal) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestGroupMatchesInvalidPattern(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + prog, err := compiler.CompileStringList(`dex.groupMatches(identity.groups, "[invalid")`) + require.NoError(t, err) + + _, err = dexcel.Eval(context.Background(), prog, map[string]any{ + "identity": dexcel.IdentityVal{Groups: []string{"admin"}}, + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid pattern") +} + +func TestGroupFilter(t *testing.T) { + vars := dexcel.IdentityVariables() + compiler, err := dexcel.NewCompiler(vars) + require.NoError(t, err) + + tests := map[string]struct { + expr string + groups []string + want []string + }{ + "filter to allowed": { + expr: `dex.groupFilter(identity.groups, ["admin", "ops"])`, + groups: []string{"admin", "dev", "ops"}, + want: []string{"admin", "ops"}, + }, + "no overlap": { + expr: `dex.groupFilter(identity.groups, ["marketing"])`, + groups: 
[]string{"admin", "dev"}, + want: []string{}, + }, + "all allowed": { + expr: `dex.groupFilter(identity.groups, ["a", "b", "c"])`, + groups: []string{"a", "b", "c"}, + want: []string{"a", "b", "c"}, + }, + "empty allowed list": { + expr: `dex.groupFilter(identity.groups, [])`, + groups: []string{"admin", "dev"}, + want: []string{}, + }, + "preserves order": { + expr: `dex.groupFilter(identity.groups, ["z", "a"])`, + groups: []string{"a", "b", "z"}, + want: []string{"a", "z"}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + prog, err := compiler.CompileStringList(tc.expr) + require.NoError(t, err) + + out, err := dexcel.Eval(context.Background(), prog, map[string]any{ + "identity": dexcel.IdentityVal{Groups: tc.groups}, + }) + require.NoError(t, err) + + nativeVal, err := out.ConvertToNative(reflect.TypeOf([]string{})) + require.NoError(t, err) + + got, ok := nativeVal.([]string) + require.True(t, ok, "expected []string, got %T", nativeVal) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/pkg/cel/types.go b/pkg/cel/types.go new file mode 100644 index 00000000..4e657922 --- /dev/null +++ b/pkg/cel/types.go @@ -0,0 +1,109 @@ +package cel + +import ( + "github.com/google/cel-go/cel" + + "github.com/dexidp/dex/connector" +) + +// VariableDeclaration declares a named variable and its CEL type +// that will be available in expressions. +type VariableDeclaration struct { + Name string + Type *cel.Type +} + +// IdentityVal is the CEL native type for the identity variable. +// Fields are typed so that the CEL compiler rejects unknown field access +// (e.g. identity.emial) at config load time rather than at evaluation time. 
+type IdentityVal struct { + UserID string `cel:"user_id"` + Username string `cel:"username"` + PreferredUsername string `cel:"preferred_username"` + Email string `cel:"email"` + EmailVerified bool `cel:"email_verified"` + Groups []string `cel:"groups"` +} + +// RequestVal is the CEL native type for the request variable. +type RequestVal struct { + ClientID string `cel:"client_id"` + ConnectorID string `cel:"connector_id"` + Scopes []string `cel:"scopes"` + RedirectURI string `cel:"redirect_uri"` +} + +// identityTypeName is the CEL type name for IdentityVal. +// Derived by ext.NativeTypes as simplePkgAlias(pkgPath) + "." + structName. +const identityTypeName = "cel.IdentityVal" + +// requestTypeName is the CEL type name for RequestVal. +const requestTypeName = "cel.RequestVal" + +// IdentityVariables provides the 'identity' variable with typed fields. +// +// identity.user_id — string +// identity.username — string +// identity.preferred_username — string +// identity.email — string +// identity.email_verified — bool +// identity.groups — list(string) +func IdentityVariables() []VariableDeclaration { + return []VariableDeclaration{ + {Name: "identity", Type: cel.ObjectType(identityTypeName)}, + } +} + +// RequestVariables provides the 'request' variable with typed fields. +// +// request.client_id — string +// request.connector_id — string +// request.scopes — list(string) +// request.redirect_uri — string +func RequestVariables() []VariableDeclaration { + return []VariableDeclaration{ + {Name: "request", Type: cel.ObjectType(requestTypeName)}, + } +} + +// ClaimsVariable provides a 'claims' map for raw upstream claims. +// Claims remain map(string, dyn) because their shape is genuinely +// unknown — they carry arbitrary upstream IdP data. 
+// +// claims — map(string, dyn) +func ClaimsVariable() []VariableDeclaration { + return []VariableDeclaration{ + {Name: "claims", Type: cel.MapType(cel.StringType, cel.DynType)}, + } +} + +// IdentityFromConnector converts a connector.Identity to a CEL-compatible IdentityVal. +func IdentityFromConnector(id connector.Identity) IdentityVal { + return IdentityVal{ + UserID: id.UserID, + Username: id.Username, + PreferredUsername: id.PreferredUsername, + Email: id.Email, + EmailVerified: id.EmailVerified, + Groups: id.Groups, + } +} + +// RequestContext represents the authentication/token request context +// available as the 'request' variable in CEL expressions. +type RequestContext struct { + ClientID string + ConnectorID string + Scopes []string + RedirectURI string +} + +// RequestFromContext converts a RequestContext to a CEL-compatible RequestVal. +func RequestFromContext(rc RequestContext) RequestVal { + return RequestVal{ + ClientID: rc.ClientID, + ConnectorID: rc.ConnectorID, + Scopes: rc.Scopes, + RedirectURI: rc.RedirectURI, + } +} diff --git a/pkg/featureflags/doc.go b/pkg/featureflags/doc.go new file mode 100644 index 00000000..27033293 --- /dev/null +++ b/pkg/featureflags/doc.go @@ -0,0 +1,3 @@ +// Package featureflags provides a mechanism for toggling experimental or +// optional Dex features via environment variables (DEX_). +package featureflags diff --git a/pkg/featureflags/set.go b/pkg/featureflags/set.go index d4cdf4d8..d3942979 100644 --- a/pkg/featureflags/set.go +++ b/pkg/featureflags/set.go @@ -14,4 +14,14 @@ var ( // ContinueOnConnectorFailure allows the server to start even if some connectors fail to initialize. ContinueOnConnectorFailure = newFlag("continue_on_connector_failure", true) + + // ConfigDisallowUnknownFields enables to forbid unknown fields in the config while unmarshaling. 
+ ConfigDisallowUnknownFields = newFlag("config_disallow_unknown_fields", false) + + // ClientCredentialGrantEnabledByDefault enables the client_credentials grant type by default + // without requiring explicit configuration in oauth2.grantTypes. + ClientCredentialGrantEnabledByDefault = newFlag("client_credential_grant_enabled_by_default", false) + + // SessionsEnabled enables experimental auth sessions support. + SessionsEnabled = newFlag("sessions_enabled", false) ) diff --git a/pkg/groups/doc.go b/pkg/groups/doc.go new file mode 100644 index 00000000..f1a21d02 --- /dev/null +++ b/pkg/groups/doc.go @@ -0,0 +1,2 @@ +// Package groups contains helper functions related to groups. +package groups diff --git a/pkg/groups/groups.go b/pkg/groups/groups.go index 5dde65ab..d31a5dee 100644 --- a/pkg/groups/groups.go +++ b/pkg/groups/groups.go @@ -1,4 +1,3 @@ -// Package groups contains helper functions related to groups package groups // Filter filters out any groups of given that are not in required. Thus it may diff --git a/pkg/httpclient/doc.go b/pkg/httpclient/doc.go new file mode 100644 index 00000000..3d028a3a --- /dev/null +++ b/pkg/httpclient/doc.go @@ -0,0 +1,3 @@ +// Package httpclient provides a configurable HTTP client constructor with +// support for custom CA certificates, root CAs, and TLS settings. 
+package httpclient diff --git a/server/api.go b/server/api.go index 724c4807..1d2f73ba 100644 --- a/server/api.go +++ b/server/api.go @@ -58,13 +58,14 @@ func (d dexAPI) GetClient(ctx context.Context, req *api.GetClientReq) (*api.GetC return &api.GetClientResp{ Client: &api.Client{ - Id: c.ID, - Name: c.Name, - Secret: c.Secret, - RedirectUris: c.RedirectURIs, - TrustedPeers: c.TrustedPeers, - Public: c.Public, - LogoUrl: c.LogoURL, + Id: c.ID, + Name: c.Name, + Secret: c.Secret, + RedirectUris: c.RedirectURIs, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + LogoUrl: c.LogoURL, + AllowedConnectors: c.AllowedConnectors, }, }, nil } @@ -82,13 +83,14 @@ func (d dexAPI) CreateClient(ctx context.Context, req *api.CreateClientReq) (*ap } c := storage.Client{ - ID: req.Client.Id, - Secret: req.Client.Secret, - RedirectURIs: req.Client.RedirectUris, - TrustedPeers: req.Client.TrustedPeers, - Public: req.Client.Public, - Name: req.Client.Name, - LogoURL: req.Client.LogoUrl, + ID: req.Client.Id, + Secret: req.Client.Secret, + RedirectURIs: req.Client.RedirectUris, + TrustedPeers: req.Client.TrustedPeers, + Public: req.Client.Public, + Name: req.Client.Name, + LogoURL: req.Client.LogoUrl, + AllowedConnectors: req.Client.AllowedConnectors, } if err := d.s.CreateClient(ctx, c); err != nil { if err == storage.ErrAlreadyExists { @@ -121,6 +123,9 @@ func (d dexAPI) UpdateClient(ctx context.Context, req *api.UpdateClientReq) (*ap if req.LogoUrl != "" { old.LogoURL = req.LogoUrl } + if req.AllowedConnectors != nil { + old.AllowedConnectors = req.AllowedConnectors + } return old, nil }) if err != nil { @@ -155,12 +160,13 @@ func (d dexAPI) ListClients(ctx context.Context, req *api.ListClientReq) (*api.L clients := make([]*api.ClientInfo, 0, len(clientList)) for _, client := range clientList { c := api.ClientInfo{ - Id: client.ID, - Name: client.Name, - RedirectUris: client.RedirectURIs, - TrustedPeers: client.TrustedPeers, - Public: client.Public, - LogoUrl: client.LogoURL, + 
Id: client.ID, + Name: client.Name, + RedirectUris: client.RedirectURIs, + TrustedPeers: client.TrustedPeers, + Public: client.Public, + LogoUrl: client.LogoURL, + AllowedConnectors: client.AllowedConnectors, } clients = append(clients, &c) } @@ -455,12 +461,19 @@ func (d dexAPI) CreateConnector(ctx context.Context, req *api.CreateConnectorReq return nil, errors.New("invalid config supplied") } + for _, gt := range req.Connector.GrantTypes { + if !ConnectorGrantTypes[gt] { + return nil, fmt.Errorf("unknown grant type %q", gt) + } + } + c := storage.Connector{ ID: req.Connector.Id, Name: req.Connector.Name, Type: req.Connector.Type, ResourceVersion: "1", Config: req.Connector.Config, + GrantTypes: req.Connector.GrantTypes, } if err := d.s.CreateConnector(ctx, c); err != nil { if err == storage.ErrAlreadyExists { @@ -470,6 +483,11 @@ func (d dexAPI) CreateConnector(ctx context.Context, req *api.CreateConnectorReq return nil, fmt.Errorf("create connector: %v", err) } + // Make sure we don't reuse stale entries in the cache + if d.server != nil { + d.server.CloseConnector(req.Connector.Id) + } + return &api.CreateConnectorResp{}, nil } @@ -482,14 +500,26 @@ func (d dexAPI) UpdateConnector(ctx context.Context, req *api.UpdateConnectorReq return nil, errors.New("no email supplied") } - if len(req.NewConfig) == 0 && req.NewName == "" && req.NewType == "" { + hasUpdate := len(req.NewConfig) != 0 || + req.NewName != "" || + req.NewType != "" || + req.NewGrantTypes != nil + if !hasUpdate { return nil, errors.New("nothing to update") } - if !json.Valid(req.NewConfig) { + if len(req.NewConfig) != 0 && !json.Valid(req.NewConfig) { return nil, errors.New("invalid config supplied") } + if req.NewGrantTypes != nil { + for _, gt := range req.NewGrantTypes.GrantTypes { + if !ConnectorGrantTypes[gt] { + return nil, fmt.Errorf("unknown grant type %q", gt) + } + } + } + updater := func(old storage.Connector) (storage.Connector, error) { if req.NewType != "" { old.Type = req.NewType @@ 
-503,6 +533,10 @@ func (d dexAPI) UpdateConnector(ctx context.Context, req *api.UpdateConnectorReq old.Config = req.NewConfig } + if req.NewGrantTypes != nil { + old.GrantTypes = req.NewGrantTypes.GrantTypes + } + if rev, err := strconv.Atoi(defaultTo(old.ResourceVersion, "0")); err == nil { old.ResourceVersion = strconv.Itoa(rev + 1) } @@ -538,6 +572,7 @@ func (d dexAPI) DeleteConnector(ctx context.Context, req *api.DeleteConnectorReq d.logger.Error("api: failed to delete connector", "err", err) return nil, fmt.Errorf("delete connector: %v", err) } + return &api.DeleteConnectorResp{}, nil } @@ -555,10 +590,11 @@ func (d dexAPI) ListConnectors(ctx context.Context, req *api.ListConnectorReq) ( connectors := make([]*api.Connector, 0, len(connectorList)) for _, connector := range connectorList { c := api.Connector{ - Id: connector.ID, - Name: connector.Name, - Type: connector.Type, - Config: connector.Config, + Id: connector.ID, + Name: connector.Name, + Type: connector.Type, + Config: connector.Config, + GrantTypes: connector.GrantTypes, } connectors = append(connectors, &c) } diff --git a/server/api_cache_test.go b/server/api_cache_test.go new file mode 100644 index 00000000..64564c46 --- /dev/null +++ b/server/api_cache_test.go @@ -0,0 +1,133 @@ +package server + +import ( + "context" + "encoding/json" + "testing" + + "github.com/dexidp/dex/api/v2" + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/connector/mock" + "github.com/dexidp/dex/storage/memory" +) + +func TestConnectorCacheInvalidation(t *testing.T) { + t.Setenv("DEX_API_CONNECTORS_CRUD", "true") + + logger := newLogger(t) + s := memory.New(logger) + + serv := &Server{ + storage: s, + logger: logger, + connectors: make(map[string]Connector), + } + + apiServer := NewAPI(s, logger, "test", serv) + ctx := context.Background() + + connID := "mock-conn" + + // 1. 
Create a connector via API + config1 := mock.PasswordConfig{ + Username: "user", + Password: "first-password", + } + config1Bytes, _ := json.Marshal(config1) + + _, err := apiServer.CreateConnector(ctx, &api.CreateConnectorReq{ + Connector: &api.Connector{ + Id: connID, + Type: "mockPassword", + Name: "Mock", + Config: config1Bytes, + }, + }) + if err != nil { + t.Fatalf("failed to create connector: %v", err) + } + + // 2. Load it into server cache + c1, err := serv.getConnector(ctx, connID) + if err != nil { + t.Fatalf("failed to get connector: %v", err) + } + + pc1 := c1.Connector.(connector.PasswordConnector) + _, valid, err := pc1.Login(ctx, connector.Scopes{}, "user", "first-password") + if err != nil || !valid { + t.Fatalf("failed to login with first password: %v", err) + } + + // 3. Delete it via API + _, err = apiServer.DeleteConnector(ctx, &api.DeleteConnectorReq{Id: connID}) + if err != nil { + t.Fatalf("failed to delete connector: %v", err) + } + + // 4. Create it again with different password + config2 := mock.PasswordConfig{ + Username: "user", + Password: "second-password", + } + config2Bytes, _ := json.Marshal(config2) + + _, err = apiServer.CreateConnector(ctx, &api.CreateConnectorReq{ + Connector: &api.Connector{ + Id: connID, + Type: "mockPassword", + Name: "Mock", + Config: config2Bytes, + }, + }) + if err != nil { + t.Fatalf("failed to create connector: %v", err) + } + + // 5. Load it again + c2, err := serv.getConnector(ctx, connID) + if err != nil { + t.Fatalf("failed to get connector second time: %v", err) + } + + pc2 := c2.Connector.(connector.PasswordConnector) + + // If the fix works, it should now use the second password. 
+ _, valid2, err := pc2.Login(ctx, connector.Scopes{}, "user", "second-password") + if err != nil || !valid2 { + t.Errorf("failed to login with second password, cache might still be stale") + } + + _, valid1, _ := pc2.Login(ctx, connector.Scopes{}, "user", "first-password") + if valid1 { + t.Errorf("unexpectedly logged in with first password, cache is definitely stale") + } + + // 6. Update it via API with a third password + config3 := mock.PasswordConfig{ + Username: "user", + Password: "third-password", + } + config3Bytes, _ := json.Marshal(config3) + + _, err = apiServer.UpdateConnector(ctx, &api.UpdateConnectorReq{ + Id: connID, + NewConfig: config3Bytes, + }) + if err != nil { + t.Fatalf("failed to update connector: %v", err) + } + + // 7. Load it again + c3, err := serv.getConnector(ctx, connID) + if err != nil { + t.Fatalf("failed to get connector third time: %v", err) + } + + pc3 := c3.Connector.(connector.PasswordConnector) + + _, valid3, err := pc3.Login(ctx, connector.Scopes{}, "user", "third-password") + if err != nil || !valid3 { + t.Errorf("failed to login with third password, UpdateConnector might be missing cache invalidation") + } +} diff --git a/server/api_test.go b/server/api_test.go index 5ddbcc4a..09cfa678 100644 --- a/server/api_test.go +++ b/server/api_test.go @@ -606,6 +606,105 @@ func TestUpdateConnector(t *testing.T) { } } +func TestUpdateConnectorGrantTypes(t *testing.T) { + t.Setenv("DEX_API_CONNECTORS_CRUD", "true") + + logger := newLogger(t) + s := memory.New(logger) + + client := newAPI(t, s, logger) + defer client.Close() + + ctx := t.Context() + + connectorID := "connector-gt" + + // Create a connector without grant types + createReq := api.CreateConnectorReq{ + Connector: &api.Connector{ + Id: connectorID, + Name: "TestConnector", + Type: "TestType", + Config: []byte(`{"key": "value"}`), + }, + } + _, err := client.CreateConnector(ctx, &createReq) + if err != nil { + t.Fatalf("failed to create connector: %v", err) + } + + // Set 
grant types + _, err = client.UpdateConnector(ctx, &api.UpdateConnectorReq{ + Id: connectorID, + NewGrantTypes: &api.GrantTypes{GrantTypes: []string{"authorization_code", "refresh_token"}}, + }) + if err != nil { + t.Fatalf("failed to update connector grant types: %v", err) + } + + resp, err := client.ListConnectors(ctx, &api.ListConnectorReq{}) + if err != nil { + t.Fatalf("failed to list connectors: %v", err) + } + for _, c := range resp.Connectors { + if c.Id == connectorID { + if !slices.Equal(c.GrantTypes, []string{"authorization_code", "refresh_token"}) { + t.Fatalf("expected grant types [authorization_code refresh_token], got %v", c.GrantTypes) + } + } + } + + // Clear grant types by passing empty GrantTypes message + _, err = client.UpdateConnector(ctx, &api.UpdateConnectorReq{ + Id: connectorID, + NewGrantTypes: &api.GrantTypes{}, + }) + if err != nil { + t.Fatalf("failed to clear connector grant types: %v", err) + } + + resp, err = client.ListConnectors(ctx, &api.ListConnectorReq{}) + if err != nil { + t.Fatalf("failed to list connectors: %v", err) + } + for _, c := range resp.Connectors { + if c.Id == connectorID { + if len(c.GrantTypes) != 0 { + t.Fatalf("expected empty grant types after clear, got %v", c.GrantTypes) + } + } + } + + // Reject invalid grant type on update + _, err = client.UpdateConnector(ctx, &api.UpdateConnectorReq{ + Id: connectorID, + NewGrantTypes: &api.GrantTypes{GrantTypes: []string{"bogus"}}, + }) + if err == nil { + t.Fatal("expected error for invalid grant type, got nil") + } + if !strings.Contains(err.Error(), `unknown grant type "bogus"`) { + t.Fatalf("unexpected error: %v", err) + } + + // Reject invalid grant type on create + _, err = client.CreateConnector(ctx, &api.CreateConnectorReq{ + Connector: &api.Connector{ + Id: "bad-gt", + Name: "Bad", + Type: "TestType", + Config: []byte(`{}`), + GrantTypes: []string{"invalid_type"}, + }, + }) + if err == nil { + t.Fatal("expected error for invalid grant type on create, got nil") 
+ } + if !strings.Contains(err.Error(), `unknown grant type "invalid_type"`) { + t.Fatalf("unexpected error: %v", err) + } +} + func TestDeleteConnector(t *testing.T) { t.Setenv("DEX_API_CONNECTORS_CRUD", "true") diff --git a/server/deviceflowhandlers.go b/server/deviceflowhandlers.go index ec5fb52b..b9fb652a 100644 --- a/server/deviceflowhandlers.go +++ b/server/deviceflowhandlers.go @@ -431,7 +431,7 @@ func (s *Server) verifyUserCode(w http.ResponseWriter, r *http.Request) { } // Redirect to Dex Auth Endpoint - authURL := path.Join(s.issuerURL.Path, "/auth") + authURL := s.absURL("/auth") u, err := url.Parse(authURL) if err != nil { s.renderError(r, w, http.StatusInternalServerError, "Invalid auth URI.") @@ -442,7 +442,7 @@ func (s *Server) verifyUserCode(w http.ResponseWriter, r *http.Request) { q.Set("client_secret", deviceRequest.ClientSecret) q.Set("state", deviceRequest.UserCode) q.Set("response_type", "code") - q.Set("redirect_uri", "/device/callback") + q.Set("redirect_uri", s.absPath(deviceCallbackURI)) q.Set("scope", strings.Join(deviceRequest.Scopes, " ")) u.RawQuery = q.Encode() diff --git a/server/deviceflowhandlers_test.go b/server/deviceflowhandlers_test.go index ec7bf29d..1cbd60f7 100644 --- a/server/deviceflowhandlers_test.go +++ b/server/deviceflowhandlers_test.go @@ -364,7 +364,7 @@ func TestDeviceCallback(t *testing.T) { // Setup a dex server. 
httpServer, s := newTestServer(t, func(c *Config) { - // c.Issuer = c.Issuer + "/non-root-path" + c.Issuer = c.Issuer + "/non-root-path" c.Now = now }) defer httpServer.Close() @@ -752,7 +752,8 @@ func TestVerifyCodeResponse(t *testing.T) { testDeviceRequest storage.DeviceRequest userCode string expectedResponseCode int - expectedRedirectPath string + expectedAuthPath string + shouldRedirectToAuth bool }{ { testName: "Unknown user code", @@ -765,7 +766,6 @@ func TestVerifyCodeResponse(t *testing.T) { }, userCode: "CODE-TEST", expectedResponseCode: http.StatusBadRequest, - expectedRedirectPath: "", }, { testName: "Expired user code", @@ -778,7 +778,6 @@ func TestVerifyCodeResponse(t *testing.T) { }, userCode: "ABCD-WXYZ", expectedResponseCode: http.StatusBadRequest, - expectedRedirectPath: "", }, { testName: "No user code", @@ -791,10 +790,9 @@ func TestVerifyCodeResponse(t *testing.T) { }, userCode: "", expectedResponseCode: http.StatusBadRequest, - expectedRedirectPath: "", }, { - testName: "Valid user code, expect redirect to auth endpoint", + testName: "Valid user code, expect redirect to auth endpoint with device callback", testDeviceRequest: storage.DeviceRequest{ UserCode: "ABCD-WXYZ", DeviceCode: "f00bar", @@ -804,7 +802,8 @@ func TestVerifyCodeResponse(t *testing.T) { }, userCode: "ABCD-WXYZ", expectedResponseCode: http.StatusFound, - expectedRedirectPath: "/auth", + expectedAuthPath: "/auth", + shouldRedirectToAuth: true, }, } for _, tc := range tests { @@ -839,15 +838,24 @@ func TestVerifyCodeResponse(t *testing.T) { t.Errorf("Unexpected Response Type. Expected %v got %v", tc.expectedResponseCode, rr.Code) } - u, err = url.Parse(s.issuerURL.String()) - if err != nil { - t.Errorf("Could not parse issuer URL %v", err) - } - u.Path = path.Join(u.Path, tc.expectedRedirectPath) - location := rr.Header().Get("Location") - if rr.Code == http.StatusFound && !strings.HasPrefix(location, u.Path) { - t.Errorf("Invalid Redirect. 
Expected %v got %v", u.Path, location) + if rr.Code == http.StatusFound && tc.shouldRedirectToAuth { + // Parse the redirect location + redirectURL, err := url.Parse(location) + if err != nil { + t.Errorf("Could not parse redirect URL: %v", err) + return + } + + // Check that the redirect path contains /auth + if !strings.Contains(redirectURL.Path, tc.expectedAuthPath) { + t.Errorf("Invalid Redirect Path. Expected to contain %q got %q", tc.expectedAuthPath, redirectURL.Path) + } + + // Check that redirect_uri parameter contains /device/callback + if !strings.Contains(location, "redirect_uri=%2Fnon-root-path%2Fdevice%2Fcallback") { + t.Errorf("Invalid redirect_uri parameter. Expected to contain /device/callback (URL encoded), got %v", location) + } } }) } diff --git a/server/handlers.go b/server/handlers.go index f64aa1d7..bfdb0375 100644 --- a/server/handlers.go +++ b/server/handlers.go @@ -23,6 +23,7 @@ import ( "github.com/gorilla/mux" "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/featureflags" "github.com/dexidp/dex/server/internal" "github.com/dexidp/dex/storage" ) @@ -116,7 +117,7 @@ func (s *Server) constructDiscovery(ctx context.Context) discovery { Introspect: s.absURL("/token/introspect"), Subjects: []string{"public"}, IDTokenAlgs: []string{string(jose.RS256)}, - CodeChallengeAlgs: []string{codeChallengeMethodS256, codeChallengeMethodPlain}, + CodeChallengeAlgs: s.pkce.CodeChallengeMethodsSupported, Scopes: []string{"openid", "email", "groups", "profile", "offline_access"}, AuthMethods: []string{"client_secret_basic", "client_secret_post"}, Claims: []string{ @@ -142,6 +143,21 @@ func (s *Server) constructDiscovery(ctx context.Context) discovery { return d } +// grantTypeFromAuthRequest determines the grant type from the authorization request parameters. 
+func (s *Server) grantTypeFromAuthRequest(r *http.Request) string { + redirectURI := r.Form.Get("redirect_uri") + if redirectURI == deviceCallbackURI || strings.HasSuffix(redirectURI, deviceCallbackURI) { + return grantTypeDeviceCode + } + responseType := r.Form.Get("response_type") + for _, rt := range strings.Fields(responseType) { + if rt == "token" || rt == "id_token" { + return grantTypeImplicit + } + } + return grantTypeAuthorizationCode +} + // handleAuthorization handles the OAuth2 auth endpoint. func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -154,13 +170,36 @@ func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { } connectorID := r.Form.Get("connector_id") - connectors, err := s.storage.ListConnectors(ctx) + allConnectors, err := s.storage.ListConnectors(ctx) if err != nil { s.logger.ErrorContext(r.Context(), "failed to get list of connectors", "err", err) s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve connector list.") return } + // Determine the grant type from the authorization request to filter connectors. + grantType := s.grantTypeFromAuthRequest(r) + connectors := make([]storage.Connector, 0, len(allConnectors)) + for _, c := range allConnectors { + if GrantTypeAllowed(c.GrantTypes, grantType) { + connectors = append(connectors, c) + } + } + + // Filter connectors based on the client's allowed connectors list. + // client_id is required per RFC 6749 §4.1.1. 
+ client, authErr := s.getClientWithAuthError(ctx, r.Form.Get("client_id")) + if authErr != nil { + s.renderError(r, w, authErr.Status, authErr.Error()) + return + } + connectors = filterConnectors(connectors, client.AllowedConnectors) + + if len(connectors) == 0 { + s.renderError(r, w, http.StatusBadRequest, "No connectors available for this client.") + return + } + // We don't need connector_id any more r.Form.Del("connector_id") @@ -187,15 +226,15 @@ func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, connURL.String(), http.StatusFound) } - connectorInfos := make([]connectorInfo, len(connectors)) - for index, conn := range connectors { + connectorInfos := make([]connectorInfo, 0, len(connectors)) + for _, conn := range connectors { connURL.Path = s.absPath("/auth", url.PathEscape(conn.ID)) - connectorInfos[index] = connectorInfo{ + connectorInfos = append(connectorInfos, connectorInfo{ ID: conn.ID, Name: conn.Name, Type: conn.Type, URL: template.URL(connURL.String()), - } + }) } if err := s.templates.login(r, w, connectorInfos); err != nil { @@ -203,6 +242,57 @@ func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { } } +// filterConnectors filters the list of connectors by the allowed connector IDs. +// If allowedConnectors is empty, all connectors are returned (no filtering). +func filterConnectors(connectors []storage.Connector, allowedConnectors []string) []storage.Connector { + if len(allowedConnectors) == 0 { + return connectors + } + + allowed := make(map[string]bool, len(allowedConnectors)) + for _, id := range allowedConnectors { + allowed[id] = true + } + + filtered := make([]storage.Connector, 0, len(connectors)) + for _, c := range connectors { + if allowed[c.ID] { + filtered = append(filtered, c) + } + } + return filtered +} + +// isConnectorAllowed checks if a connector ID is in the client's allowed connectors list. +// If allowedConnectors is empty, all connectors are allowed. 
+func isConnectorAllowed(allowedConnectors []string, connectorID string) bool { + if len(allowedConnectors) == 0 { + return true + } + for _, id := range allowedConnectors { + if id == connectorID { + return true + } + } + return false +} + +// getClientWithAuthError retrieves a client by ID and returns a displayedAuthErr on failure. +// Invalid client_id is not treated as a redirect error per RFC 6749 §4.1.2.1. +// https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.2.1 +func (s *Server) getClientWithAuthError(ctx context.Context, clientID string) (storage.Client, *displayedAuthErr) { + client, err := s.storage.GetClient(ctx, clientID) + if err != nil { + if err == storage.ErrNotFound { + s.logger.ErrorContext(ctx, "invalid client_id provided", "client_id", clientID) + return storage.Client{}, newDisplayedErr(http.StatusBadRequest, "Invalid client_id provided.") + } + s.logger.ErrorContext(ctx, "failed to get client", "client_id", clientID, "err", err) + return storage.Client{}, newDisplayedErr(http.StatusInternalServerError, "Database error.") + } + return client, nil +} + func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) { ctx := r.Context() authReq, err := s.parseAuthorizationRequest(r) @@ -228,6 +318,19 @@ func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) { return } + // Validate that the connector is allowed for this client. 
+ client, authErr := s.getClientWithAuthError(ctx, authReq.ClientID) + if authErr != nil { + s.renderError(r, w, authErr.Status, authErr.Error()) + return + } + if !isConnectorAllowed(client.AllowedConnectors, connID) { + s.logger.ErrorContext(r.Context(), "connector not allowed for client", + "connector_id", connID, "client_id", authReq.ClientID) + s.renderError(r, w, http.StatusForbidden, "Connector not allowed for this client.") + return + } + conn, err := s.getConnector(ctx, connID) if err != nil { s.logger.ErrorContext(r.Context(), "Failed to get connector", "err", err) @@ -235,6 +338,15 @@ func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) { return } + // Check if the connector allows the requested grant type. + grantType := s.grantTypeFromAuthRequest(r) + if !GrantTypeAllowed(conn.GrantTypes, grantType) { + s.logger.ErrorContext(r.Context(), "connector does not allow requested grant type", + "connector_id", connID, "grant_type", grantType) + s.renderError(r, w, http.StatusBadRequest, "Requested connector does not support this grant type.") + return + } + // Set the connector being used for the login. 
if authReq.ConnectorID != "" && authReq.ConnectorID != connID { s.logger.ErrorContext(r.Context(), "mismatched connector ID in auth request", @@ -558,8 +670,9 @@ func (s *Server) finalizeLogin(ctx context.Context, identity connector.Identity, } s.logger.InfoContext(ctx, "login successful", - "connector_id", authReq.ConnectorID, "username", claims.Username, - "preferred_username", claims.PreferredUsername, "email", email, "groups", claims.Groups) + "connector_id", authReq.ConnectorID, "user_id", claims.UserID, + "username", claims.Username, "preferred_username", claims.PreferredUsername, + "email", email, "groups", claims.Groups) offlineAccessRequested := false for _, scope := range authReq.Scopes { @@ -605,11 +718,60 @@ func (s *Server) finalizeLogin(ctx context.Context, identity connector.Identity, } } + // Create or update UserIdentity to persist user claims across sessions. + var userIdentity *storage.UserIdentity + if featureflags.SessionsEnabled.Enabled() { + now := s.now() + + ui, err := s.storage.GetUserIdentity(ctx, identity.UserID, authReq.ConnectorID) + switch { + case err != nil && errors.Is(err, storage.ErrNotFound): + ui = storage.UserIdentity{ + UserID: identity.UserID, + ConnectorID: authReq.ConnectorID, + Claims: claims, + Consents: make(map[string][]string), + CreatedAt: now, + LastLogin: now, + } + if err := s.storage.CreateUserIdentity(ctx, ui); err != nil { + s.logger.ErrorContext(ctx, "failed to create user identity", "err", err) + return "", false, err + } + case err == nil: + if err := s.storage.UpdateUserIdentity(ctx, identity.UserID, authReq.ConnectorID, func(old storage.UserIdentity) (storage.UserIdentity, error) { + if len(identity.ConnectorData) > 0 { + old.Claims = claims + old.LastLogin = now + return old, nil + } + return old, nil + }); err != nil { + s.logger.ErrorContext(ctx, "failed to update user identity", "err", err) + return "", false, err + } + // Update the existing UserIdentity obj with new claims to use them later in the 
flow. + ui.Claims = claims + ui.LastLogin = now + default: + s.logger.ErrorContext(ctx, "failed to get user identity", "err", err) + return "", false, err + } + userIdentity = &ui + } + // we can skip the redirect to /approval and go ahead and send code if it's not required if s.skipApproval && !authReq.ForceApprovalPrompt { return "", true, nil } + // Skip approval if user already consented to the requested scopes for this client. + if !authReq.ForceApprovalPrompt && userIdentity != nil { + if scopesCoveredByConsent(userIdentity.Consents[authReq.ClientID], authReq.Scopes) { + return "", true, nil + } + } + // an HMAC is used here to ensure that the request ID is unpredictable, ensuring that an attacker who intercepted the original // flow would be unable to poll for the result at the /approval endpoint h := hmac.New(sha256.New, authReq.HMACKey) @@ -635,6 +797,10 @@ func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) { authReq, err := s.storage.GetAuthRequest(ctx, r.FormValue("req")) if err != nil { + if err == storage.ErrNotFound { + s.renderError(r, w, http.StatusBadRequest, "User session error.") + return + } s.logger.ErrorContext(r.Context(), "failed to get auth request", "err", err) s.renderError(r, w, http.StatusInternalServerError, "Database error.") return @@ -671,6 +837,18 @@ func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) { s.renderError(r, w, http.StatusInternalServerError, "Approval rejected.") return } + // Persist user-approved scopes as consent for this client. 
+ if featureflags.SessionsEnabled.Enabled() { + if err := s.storage.UpdateUserIdentity(ctx, authReq.Claims.UserID, authReq.ConnectorID, func(old storage.UserIdentity) (storage.UserIdentity, error) { + if old.Consents == nil { + old.Consents = make(map[string][]string) + } + old.Consents[authReq.ClientID] = authReq.Scopes + return old, nil + }); err != nil { + s.logger.ErrorContext(ctx, "failed to update user identity consents", "err", err) + } + } s.sendCodeResponse(w, r, authReq) } } @@ -816,6 +994,27 @@ func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authRe http.Redirect(w, r, u.String(), http.StatusSeeOther) } +// scopesCoveredByConsent checks whether the approved scopes cover all requested scopes. +// The openid scope is excluded from the comparison as it is a technical scope +// that does not require user consent. +func scopesCoveredByConsent(approved, requested []string) bool { + approvedSet := make(map[string]struct{}, len(approved)) + for _, s := range approved { + approvedSet[s] = struct{}{} + } + + for _, scope := range requested { + if scope == scopeOpenID { + continue + } + if _, ok := approvedSet[scope]; !ok { + return false + } + } + + return true +} + func (s *Server) withClientFromStorage(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, storage.Client)) { ctx := r.Context() clientID, clientSecret, ok := r.BasicAuth() @@ -889,6 +1088,8 @@ func (s *Server) handleToken(w http.ResponseWriter, r *http.Request) { s.withClientFromStorage(w, r, s.handlePasswordGrant) case grantTypeTokenExchange: s.withClientFromStorage(w, r, s.handleTokenExchange) + case grantTypeClientCredentials: + s.withClientFromStorage(w, r, s.handleClientCredentialsGrant) default: s.tokenErrHelper(w, errUnsupportedGrantType, "", http.StatusBadRequest) } @@ -989,9 +1190,16 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au } reqRefresh := func() bool { - // Ensure the connector supports 
refresh tokens. + // Determine whether to issue a refresh token. A refresh token is only + // issued when all of the following are true: + // 1. The connector implements RefreshConnector. + // 2. The connector's grantTypes config allows refresh_token. + // 3. The client requested the offline_access scope. // - // Connectors like `saml` do not implement RefreshConnector. + // When any condition is not met, the refresh token is silently omitted + // rather than returning an error. This matches the OAuth2 spec: the + // server is never required to issue a refresh token (RFC 6749 §1.5). + // https://datatracker.ietf.org/doc/html/rfc6749#section-1.5 conn, err := s.getConnector(ctx, authCode.ConnectorID) if err != nil { s.logger.ErrorContext(ctx, "connector not found", "connector_id", authCode.ConnectorID, "err", err) @@ -1004,6 +1212,10 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au return false } + if !GrantTypeAllowed(conn.GrantTypes, grantTypeRefreshToken) { + return false + } + for _, scope := range authCode.Scopes { if scope == scopeOfflineAccess { return true @@ -1072,9 +1284,10 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au return nil, err } offlineSessions := storage.OfflineSessions{ - UserID: refresh.Claims.UserID, - ConnID: refresh.ConnectorID, - Refresh: make(map[string]*storage.RefreshTokenRef), + UserID: refresh.Claims.UserID, + ConnID: refresh.ConnectorID, + Refresh: make(map[string]*storage.RefreshTokenRef), + ConnectorData: refresh.ConnectorData, } offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef @@ -1100,6 +1313,9 @@ func (s *Server) exchangeAuthCode(ctx context.Context, w http.ResponseWriter, au // Update existing OfflineSession obj with new RefreshTokenRef. 
if err := s.storage.UpdateOfflineSessions(ctx, session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { old.Refresh[tokenRef.ClientID] = &tokenRef + if len(refresh.ConnectorData) > 0 { + old.ConnectorData = refresh.ConnectorData + } return old, nil }); err != nil { s.logger.ErrorContext(ctx, "failed to update offline session", "err", err) @@ -1205,6 +1421,11 @@ func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, cli s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest) return } + if !GrantTypeAllowed(conn.GrantTypes, grantTypePassword) { + s.logger.ErrorContext(r.Context(), "connector does not allow password grant", "connector_id", connID) + s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not support password grant.", http.StatusBadRequest) + return + } passwordConnector, ok := conn.Connector.(connector.PasswordConnector) if !ok { @@ -1251,11 +1472,15 @@ func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, cli } reqRefresh := func() bool { - // Ensure the connector supports refresh tokens. - // - // Connectors like `saml` do not implement RefreshConnector. - _, ok := conn.Connector.(connector.RefreshConnector) - if !ok { + // Same logic as in exchangeAuthCode: silently omit refresh token + // when the connector doesn't support it or grantTypes forbids it. + // See RFC 6749 §1.5 — refresh tokens are never mandatory. 
+ // https://datatracker.ietf.org/doc/html/rfc6749#section-1.5 + if _, ok := conn.Connector.(connector.RefreshConnector); !ok { + return false + } + + if !GrantTypeAllowed(conn.GrantTypes, grantTypeRefreshToken) { return false } @@ -1417,6 +1642,11 @@ func (s *Server) handleTokenExchange(w http.ResponseWriter, r *http.Request, cli s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest) return } + if !GrantTypeAllowed(conn.GrantTypes, grantTypeTokenExchange) { + s.logger.ErrorContext(r.Context(), "connector does not allow token exchange", "connector_id", connID) + s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not support token exchange.", http.StatusBadRequest) + return + } teConn, ok := conn.Connector.(connector.TokenIdentityConnector) if !ok { s.logger.ErrorContext(r.Context(), "connector doesn't implement token exchange", "connector_id", connID) @@ -1476,6 +1706,108 @@ func (s *Server) handleTokenExchange(w http.ResponseWriter, r *http.Request, cli json.NewEncoder(w).Encode(resp) } +func (s *Server) handleClientCredentialsGrant(w http.ResponseWriter, r *http.Request, client storage.Client) { + ctx := r.Context() + + // client_credentials requires a confidential client. + if client.Public { + s.tokenErrHelper(w, errUnauthorizedClient, "Public clients cannot use client_credentials grant.", http.StatusBadRequest) + return + } + + // Parse scopes from request. + if err := r.ParseForm(); err != nil { + s.tokenErrHelper(w, errInvalidRequest, "Couldn't parse data", http.StatusBadRequest) + return + } + scopes := strings.Fields(r.Form.Get("scope")) + + // Validate scopes. 
+ var ( + unrecognized []string + invalidScopes []string + ) + hasOpenIDScope := false + for _, scope := range scopes { + switch scope { + case scopeOpenID: + hasOpenIDScope = true + case scopeEmail, scopeProfile, scopeGroups: + // allowed + case scopeOfflineAccess: + s.tokenErrHelper(w, errInvalidScope, "client_credentials grant does not support offline_access scope.", http.StatusBadRequest) + return + case scopeFederatedID: + s.tokenErrHelper(w, errInvalidScope, "client_credentials grant does not support federated:id scope.", http.StatusBadRequest) + return + default: + peerID, ok := parseCrossClientScope(scope) + if !ok { + unrecognized = append(unrecognized, scope) + continue + } + + isTrusted, err := s.validateCrossClientTrust(ctx, client.ID, peerID) + if err != nil { + s.logger.ErrorContext(ctx, "error validating cross client trust", "client_id", client.ID, "peer_id", peerID, "err", err) + s.tokenErrHelper(w, errInvalidClient, "Error validating cross client trust.", http.StatusBadRequest) + return + } + if !isTrusted { + invalidScopes = append(invalidScopes, scope) + } + } + } + if len(unrecognized) > 0 { + s.tokenErrHelper(w, errInvalidScope, fmt.Sprintf("Unrecognized scope(s) %q", unrecognized), http.StatusBadRequest) + return + } + if len(invalidScopes) > 0 { + s.tokenErrHelper(w, errInvalidScope, fmt.Sprintf("Client can't request scope(s) %q", invalidScopes), http.StatusBadRequest) + return + } + + // Build claims from the client itself — no user involved. + claims := storage.Claims{ + UserID: client.ID, + } + + // Only populate Username/PreferredUsername when the profile scope is requested. 
+ for _, scope := range scopes { + if scope == scopeProfile { + claims.Username = client.Name + claims.PreferredUsername = client.Name + break + } + } + + nonce := r.Form.Get("nonce") + + // Empty connector ID is unique for cluster credentials grant + // Creating connectors with an empty ID with the config and API is prohibited + connID := "" + + accessToken, expiry, err := s.newAccessToken(ctx, client.ID, claims, scopes, nonce, connID) + if err != nil { + s.logger.ErrorContext(ctx, "client_credentials grant failed to create new access token", "err", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + + var idToken string + if hasOpenIDScope { + idToken, expiry, err = s.newIDToken(ctx, client.ID, claims, scopes, nonce, accessToken, "", connID) + if err != nil { + s.logger.ErrorContext(ctx, "client_credentials grant failed to create new ID token", "err", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + } + + resp := s.toAccessTokenResponse(idToken, accessToken, "", expiry) + s.writeAccessToken(w, resp) +} + type accessTokenResponse struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type,omitempty"` diff --git a/server/handlers_approval_test.go b/server/handlers_approval_test.go new file mode 100644 index 00000000..5ab80fc5 --- /dev/null +++ b/server/handlers_approval_test.go @@ -0,0 +1,117 @@ +package server + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "errors" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/dexidp/dex/storage" +) + +type getAuthRequestErrorStorage struct { + storage.Storage + err error +} + +func (s *getAuthRequestErrorStorage) GetAuthRequest(context.Context, string) (storage.AuthRequest, error) { + return storage.AuthRequest{}, s.err +} + +func TestHandleApprovalGetAuthRequestErrorGET(t *testing.T) 
{ + httpServer, server := newTestServer(t, func(c *Config) { + c.Storage = &getAuthRequestErrorStorage{Storage: c.Storage, err: errors.New("storage unavailable")} + }) + defer httpServer.Close() + + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/approval?req=any&hmac=AQ", nil) + + server.ServeHTTP(rr, req) + + require.Equal(t, http.StatusInternalServerError, rr.Code) + require.Contains(t, rr.Body.String(), "Database error.") +} + +func TestHandleApprovalGetAuthRequestNotFoundGET(t *testing.T) { + httpServer, server := newTestServer(t, nil) + defer httpServer.Close() + + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/approval?req=does-not-exist&hmac=AQ", nil) + + server.ServeHTTP(rr, req) + + require.Equal(t, http.StatusBadRequest, rr.Code) + require.Contains(t, rr.Body.String(), "User session error.") + require.NotContains(t, rr.Body.String(), "Database error.") +} + +func TestHandleApprovalGetAuthRequestNotFoundPOST(t *testing.T) { + httpServer, server := newTestServer(t, nil) + defer httpServer.Close() + + body := strings.NewReader("approval=approve&req=does-not-exist&hmac=AQ") + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/approval", body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + server.ServeHTTP(rr, req) + + require.Equal(t, http.StatusBadRequest, rr.Code) + require.Contains(t, rr.Body.String(), "User session error.") + require.NotContains(t, rr.Body.String(), "Database error.") +} + +func TestHandleApprovalDoubleSubmitPOST(t *testing.T) { + ctx := t.Context() + httpServer, server := newTestServer(t, nil) + defer httpServer.Close() + + authReq := storage.AuthRequest{ + ID: "approval-double-submit", + ClientID: "test", + ResponseTypes: []string{responseTypeCode}, + RedirectURI: "https://client.example/callback", + Expiry: time.Now().Add(time.Minute), + LoggedIn: true, + HMACKey: []byte("approval-double-submit-key"), + } + require.NoError(t, 
server.storage.CreateAuthRequest(ctx, authReq)) + + h := hmac.New(sha256.New, authReq.HMACKey) + h.Write([]byte(authReq.ID)) + mac := base64.RawURLEncoding.EncodeToString(h.Sum(nil)) + + form := url.Values{ + "approval": {"approve"}, + "req": {authReq.ID}, + "hmac": {mac}, + } + + firstRR := httptest.NewRecorder() + firstReq := httptest.NewRequest(http.MethodPost, "/approval", strings.NewReader(form.Encode())) + firstReq.Header.Set("Content-Type", "application/x-www-form-urlencoded") + server.ServeHTTP(firstRR, firstReq) + + require.Equal(t, http.StatusSeeOther, firstRR.Code) + require.Contains(t, firstRR.Header().Get("Location"), "https://client.example/callback") + + secondRR := httptest.NewRecorder() + secondReq := httptest.NewRequest(http.MethodPost, "/approval", strings.NewReader(form.Encode())) + secondReq.Header.Set("Content-Type", "application/x-www-form-urlencoded") + server.ServeHTTP(secondRR, secondReq) + + require.Equal(t, http.StatusBadRequest, secondRR.Code) + require.Contains(t, secondRR.Body.String(), "User session error.") + require.NotContains(t, secondRR.Body.String(), "Database error.") +} diff --git a/server/handlers_test.go b/server/handlers_test.go index 0514d85c..933e9e4d 100644 --- a/server/handlers_test.go +++ b/server/handlers_test.go @@ -3,6 +3,9 @@ package server import ( "bytes" "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -21,6 +24,8 @@ import ( "golang.org/x/crypto/bcrypt" "golang.org/x/oauth2" + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/server/internal" "github.com/dexidp/dex/storage" ) @@ -62,6 +67,7 @@ func TestHandleDiscovery(t *testing.T) { Introspect: fmt.Sprintf("%s/token/introspect", httpServer.URL), GrantTypes: []string{ "authorization_code", + "client_credentials", "refresh_token", "urn:ietf:params:oauth:grant-type:device_code", "urn:ietf:params:oauth:grant-type:token-exchange", @@ -492,6 +498,396 @@ func TestHandlePassword_LocalPasswordDBClaims(t 
*testing.T) { require.Equal(t, []string{"team-a", "team-a/admins"}, claims.Groups) } +func setSessionsEnabled(t *testing.T, enabled bool) { + t.Helper() + if enabled { + t.Setenv("DEX_SESSIONS_ENABLED", "true") + } else { + t.Setenv("DEX_SESSIONS_ENABLED", "false") + } +} + +func TestFinalizeLoginCreatesUserIdentity(t *testing.T) { + ctx := t.Context() + setSessionsEnabled(t, true) + + connID := "mockPw" + authReqID := "test-create-ui" + expiry := time.Now().Add(100 * time.Second) + + httpServer, s := newTestServer(t, func(c *Config) { + c.SkipApprovalScreen = true + c.Now = time.Now + }) + defer httpServer.Close() + + sc := storage.Connector{ + ID: connID, + Type: "mockPassword", + Name: "MockPassword", + ResourceVersion: "1", + Config: []byte(`{"username": "foo", "password": "password"}`), + } + require.NoError(t, s.storage.CreateConnector(ctx, sc)) + _, err := s.OpenConnector(sc) + require.NoError(t, err) + + authReq := storage.AuthRequest{ + ID: authReqID, + ConnectorID: connID, + RedirectURI: "cb", + Expiry: expiry, + ResponseTypes: []string{responseTypeCode}, + } + require.NoError(t, s.storage.CreateAuthRequest(ctx, authReq)) + + rr := httptest.NewRecorder() + reqPath := fmt.Sprintf("/auth/%s/login?state=%s&back=&login=foo&password=password", connID, authReqID) + s.handlePasswordLogin(rr, httptest.NewRequest("POST", reqPath, nil)) + + require.Equal(t, 303, rr.Code) + + ui, err := s.storage.GetUserIdentity(ctx, "0-385-28089-0", connID) + require.NoError(t, err, "UserIdentity should exist after login") + require.Equal(t, "0-385-28089-0", ui.UserID) + require.Equal(t, connID, ui.ConnectorID) + require.Equal(t, "kilgore@kilgore.trout", ui.Claims.Email) + require.NotZero(t, ui.CreatedAt, "CreatedAt should be set") + require.NotZero(t, ui.LastLogin, "LastLogin should be set") +} + +func TestFinalizeLoginUpdatesUserIdentity(t *testing.T) { + ctx := t.Context() + setSessionsEnabled(t, true) + + connID := "mockPw" + authReqID := "test-update-ui" + expiry := 
time.Now().Add(100 * time.Second) + oldTime := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) + + httpServer, s := newTestServer(t, func(c *Config) { + c.SkipApprovalScreen = true + c.Now = time.Now + }) + defer httpServer.Close() + + sc := storage.Connector{ + ID: connID, + Type: "mockPassword", + Name: "MockPassword", + ResourceVersion: "1", + Config: []byte(`{"username": "foo", "password": "password"}`), + } + require.NoError(t, s.storage.CreateConnector(ctx, sc)) + _, err := s.OpenConnector(sc) + require.NoError(t, err) + + // Pre-create UserIdentity with old data + require.NoError(t, s.storage.CreateUserIdentity(ctx, storage.UserIdentity{ + UserID: "0-385-28089-0", + ConnectorID: connID, + Claims: storage.Claims{ + UserID: "0-385-28089-0", + Username: "Old Name", + Email: "old@example.com", + }, + Consents: map[string][]string{"existing-client": {"openid"}}, + CreatedAt: oldTime, + LastLogin: oldTime, + })) + + authReq := storage.AuthRequest{ + ID: authReqID, + ConnectorID: connID, + RedirectURI: "cb", + Expiry: expiry, + ResponseTypes: []string{responseTypeCode}, + } + require.NoError(t, s.storage.CreateAuthRequest(ctx, authReq)) + + rr := httptest.NewRecorder() + reqPath := fmt.Sprintf("/auth/%s/login?state=%s&back=&login=foo&password=password", connID, authReqID) + s.handlePasswordLogin(rr, httptest.NewRequest("POST", reqPath, nil)) + + require.Equal(t, 303, rr.Code) + + ui, err := s.storage.GetUserIdentity(ctx, "0-385-28089-0", connID) + require.NoError(t, err, "UserIdentity should exist after login") + require.Equal(t, "Kilgore Trout", ui.Claims.Username, "claims should be refreshed from the connector") + require.Equal(t, "kilgore@kilgore.trout", ui.Claims.Email, "claims should be refreshed from the connector") + require.True(t, ui.LastLogin.After(oldTime), "LastLogin should be updated") + require.Equal(t, oldTime, ui.CreatedAt, "CreatedAt should not change on update") + require.Equal(t, []string{"openid"}, ui.Consents["existing-client"], "existing consents 
should be preserved") +} + +func TestFinalizeLoginSkipsUserIdentityWhenDisabled(t *testing.T) { + ctx := t.Context() + setSessionsEnabled(t, false) + + connID := "mockPw" + authReqID := "test-no-ui" + expiry := time.Now().Add(100 * time.Second) + + httpServer, s := newTestServer(t, func(c *Config) { + c.SkipApprovalScreen = true + c.Now = time.Now + }) + defer httpServer.Close() + + sc := storage.Connector{ + ID: connID, + Type: "mockPassword", + Name: "MockPassword", + ResourceVersion: "1", + Config: []byte(`{"username": "foo", "password": "password"}`), + } + require.NoError(t, s.storage.CreateConnector(ctx, sc)) + _, err := s.OpenConnector(sc) + require.NoError(t, err) + + authReq := storage.AuthRequest{ + ID: authReqID, + ConnectorID: connID, + RedirectURI: "cb", + Expiry: expiry, + ResponseTypes: []string{responseTypeCode}, + } + require.NoError(t, s.storage.CreateAuthRequest(ctx, authReq)) + + rr := httptest.NewRecorder() + reqPath := fmt.Sprintf("/auth/%s/login?state=%s&back=&login=foo&password=password", connID, authReqID) + s.handlePasswordLogin(rr, httptest.NewRequest("POST", reqPath, nil)) + + require.Equal(t, 303, rr.Code) + + _, err = s.storage.GetUserIdentity(ctx, "0-385-28089-0", connID) + require.ErrorIs(t, err, storage.ErrNotFound, "UserIdentity should not be created when sessions disabled") +} + +func TestSkipApprovalWithExistingConsent(t *testing.T) { + ctx := t.Context() + setSessionsEnabled(t, true) + + connID := "mock" + authReqID := "test-consent-skip" + expiry := time.Now().Add(100 * time.Second) + + tests := []struct { + name string + consents map[string][]string + scopes []string + clientID string + forcePrompt bool + wantPath string + }{ + { + name: "Existing consent covers requested scopes", + consents: map[string][]string{"test": {"email", "profile"}}, + scopes: []string{"openid", "email", "profile"}, + clientID: "test", + wantPath: "/callback/cb", + }, + { + name: "Existing consent missing a scope", + consents: 
map[string][]string{"test": {"email"}}, + scopes: []string{"openid", "email", "profile"}, + clientID: "test", + wantPath: "/approval", + }, + { + name: "Force approval overrides consent", + consents: map[string][]string{"test": {"email", "profile"}}, + scopes: []string{"openid", "email", "profile"}, + clientID: "test", + forcePrompt: true, + wantPath: "/approval", + }, + { + name: "No consent for this client", + consents: map[string][]string{"other-client": {"email"}}, + scopes: []string{"openid", "email"}, + clientID: "test", + wantPath: "/approval", + }, + { + name: "Only openid scope - skip with empty consent", + consents: map[string][]string{"test": {}}, + scopes: []string{"openid"}, + clientID: "test", + wantPath: "/callback/cb", + }, + { + name: "offline_access requires consent", + consents: map[string][]string{"test": {}}, + scopes: []string{"openid", "offline_access"}, + clientID: "test", + wantPath: "/approval", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + httpServer, s := newTestServer(t, func(c *Config) { + c.SkipApprovalScreen = false + c.Now = time.Now + }) + defer httpServer.Close() + + // Pre-create UserIdentity with consents + require.NoError(t, s.storage.CreateUserIdentity(ctx, storage.UserIdentity{ + UserID: "0-385-28089-0", + ConnectorID: connID, + Claims: storage.Claims{ + UserID: "0-385-28089-0", + Username: "Kilgore Trout", + Email: "kilgore@kilgore.trout", + EmailVerified: true, + }, + Consents: tc.consents, + CreatedAt: time.Now(), + LastLogin: time.Now(), + })) + + authReq := storage.AuthRequest{ + ID: authReqID, + ConnectorID: connID, + ClientID: tc.clientID, + RedirectURI: "cb", + Expiry: expiry, + ResponseTypes: []string{responseTypeCode}, + Scopes: tc.scopes, + ForceApprovalPrompt: tc.forcePrompt, + } + require.NoError(t, s.storage.CreateAuthRequest(ctx, authReq)) + + rr := httptest.NewRecorder() + reqPath := fmt.Sprintf("/callback/%s?state=%s", connID, authReqID) + s.handleConnectorCallback(rr, 
httptest.NewRequest("GET", reqPath, nil)) + + require.Equal(t, 303, rr.Code) + cb, err := url.Parse(rr.Result().Header.Get("Location")) + require.NoError(t, err) + require.Equal(t, tc.wantPath, cb.Path) + }) + } +} + +func TestConsentPersistedOnApproval(t *testing.T) { + ctx := t.Context() + setSessionsEnabled(t, true) + + httpServer, s := newTestServer(t, nil) + defer httpServer.Close() + + userID := "test-user" + connectorID := "mock" + clientID := "test" + + // Pre-create UserIdentity (would have been created during login) + require.NoError(t, s.storage.CreateUserIdentity(ctx, storage.UserIdentity{ + UserID: userID, + ConnectorID: connectorID, + Claims: storage.Claims{UserID: userID}, + Consents: make(map[string][]string), + CreatedAt: time.Now(), + LastLogin: time.Now(), + })) + + authReq := storage.AuthRequest{ + ID: "approval-consent-test", + ClientID: clientID, + ConnectorID: connectorID, + ResponseTypes: []string{responseTypeCode}, + RedirectURI: "https://client.example/callback", + Expiry: time.Now().Add(time.Minute), + LoggedIn: true, + Claims: storage.Claims{UserID: userID}, + Scopes: []string{"openid", "email", "profile"}, + HMACKey: []byte("consent-test-key"), + } + require.NoError(t, s.storage.CreateAuthRequest(ctx, authReq)) + + h := hmac.New(sha256.New, authReq.HMACKey) + h.Write([]byte(authReq.ID)) + mac := base64.RawURLEncoding.EncodeToString(h.Sum(nil)) + + form := url.Values{ + "approval": {"approve"}, + "req": {authReq.ID}, + "hmac": {mac}, + } + + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/approval", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + s.ServeHTTP(rr, req) + + require.Equal(t, http.StatusSeeOther, rr.Code, "approval should redirect") + + ui, err := s.storage.GetUserIdentity(ctx, userID, connectorID) + require.NoError(t, err, "UserIdentity should exist") + require.Equal(t, []string{"openid", "email", "profile"}, ui.Consents[clientID], 
"approved scopes should be persisted") +} + +func TestScopesCoveredByConsent(t *testing.T) { + tests := []struct { + name string + approved []string + requested []string + want bool + }{ + { + name: "All scopes covered", + approved: []string{"email", "profile"}, + requested: []string{"openid", "email", "profile"}, + want: true, + }, + { + name: "Missing scope", + approved: []string{"email"}, + requested: []string{"openid", "email", "groups"}, + want: false, + }, + { + name: "Only openid scope skipped", + approved: []string{}, + requested: []string{"openid"}, + want: true, + }, + { + name: "offline_access requires consent", + approved: []string{}, + requested: []string{"openid", "offline_access"}, + want: false, + }, + { + name: "offline_access covered by consent", + approved: []string{"offline_access"}, + requested: []string{"openid", "offline_access"}, + want: true, + }, + { + name: "Nil approved", + approved: nil, + requested: []string{"email"}, + want: false, + }, + { + name: "Empty requested", + approved: []string{"email"}, + requested: []string{}, + want: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := scopesCoveredByConsent(tc.approved, tc.requested) + require.Equal(t, tc.want, got) + }) + } +} + func TestHandlePasswordLoginWithSkipApproval(t *testing.T) { ctx := t.Context() @@ -645,6 +1041,176 @@ func TestHandlePasswordLoginWithSkipApproval(t *testing.T) { } } +func TestHandleClientCredentials(t *testing.T) { + tests := []struct { + name string + clientID string + clientSecret string + scopes string + wantCode int + wantAccessTok bool + wantIDToken bool + wantUsername string + }{ + { + name: "Basic grant, no scopes", + clientID: "test", + clientSecret: "barfoo", + scopes: "", + wantCode: 200, + wantAccessTok: true, + wantIDToken: false, + }, + { + name: "With openid scope", + clientID: "test", + clientSecret: "barfoo", + scopes: "openid", + wantCode: 200, + wantAccessTok: true, + wantIDToken: true, + }, + { + 
name: "With openid and profile scope includes username", + clientID: "test", + clientSecret: "barfoo", + scopes: "openid profile", + wantCode: 200, + wantAccessTok: true, + wantIDToken: true, + wantUsername: "Test Client", + }, + { + name: "With openid email profile groups", + clientID: "test", + clientSecret: "barfoo", + scopes: "openid email profile groups", + wantCode: 200, + wantAccessTok: true, + wantIDToken: true, + wantUsername: "Test Client", + }, + { + name: "Invalid client secret", + clientID: "test", + clientSecret: "wrong", + scopes: "", + wantCode: 401, + }, + { + name: "Unknown client", + clientID: "nonexistent", + clientSecret: "secret", + scopes: "", + wantCode: 401, + }, + { + name: "offline_access scope rejected", + clientID: "test", + clientSecret: "barfoo", + scopes: "openid offline_access", + wantCode: 400, + }, + { + name: "Unrecognized scope", + clientID: "test", + clientSecret: "barfoo", + scopes: "openid bogus", + wantCode: 400, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := t.Context() + + httpServer, s := newTestServer(t, func(c *Config) { + c.Now = time.Now + }) + defer httpServer.Close() + + // Create a confidential client for testing. 
+ err := s.storage.CreateClient(ctx, storage.Client{ + ID: "test", + Secret: "barfoo", + RedirectURIs: []string{"https://example.com/callback"}, + Name: "Test Client", + }) + require.NoError(t, err) + + u, err := url.Parse(s.issuerURL.String()) + require.NoError(t, err) + u.Path = path.Join(u.Path, "/token") + + v := url.Values{} + v.Add("grant_type", "client_credentials") + if tc.scopes != "" { + v.Add("scope", tc.scopes) + } + + req, _ := http.NewRequest("POST", u.String(), bytes.NewBufferString(v.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.SetBasicAuth(tc.clientID, tc.clientSecret) + + rr := httptest.NewRecorder() + s.ServeHTTP(rr, req) + + require.Equal(t, tc.wantCode, rr.Code) + + if tc.wantCode == 200 { + var resp struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + IDToken string `json:"id_token"` + RefreshToken string `json:"refresh_token"` + } + err := json.Unmarshal(rr.Body.Bytes(), &resp) + require.NoError(t, err) + + if tc.wantAccessTok { + require.NotEmpty(t, resp.AccessToken) + require.Equal(t, "bearer", resp.TokenType) + require.Greater(t, resp.ExpiresIn, 0) + } + if tc.wantIDToken { + require.NotEmpty(t, resp.IDToken) + + // Verify the ID token claims. + provider, err := oidc.NewProvider(ctx, httpServer.URL) + require.NoError(t, err) + verifier := provider.Verifier(&oidc.Config{ClientID: tc.clientID}) + idToken, err := verifier.Verify(ctx, resp.IDToken) + require.NoError(t, err) + + // Decode the subject to verify the connector ID. 
+ var sub internal.IDTokenSubject + require.NoError(t, internal.Unmarshal(idToken.Subject, &sub)) + require.Equal(t, "", sub.ConnId) + require.Equal(t, tc.clientID, sub.UserId) + + var claims struct { + Name string `json:"name"` + PreferredUsername string `json:"preferred_username"` + } + require.NoError(t, idToken.Claims(&claims)) + + if tc.wantUsername != "" { + require.Equal(t, tc.wantUsername, claims.Name) + require.Equal(t, tc.wantUsername, claims.PreferredUsername) + } else { + require.Empty(t, claims.Name) + require.Empty(t, claims.PreferredUsername) + } + } else { + require.Empty(t, resp.IDToken) + } + // client_credentials must never return a refresh token. + require.Empty(t, resp.RefreshToken) + } + }) + } +} + func TestHandleConnectorCallbackWithSkipApproval(t *testing.T) { ctx := t.Context() @@ -887,8 +1453,462 @@ func TestHandleTokenExchange(t *testing.T) { } } +func TestHandleTokenExchangeConnectorGrantTypeRestriction(t *testing.T) { + ctx := t.Context() + httpServer, s := newTestServer(t, func(c *Config) { + c.Storage.CreateClient(ctx, storage.Client{ + ID: "client_1", + Secret: "secret_1", + }) + }) + defer httpServer.Close() + + // Restrict mock connector to authorization_code only + err := s.storage.UpdateConnector(ctx, "mock", func(c storage.Connector) (storage.Connector, error) { + c.GrantTypes = []string{grantTypeAuthorizationCode} + return c, nil + }) + require.NoError(t, err) + // Clear cached connector to pick up new grant types + s.mu.Lock() + delete(s.connectors, "mock") + s.mu.Unlock() + + vals := make(url.Values) + vals.Set("grant_type", grantTypeTokenExchange) + vals.Set("connector_id", "mock") + vals.Set("scope", "openid") + vals.Set("requested_token_type", tokenTypeAccess) + vals.Set("subject_token_type", tokenTypeID) + vals.Set("subject_token", "foobar") + vals.Set("client_id", "client_1") + vals.Set("client_secret", "secret_1") + + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, httpServer.URL+"/token", 
strings.NewReader(vals.Encode())) + req.Header.Set("content-type", "application/x-www-form-urlencoded") + + s.handleToken(rr, req) + + require.Equal(t, http.StatusBadRequest, rr.Code, rr.Body.String()) +} + +func TestHandleAuthorizationConnectorGrantTypeFiltering(t *testing.T) { + tests := []struct { + name string + // grantTypes per connector ID; nil means unrestricted + connectorGrantTypes map[string][]string + responseType string + wantCode int + // wantRedirectContains is checked when wantCode == 302 + wantRedirectContains string + // wantBodyContains is checked when wantCode != 302 + wantBodyContains string + }{ + { + name: "one connector filtered, redirect to remaining", + connectorGrantTypes: map[string][]string{ + "mock": {grantTypeDeviceCode}, + "mock2": nil, + }, + responseType: "code", + wantCode: http.StatusFound, + wantRedirectContains: "/auth/mock2", + }, + { + name: "all connectors filtered", + connectorGrantTypes: map[string][]string{ + "mock": {grantTypeDeviceCode}, + "mock2": {grantTypeDeviceCode}, + }, + responseType: "code", + wantCode: http.StatusBadRequest, + wantBodyContains: "No connectors available", + }, + { + name: "no restrictions, both available", + connectorGrantTypes: map[string][]string{ + "mock": nil, + "mock2": nil, + }, + responseType: "code", + wantCode: http.StatusOK, + }, + { + name: "implicit flow filters auth_code-only connector", + connectorGrantTypes: map[string][]string{ + "mock": {grantTypeAuthorizationCode}, + "mock2": nil, + }, + responseType: "token", + wantCode: http.StatusFound, + wantRedirectContains: "/auth/mock2", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := t.Context() + httpServer, s := newTestServerMultipleConnectors(t, func(c *Config) { + c.Storage.CreateClient(ctx, storage.Client{ + ID: "test", + RedirectURIs: []string{"http://example.com/callback"}, + }) + }) + defer httpServer.Close() + + for id, gts := range tc.connectorGrantTypes { + err := 
s.storage.UpdateConnector(ctx, id, func(c storage.Connector) (storage.Connector, error) { + c.GrantTypes = gts + return c, nil + }) + require.NoError(t, err) + s.mu.Lock() + delete(s.connectors, id) + s.mu.Unlock() + } + + rr := httptest.NewRecorder() + reqURL := fmt.Sprintf("%s/auth?response_type=%s&client_id=test&redirect_uri=http://example.com/callback&scope=openid", httpServer.URL, tc.responseType) + req := httptest.NewRequest(http.MethodGet, reqURL, nil) + s.handleAuthorization(rr, req) + + require.Equal(t, tc.wantCode, rr.Code) + if tc.wantRedirectContains != "" { + require.Contains(t, rr.Header().Get("Location"), tc.wantRedirectContains) + } + if tc.wantBodyContains != "" { + require.Contains(t, rr.Body.String(), tc.wantBodyContains) + } + }) + } +} + +func TestHandleConnectorLoginGrantTypeRejection(t *testing.T) { + ctx := t.Context() + httpServer, s := newTestServer(t, func(c *Config) { + c.Storage.CreateClient(ctx, storage.Client{ + ID: "test-client", + Secret: "secret", + RedirectURIs: []string{"http://example.com/callback"}, + }) + }) + defer httpServer.Close() + + // Restrict mock connector to device_code only + err := s.storage.UpdateConnector(ctx, "mock", func(c storage.Connector) (storage.Connector, error) { + c.GrantTypes = []string{grantTypeDeviceCode} + return c, nil + }) + require.NoError(t, err) + s.mu.Lock() + delete(s.connectors, "mock") + s.mu.Unlock() + + // Try to use mock connector for auth code flow via the full server router + rr := httptest.NewRecorder() + reqURL := httpServer.URL + "/auth/mock?response_type=code&client_id=test-client&redirect_uri=http://example.com/callback&scope=openid" + req := httptest.NewRequest(http.MethodGet, reqURL, nil) + s.ServeHTTP(rr, req) + + require.Equal(t, http.StatusBadRequest, rr.Code) + require.Contains(t, rr.Body.String(), "does not support this grant type") +} + func setNonEmpty(vals url.Values, key, value string) { if value != "" { vals.Set(key, value) } } + +// registerTestConnector creates a 
connector in storage and registers it in the server's connectors map. +func registerTestConnector(t *testing.T, s *Server, connID string, c connector.Connector) { + t.Helper() + ctx := t.Context() + + storageConn := storage.Connector{ + ID: connID, + Type: "saml", + Name: "Test SAML", + ResourceVersion: "1", + } + if err := s.storage.CreateConnector(ctx, storageConn); err != nil { + t.Fatalf("failed to create connector in storage: %v", err) + } + + s.mu.Lock() + s.connectors[connID] = Connector{ + ResourceVersion: "1", + Connector: c, + } + s.mu.Unlock() +} + +func TestConnectorDataPersistence(t *testing.T) { + // Test that ConnectorData is correctly stored in refresh token + // and can be used for subsequent refresh operations. + httpServer, server := newTestServer(t, func(c *Config) { + c.RefreshTokenPolicy = &RefreshTokenPolicy{rotateRefreshTokens: true} + }) + defer httpServer.Close() + + ctx := t.Context() + connID := "saml-conndata" + + // Create a mock SAML connector that also implements RefreshConnector + mockConn := &mockSAMLRefreshConnector{ + refreshIdentity: connector.Identity{ + UserID: "refreshed-user", + Username: "refreshed-name", + Email: "refreshed@example.com", + EmailVerified: true, + Groups: []string{"refreshed-group"}, + }, + } + registerTestConnector(t, server, connID, mockConn) + + // Create client + client := storage.Client{ + ID: "conndata-client", + Secret: "conndata-secret", + RedirectURIs: []string{"https://example.com/callback"}, + Name: "ConnData Test Client", + } + require.NoError(t, server.storage.CreateClient(ctx, client)) + + // Create refresh token with ConnectorData (simulating what HandlePOST would store) + connectorData := []byte(`{"userID":"user-123","username":"testuser","email":"test@example.com","emailVerified":true,"groups":["admin","dev"]}`) + refreshToken := storage.RefreshToken{ + ID: "conndata-refresh", + Token: "conndata-token", + CreatedAt: time.Now(), + LastUsed: time.Now(), + ClientID: client.ID, + ConnectorID: 
connID, + Scopes: []string{"openid", "email", "offline_access"}, + Claims: storage.Claims{ + UserID: "user-123", + Username: "testuser", + Email: "test@example.com", + EmailVerified: true, + Groups: []string{"admin", "dev"}, + }, + ConnectorData: connectorData, + Nonce: "conndata-nonce", + } + require.NoError(t, server.storage.CreateRefresh(ctx, refreshToken)) + + offlineSession := storage.OfflineSessions{ + UserID: "user-123", + ConnID: connID, + Refresh: map[string]*storage.RefreshTokenRef{client.ID: {ID: refreshToken.ID, ClientID: client.ID}}, + ConnectorData: connectorData, + } + require.NoError(t, server.storage.CreateOfflineSessions(ctx, offlineSession)) + + // Verify ConnectorData is stored correctly + storedToken, err := server.storage.GetRefresh(ctx, refreshToken.ID) + require.NoError(t, err) + require.Equal(t, connectorData, storedToken.ConnectorData, + "ConnectorData should be persisted in refresh token storage") + + // Verify ConnectorData is stored in offline session + storedSession, err := server.storage.GetOfflineSessions(ctx, "user-123", connID) + require.NoError(t, err) + require.Equal(t, connectorData, storedSession.ConnectorData, + "ConnectorData should be persisted in offline session storage") +} + +// mockSAMLRefreshConnector implements SAMLConnector + RefreshConnector for testing. 
+type mockSAMLRefreshConnector struct { + refreshIdentity connector.Identity +} + +func (m *mockSAMLRefreshConnector) POSTData(s connector.Scopes, requestID string) (ssoURL, samlRequest string, err error) { + return "", "", nil +} + +func (m *mockSAMLRefreshConnector) HandlePOST(s connector.Scopes, samlResponse, inResponseTo string) (connector.Identity, error) { + return connector.Identity{}, nil +} + +func (m *mockSAMLRefreshConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { + return m.refreshIdentity, nil +} + +func TestFilterConnectors(t *testing.T) { + connectors := []storage.Connector{ + {ID: "github", Type: "github", Name: "GitHub"}, + {ID: "google", Type: "oidc", Name: "Google"}, + {ID: "ldap", Type: "ldap", Name: "LDAP"}, + } + + tests := []struct { + name string + allowedConnectors []string + wantIDs []string + }{ + { + name: "No filter - all connectors returned", + allowedConnectors: nil, + wantIDs: []string{"github", "google", "ldap"}, + }, + { + name: "Empty filter - all connectors returned", + allowedConnectors: []string{}, + wantIDs: []string{"github", "google", "ldap"}, + }, + { + name: "Filter to one connector", + allowedConnectors: []string{"github"}, + wantIDs: []string{"github"}, + }, + { + name: "Filter to two connectors", + allowedConnectors: []string{"github", "ldap"}, + wantIDs: []string{"github", "ldap"}, + }, + { + name: "Filter with non-existent connector ID", + allowedConnectors: []string{"nonexistent"}, + wantIDs: []string{}, + }, + { + name: "Filter with mix of valid and invalid IDs", + allowedConnectors: []string{"google", "nonexistent"}, + wantIDs: []string{"google"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := filterConnectors(connectors, tc.allowedConnectors) + gotIDs := make([]string, len(result)) + for i, c := range result { + gotIDs[i] = c.ID + } + require.Equal(t, tc.wantIDs, gotIDs) + }) + } +} + +func 
TestIsConnectorAllowed(t *testing.T) { + tests := []struct { + name string + allowedConnectors []string + connectorID string + want bool + }{ + { + name: "No restrictions - all allowed", + allowedConnectors: nil, + connectorID: "any", + want: true, + }, + { + name: "Empty list - all allowed", + allowedConnectors: []string{}, + connectorID: "any", + want: true, + }, + { + name: "Connector in allowed list", + allowedConnectors: []string{"github", "google"}, + connectorID: "github", + want: true, + }, + { + name: "Connector not in allowed list", + allowedConnectors: []string{"github", "google"}, + connectorID: "ldap", + want: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := isConnectorAllowed(tc.allowedConnectors, tc.connectorID) + require.Equal(t, tc.want, got) + }) + } +} + +func TestHandleAuthorizationWithAllowedConnectors(t *testing.T) { + ctx := t.Context() + + httpServer, s := newTestServerMultipleConnectors(t, nil) + defer httpServer.Close() + + // Create a client that only allows "mock" connector (not "mock2") + client := storage.Client{ + ID: "filtered-client", + Secret: "secret", + RedirectURIs: []string{"https://example.com/callback"}, + Name: "Filtered Client", + AllowedConnectors: []string{"mock"}, + } + require.NoError(t, s.storage.CreateClient(ctx, client)) + + // Request the auth page with this client - should only show "mock" connector + rr := httptest.NewRecorder() + req := httptest.NewRequest("GET", fmt.Sprintf("/auth?client_id=%s&redirect_uri=%s&response_type=code&scope=openid", + client.ID, url.QueryEscape("https://example.com/callback")), nil) + s.ServeHTTP(rr, req) + + // With only one allowed connector and alwaysShowLogin=false (default), + // the server should redirect directly to the connector + require.Equal(t, http.StatusFound, rr.Code) + location := rr.Header().Get("Location") + require.Contains(t, location, "/auth/mock") + require.NotContains(t, location, "mock2") +} + +func 
TestHandleAuthorizationWithNoMatchingConnectors(t *testing.T) { + ctx := t.Context() + + httpServer, s := newTestServerMultipleConnectors(t, nil) + defer httpServer.Close() + + // Create a client that only allows a non-existent connector + client := storage.Client{ + ID: "no-connectors-client", + Secret: "secret", + RedirectURIs: []string{"https://example.com/callback"}, + Name: "No Connectors Client", + AllowedConnectors: []string{"nonexistent"}, + } + require.NoError(t, s.storage.CreateClient(ctx, client)) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("GET", fmt.Sprintf("/auth?client_id=%s&redirect_uri=%s&response_type=code&scope=openid", + client.ID, url.QueryEscape("https://example.com/callback")), nil) + s.ServeHTTP(rr, req) + + // Should return an error, not an empty login page + require.Equal(t, http.StatusBadRequest, rr.Code) +} + +func TestHandleAuthorizationWithoutAllowedConnectors(t *testing.T) { + ctx := t.Context() + + httpServer, s := newTestServerMultipleConnectors(t, nil) + defer httpServer.Close() + + // Create a client with no connector restrictions + client := storage.Client{ + ID: "unfiltered-client", + Secret: "secret", + RedirectURIs: []string{"https://example.com/callback"}, + Name: "Unfiltered Client", + } + require.NoError(t, s.storage.CreateClient(ctx, client)) + + // Request the auth page - should show all connectors (rendered as HTML) + rr := httptest.NewRecorder() + req := httptest.NewRequest("GET", fmt.Sprintf("/auth?client_id=%s&redirect_uri=%s&response_type=code&scope=openid", + client.ID, url.QueryEscape("https://example.com/callback")), nil) + s.ServeHTTP(rr, req) + + // With multiple connectors and no filter, the login page should be rendered (200 OK) + require.Equal(t, http.StatusOK, rr.Code) +} diff --git a/server/introspectionhandler_test.go b/server/introspectionhandler_test.go index 61ff3cf2..799f0000 100644 --- a/server/introspectionhandler_test.go +++ b/server/introspectionhandler_test.go @@ -298,7 +298,7 @@ 
func TestHandleIntrospect(t *testing.T) { { testName: "Access Token: active", token: activeAccessToken, - response: toJSON(getIntrospectionValue(s.issuerURL, time.Now(), expiry, "access_token")), + response: toJSON(getIntrospectionValue(s.issuerURL, t0, expiry, "access_token")), responseStatusCode: 200, }, { @@ -311,7 +311,7 @@ func TestHandleIntrospect(t *testing.T) { { testName: "Refresh Token: active", token: activeRefreshToken, - response: toJSON(getIntrospectionValue(s.issuerURL, time.Now(), time.Now().Add(s.refreshTokenPolicy.absoluteLifetime), "refresh_token")), + response: toJSON(getIntrospectionValue(s.issuerURL, t0, t0.Add(s.refreshTokenPolicy.absoluteLifetime), "refresh_token")), responseStatusCode: 200, }, { diff --git a/server/oauth2.go b/server/oauth2.go index a8dd0b42..0925d6d0 100644 --- a/server/oauth2.go +++ b/server/oauth2.go @@ -14,6 +14,7 @@ import ( "net" "net/http" "net/url" + "slices" "strconv" "strings" "time" @@ -143,8 +144,19 @@ const ( grantTypePassword = "password" grantTypeDeviceCode = "urn:ietf:params:oauth:grant-type:device_code" grantTypeTokenExchange = "urn:ietf:params:oauth:grant-type:token-exchange" + grantTypeClientCredentials = "client_credentials" ) +// ConnectorGrantTypes is the set of grant types that can be restricted per connector. 
+var ConnectorGrantTypes = map[string]bool{ + grantTypeAuthorizationCode: true, + grantTypeRefreshToken: true, + grantTypeImplicit: true, + grantTypePassword: true, + grantTypeDeviceCode: true, + grantTypeTokenExchange: true, +} + const ( // https://www.rfc-editor.org/rfc/rfc8693.html#section-3 tokenTypeAccess = "urn:ietf:params:oauth:token-type:access_token" @@ -464,6 +476,9 @@ func (s *Server) parseAuthorizationRequest(r *http.Request) (*storage.AuthReques if !validateConnectorID(connectors, connectorID) { return nil, newRedirectedErr(errInvalidRequest, "Invalid ConnectorID") } + if !isConnectorAllowed(client.AllowedConnectors, connectorID) { + return nil, newRedirectedErr(errInvalidRequest, "Connector not allowed for this client") + } } // dex doesn't support request parameter and must return request_not_supported error @@ -472,10 +487,16 @@ func (s *Server) parseAuthorizationRequest(r *http.Request) (*storage.AuthReques return nil, newRedirectedErr(errRequestNotSupported, "Server does not support request parameter.") } - if codeChallengeMethod != codeChallengeMethodS256 && codeChallengeMethod != codeChallengeMethodPlain { + if codeChallenge != "" && !slices.Contains(s.pkce.CodeChallengeMethodsSupported, codeChallengeMethod) { return nil, newRedirectedErr(errInvalidRequest, "Unsupported PKCE challenge method (%q).", codeChallengeMethod) } + // Enforce PKCE if configured. + // https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-4.1.1 + if s.pkce.Enforce && codeChallenge == "" { + return nil, newRedirectedErr(errInvalidRequest, "PKCE is required. 
The code_challenge parameter must be provided.") + } + var ( unrecognized []string invalidScopes []string diff --git a/server/oauth2_test.go b/server/oauth2_test.go index ea930cb3..6e1528ce 100644 --- a/server/oauth2_test.go +++ b/server/oauth2_test.go @@ -53,6 +53,7 @@ func TestParseAuthorizationRequest(t *testing.T) { name string clients []storage.Client supportedResponseTypes []string + pkce PKCEConfig usePOST bool @@ -319,6 +320,92 @@ func TestParseAuthorizationRequest(t *testing.T) { }, expectedError: &redirectedAuthErr{Type: errInvalidRequest}, }, + { + name: "PKCE enforced, no code_challenge provided", + clients: []storage.Client{ + { + ID: "bar", + RedirectURIs: []string{"https://example.com/bar"}, + }, + }, + supportedResponseTypes: []string{"code"}, + pkce: PKCEConfig{ + Enforce: true, + CodeChallengeMethodsSupported: []string{"S256", "plain"}, + }, + queryParams: map[string]string{ + "client_id": "bar", + "redirect_uri": "https://example.com/bar", + "response_type": "code", + "scope": "openid email profile", + }, + expectedError: &redirectedAuthErr{Type: errInvalidRequest}, + }, + { + name: "PKCE enforced, code_challenge provided", + clients: []storage.Client{ + { + ID: "bar", + RedirectURIs: []string{"https://example.com/bar"}, + }, + }, + supportedResponseTypes: []string{"code"}, + pkce: PKCEConfig{ + Enforce: true, + CodeChallengeMethodsSupported: []string{"S256", "plain"}, + }, + queryParams: map[string]string{ + "client_id": "bar", + "redirect_uri": "https://example.com/bar", + "response_type": "code", + "code_challenge": "123", + "code_challenge_method": "S256", + "scope": "openid email profile", + }, + }, + { + name: "PKCE only S256 allowed, plain rejected", + clients: []storage.Client{ + { + ID: "bar", + RedirectURIs: []string{"https://example.com/bar"}, + }, + }, + supportedResponseTypes: []string{"code"}, + pkce: PKCEConfig{ + CodeChallengeMethodsSupported: []string{"S256"}, + }, + queryParams: map[string]string{ + "client_id": "bar", + 
"redirect_uri": "https://example.com/bar", + "response_type": "code", + "code_challenge": "123", + "code_challenge_method": "plain", + "scope": "openid email profile", + }, + expectedError: &redirectedAuthErr{Type: errInvalidRequest}, + }, + { + name: "PKCE only S256 allowed, S256 accepted", + clients: []storage.Client{ + { + ID: "bar", + RedirectURIs: []string{"https://example.com/bar"}, + }, + }, + supportedResponseTypes: []string{"code"}, + pkce: PKCEConfig{ + CodeChallengeMethodsSupported: []string{"S256"}, + }, + queryParams: map[string]string{ + "client_id": "bar", + "redirect_uri": "https://example.com/bar", + "response_type": "code", + "code_challenge": "123", + "code_challenge_method": "S256", + "scope": "openid email profile", + }, + }, } for _, tc := range tests { @@ -326,6 +413,9 @@ func TestParseAuthorizationRequest(t *testing.T) { httpServer, server := newTestServerMultipleConnectors(t, func(c *Config) { c.SupportedResponseTypes = tc.supportedResponseTypes c.Storage = storage.WithStaticClients(c.Storage, tc.clients) + if len(tc.pkce.CodeChallengeMethodsSupported) > 0 || tc.pkce.Enforce { + c.PKCE = tc.pkce + } }) defer httpServer.Close() diff --git a/server/refreshhandlers.go b/server/refreshhandlers.go index b47bd52c..a0807d07 100644 --- a/server/refreshhandlers.go +++ b/server/refreshhandlers.go @@ -202,6 +202,10 @@ func (s *Server) getRefreshTokenFromStorage(ctx context.Context, clientID *strin s.logger.ErrorContext(ctx, "connector not found", "connector_id", refresh.ConnectorID, "err", err) return nil, newInternalServerError() } + if !GrantTypeAllowed(refreshCtx.connector.GrantTypes, grantTypeRefreshToken) { + s.logger.ErrorContext(ctx, "connector does not allow refresh token grant", "connector_id", refresh.ConnectorID) + return nil, &refreshError{msg: errInvalidRequest, desc: "Connector does not support refresh tokens.", code: http.StatusBadRequest} + } // Get Connector Data session, err := s.storage.GetOfflineSessions(ctx, refresh.Claims.UserID, 
refresh.ConnectorID) diff --git a/server/server.go b/server/server.go index b30791ab..a78bf99f 100644 --- a/server/server.go +++ b/server/server.go @@ -13,6 +13,7 @@ import ( "net/url" "os" "path" + "slices" "sort" "strings" "sync" @@ -45,6 +46,7 @@ import ( "github.com/dexidp/dex/connector/openshift" "github.com/dexidp/dex/connector/saml" "github.com/dexidp/dex/connector/ssh" + "github.com/dexidp/dex/pkg/featureflags" "github.com/dexidp/dex/server/signer" "github.com/dexidp/dex/storage" "github.com/dexidp/dex/web" @@ -58,6 +60,13 @@ const LocalConnector = "local" type Connector struct { ResourceVersion string Connector connector.Connector + GrantTypes []string +} + +// GrantTypeAllowed checks if the given grant type is allowed for this connector. +// If no grant types are configured, all are allowed. +func GrantTypeAllowed(configuredTypes []string, grantType string) bool { + return len(configuredTypes) == 0 || slices.Contains(configuredTypes, grantType) } // Config holds the server's configuration options. @@ -107,6 +116,9 @@ type Config struct { // If set, the server will use this connector to handle password grants PasswordConnector string + // PKCE configuration + PKCE PKCEConfig + GCFrequency time.Duration // Defaults to 5 minutes // If specified, the server will use this function for determining time. @@ -159,6 +171,14 @@ type WebConfig struct { Extra map[string]string } +// PKCEConfig holds PKCE (Proof Key for Code Exchange) settings. +type PKCEConfig struct { + // If true, PKCE is required for all authorization code flows. + Enforce bool + // Supported code challenge methods. Defaults to ["S256", "plain"]. 
+ CodeChallengeMethodsSupported []string +} + func value(val, defaultValue time.Duration) time.Duration { if val == 0 { return defaultValue @@ -194,6 +214,8 @@ type Server struct { supportedGrantTypes []string + pkce PKCEConfig + now func() time.Time idTokensValidFor time.Duration @@ -229,6 +251,19 @@ func newServer(ctx context.Context, c Config) (*Server, error) { c.AllowedHeaders = []string{"Authorization"} } + supportedChallengeMethods := map[string]bool{ + codeChallengeMethodS256: true, + codeChallengeMethodPlain: true, + } + if len(c.PKCE.CodeChallengeMethodsSupported) == 0 { + c.PKCE.CodeChallengeMethodsSupported = []string{codeChallengeMethodS256, codeChallengeMethodPlain} + } + for _, m := range c.PKCE.CodeChallengeMethodsSupported { + if !supportedChallengeMethods[m] { + return nil, fmt.Errorf("unsupported PKCE challenge method %q", m) + } + } + allSupportedGrants := map[string]bool{ grantTypeAuthorizationCode: true, grantTypeRefreshToken: true, @@ -255,6 +290,8 @@ func newServer(ctx context.Context, c Config) (*Server, error) { allSupportedGrants[grantTypePassword] = true } + allSupportedGrants[grantTypeClientCredentials] = true + var supportedGrants []string if len(c.AllowedGrantTypes) > 0 { for _, grant := range c.AllowedGrantTypes { @@ -301,6 +338,7 @@ func newServer(ctx context.Context, c Config) (*Server, error) { storage: newKeyCacher(c.Storage, now), supportedResponseTypes: supportedRes, supportedGrantTypes: supportedGrants, + pkce: c.PKCE, idTokensValidFor: value(c.IDTokensValidFor, 24*time.Hour), authRequestsValidFor: value(c.AuthRequestsValidFor, 24*time.Hour), deviceRequestsValidFor: value(c.DeviceRequestsValidFor, 5*time.Minute), @@ -341,6 +379,10 @@ func newServer(ctx context.Context, c Config) (*Server, error) { return nil, fmt.Errorf("server: failed to open all connectors (%d/%d)", failedCount, len(storageConnectors)) } + if featureflags.SessionsEnabled.Enabled() { + s.logger.InfoContext(ctx, "sessions feature flag is enabled") + } + 
instrumentHandler := func(_ string, handler http.Handler) http.HandlerFunc { return handler.ServeHTTP } @@ -739,6 +781,7 @@ func (s *Server) OpenConnector(conn storage.Connector) (Connector, error) { connector := Connector{ ResourceVersion: conn.ResourceVersion, Connector: c, + GrantTypes: conn.GrantTypes, } s.mu.Lock() s.connectors[conn.ID] = connector @@ -747,6 +790,13 @@ func (s *Server) OpenConnector(conn storage.Connector) (Connector, error) { return connector, nil } +// CloseConnector removes the connector from the server's in-memory map. +func (s *Server) CloseConnector(id string) { + s.mu.Lock() + delete(s.connectors, id) + s.mu.Unlock() +} + // getConnector retrieves the connector object with the given id from the storage // and updates the connector list for server if necessary. func (s *Server) getConnector(ctx context.Context, id string) (Connector, error) { diff --git a/server/server_test.go b/server/server_test.go index 5a735f1d..db8f12ce 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -103,6 +103,7 @@ func newTestServer(t *testing.T, updateConfig func(c *Config)) (*httptest.Server AllowedGrantTypes: []string{ // all implemented types grantTypeDeviceCode, grantTypeAuthorizationCode, + grantTypeClientCredentials, grantTypeRefreshToken, grantTypeTokenExchange, grantTypeImplicit, @@ -1640,7 +1641,7 @@ func TestOAuth2DeviceFlow(t *testing.T) { // Add the Clients to the test server client := storage.Client{ ID: clientID, - RedirectURIs: []string{deviceCallbackURI}, + RedirectURIs: []string{s.absPath(deviceCallbackURI)}, Public: true, } if err := s.storage.CreateClient(ctx, client); err != nil { @@ -1751,7 +1752,7 @@ func TestOAuth2DeviceFlow(t *testing.T) { ClientSecret: client.Secret, Endpoint: p.Endpoint(), Scopes: requestedScopes, - RedirectURL: deviceCallbackURI, + RedirectURL: s.absURL(deviceCallbackURI), } if len(tc.scopes) != 0 { oauth2Config.Scopes = tc.scopes @@ -1774,7 +1775,7 @@ func TestServerSupportedGrants(t *testing.T) { { 
name: "Simple", config: func(c *Config) {}, - resGrants: []string{grantTypeAuthorizationCode, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, + resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, }, { name: "Minimal", @@ -1782,14 +1783,30 @@ func TestServerSupportedGrants(t *testing.T) { resGrants: []string{grantTypeTokenExchange}, }, { - name: "With password connector", - config: func(c *Config) { c.PasswordConnector = "local" }, - resGrants: []string{grantTypeAuthorizationCode, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, + name: "With password connector", + config: func(c *Config) { + c.PasswordConnector = "local" + }, + resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, }, { - name: "With token response", - config: func(c *Config) { c.SupportedResponseTypes = append(c.SupportedResponseTypes, responseTypeToken) }, - resGrants: []string{grantTypeAuthorizationCode, grantTypeImplicit, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, + name: "Without client credentials", + config: func(c *Config) { + c.AllowedGrantTypes = []string{ + grantTypeAuthorizationCode, + grantTypeRefreshToken, + grantTypeDeviceCode, + grantTypeTokenExchange, + } + }, + resGrants: []string{grantTypeAuthorizationCode, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, + }, + { + name: "With token response", + config: func(c *Config) { + c.SupportedResponseTypes = append(c.SupportedResponseTypes, responseTypeToken) + }, + resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypeImplicit, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, }, { name: "All", @@ -1797,7 +1814,7 @@ func TestServerSupportedGrants(t *testing.T) { c.PasswordConnector = 
"local" c.SupportedResponseTypes = append(c.SupportedResponseTypes, responseTypeToken) }, - resGrants: []string{grantTypeAuthorizationCode, grantTypeImplicit, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, + resGrants: []string{grantTypeAuthorizationCode, grantTypeClientCredentials, grantTypeImplicit, grantTypePassword, grantTypeRefreshToken, grantTypeDeviceCode, grantTypeTokenExchange}, }, } diff --git a/storage/conformance/conformance.go b/storage/conformance/conformance.go index b8c1b96d..94b23745 100644 --- a/storage/conformance/conformance.go +++ b/storage/conformance/conformance.go @@ -51,6 +51,8 @@ func RunTests(t *testing.T, newStorage func(t *testing.T) storage.Storage) { {"TimezoneSupport", testTimezones}, {"DeviceRequestCRUD", testDeviceRequestCRUD}, {"DeviceTokenCRUD", testDeviceTokenCRUD}, + {"UserIdentityCRUD", testUserIdentityCRUD}, + {"AuthSessionCRUD", testAuthSessionCRUD}, }) } @@ -262,11 +264,12 @@ func testClientCRUD(t *testing.T, s storage.Storage) { ctx := t.Context() id1 := storage.NewID() c1 := storage.Client{ - ID: id1, - Secret: "foobar", - RedirectURIs: []string{"foo://bar.com/", "https://auth.example.com"}, - Name: "dex client", - LogoURL: "https://goo.gl/JIyzIC", + ID: id1, + Secret: "foobar", + RedirectURIs: []string{"foo://bar.com/", "https://auth.example.com"}, + Name: "dex client", + LogoURL: "https://goo.gl/JIyzIC", + AllowedConnectors: []string{"github", "google"}, } err := s.DeleteClient(ctx, id1) mustBeErrNotFound(t, "client", err) @@ -630,10 +633,11 @@ func testConnectorCRUD(t *testing.T, s storage.Storage) { id1 := storage.NewID() config1 := []byte(`{"issuer": "https://accounts.google.com"}`) c1 := storage.Connector{ - ID: id1, - Type: "Default", - Name: "Default", - Config: config1, + ID: id1, + Type: "Default", + Name: "Default", + Config: config1, + GrantTypes: []string{"authorization_code", "refresh_token"}, } if err := s.CreateConnector(ctx, c1); err != nil { @@ -674,12 +678,14 @@ 
func testConnectorCRUD(t *testing.T, s storage.Storage) { if err := s.UpdateConnector(ctx, c1.ID, func(old storage.Connector) (storage.Connector, error) { old.Type = "oidc" + old.GrantTypes = []string{"urn:ietf:params:oauth:grant-type:token-exchange"} return old, nil }); err != nil { t.Fatalf("failed to update Connector: %v", err) } c1.Type = "oidc" + c1.GrantTypes = []string{"urn:ietf:params:oauth:grant-type:token-exchange"} getAndCompare(id1, c1) connectorList := []storage.Connector{c1, c2} @@ -1080,3 +1086,183 @@ func testDeviceTokenCRUD(t *testing.T, s storage.Storage) { t.Fatalf("storage does not support PKCE, wanted challenge=%#v got %#v", codeChallenge, got.PKCE) } } + +func testUserIdentityCRUD(t *testing.T, s storage.Storage) { + ctx := t.Context() + + now := time.Now().UTC().Round(time.Millisecond) + + u1 := storage.UserIdentity{ + UserID: "user1", + ConnectorID: "conn1", + Claims: storage.Claims{ + UserID: "user1", + Username: "jane", + Email: "jane@example.com", + EmailVerified: true, + Groups: []string{"a", "b"}, + }, + Consents: make(map[string][]string), + CreatedAt: now, + LastLogin: now, + BlockedUntil: time.Unix(0, 0).UTC(), + } + + // Create with empty Consents map. + if err := s.CreateUserIdentity(ctx, u1); err != nil { + t.Fatalf("create user identity: %v", err) + } + + // Duplicate create should return ErrAlreadyExists. + err := s.CreateUserIdentity(ctx, u1) + mustBeErrAlreadyExists(t, "user identity", err) + + // Get and compare. 
+ got, err := s.GetUserIdentity(ctx, u1.UserID, u1.ConnectorID) + if err != nil { + t.Fatalf("get user identity: %v", err) + } + + got.CreatedAt = got.CreatedAt.UTC().Round(time.Millisecond) + got.LastLogin = got.LastLogin.UTC().Round(time.Millisecond) + got.BlockedUntil = got.BlockedUntil.UTC().Round(time.Millisecond) + u1.BlockedUntil = u1.BlockedUntil.UTC().Round(time.Millisecond) + if diff := pretty.Compare(u1, got); diff != "" { + t.Errorf("user identity retrieved from storage did not match: %s", diff) + } + + // Update: add consent entry. + if err := s.UpdateUserIdentity(ctx, u1.UserID, u1.ConnectorID, func(old storage.UserIdentity) (storage.UserIdentity, error) { + old.Consents["client1"] = []string{"openid", "email"} + return old, nil + }); err != nil { + t.Fatalf("update user identity: %v", err) + } + + // Get and verify updated consents. + got, err = s.GetUserIdentity(ctx, u1.UserID, u1.ConnectorID) + if err != nil { + t.Fatalf("get user identity after update: %v", err) + } + wantConsents := map[string][]string{"client1": {"openid", "email"}} + if diff := pretty.Compare(wantConsents, got.Consents); diff != "" { + t.Errorf("user identity consents did not match after update: %s", diff) + } + + // List and verify. + identities, err := s.ListUserIdentities(ctx) + if err != nil { + t.Fatalf("list user identities: %v", err) + } + if len(identities) != 1 { + t.Fatalf("expected 1 user identity, got %d", len(identities)) + } + + // Delete. + if err := s.DeleteUserIdentity(ctx, u1.UserID, u1.ConnectorID); err != nil { + t.Fatalf("delete user identity: %v", err) + } + + // Get deleted should return ErrNotFound. 
+ _, err = s.GetUserIdentity(ctx, u1.UserID, u1.ConnectorID) + mustBeErrNotFound(t, "user identity", err) +} + +func testAuthSessionCRUD(t *testing.T, s storage.Storage) { + ctx := t.Context() + + now := time.Now().UTC().Round(time.Millisecond) + + session := storage.AuthSession{ + ID: storage.NewID(), + ClientStates: map[string]*storage.ClientAuthState{ + "client1": { + UserID: "user1", + ConnectorID: "conn1", + Active: true, + ExpiresAt: now.Add(24 * time.Hour), + LastActivity: now, + LastTokenIssuedAt: now, + }, + }, + CreatedAt: now, + LastActivity: now, + IPAddress: "192.168.1.1", + UserAgent: "TestBrowser/1.0", + } + + // Create. + if err := s.CreateAuthSession(ctx, session); err != nil { + t.Fatalf("create auth session: %v", err) + } + + // Duplicate create should return ErrAlreadyExists. + err := s.CreateAuthSession(ctx, session) + mustBeErrAlreadyExists(t, "auth session", err) + + // Get and compare. + got, err := s.GetAuthSession(ctx, session.ID) + if err != nil { + t.Fatalf("get auth session: %v", err) + } + + got.CreatedAt = got.CreatedAt.UTC().Round(time.Millisecond) + got.LastActivity = got.LastActivity.UTC().Round(time.Millisecond) + for _, cs := range got.ClientStates { + cs.ExpiresAt = cs.ExpiresAt.UTC().Round(time.Millisecond) + cs.LastActivity = cs.LastActivity.UTC().Round(time.Millisecond) + cs.LastTokenIssuedAt = cs.LastTokenIssuedAt.UTC().Round(time.Millisecond) + } + if diff := pretty.Compare(session, got); diff != "" { + t.Errorf("auth session retrieved from storage did not match: %s", diff) + } + + // Update: add a new client state. 
+ newNow := now.Add(time.Minute) + if err := s.UpdateAuthSession(ctx, session.ID, func(old storage.AuthSession) (storage.AuthSession, error) { + old.ClientStates["client2"] = &storage.ClientAuthState{ + UserID: "user2", + ConnectorID: "conn2", + Active: true, + ExpiresAt: newNow.Add(24 * time.Hour), + LastActivity: newNow, + } + old.LastActivity = newNow + return old, nil + }); err != nil { + t.Fatalf("update auth session: %v", err) + } + + // Get and verify update. + got, err = s.GetAuthSession(ctx, session.ID) + if err != nil { + t.Fatalf("get auth session after update: %v", err) + } + if len(got.ClientStates) != 2 { + t.Fatalf("expected 2 client states, got %d", len(got.ClientStates)) + } + if got.ClientStates["client2"] == nil { + t.Fatal("expected client2 state to exist") + } + if got.ClientStates["client2"].UserID != "user2" { + t.Errorf("expected client2 user_id to be user2, got %s", got.ClientStates["client2"].UserID) + } + + // List and verify. + sessions, err := s.ListAuthSessions(ctx) + if err != nil { + t.Fatalf("list auth sessions: %v", err) + } + if len(sessions) != 1 { + t.Fatalf("expected 1 auth session, got %d", len(sessions)) + } + + // Delete. + if err := s.DeleteAuthSession(ctx, session.ID); err != nil { + t.Fatalf("delete auth session: %v", err) + } + + // Get deleted should return ErrNotFound. 
+ _, err = s.GetAuthSession(ctx, session.ID) + mustBeErrNotFound(t, "auth session", err) +} diff --git a/storage/conformance/transactions.go b/storage/conformance/transactions.go index 1383c8e7..5889a024 100644 --- a/storage/conformance/transactions.go +++ b/storage/conformance/transactions.go @@ -2,9 +2,12 @@ package conformance import ( "context" + "strconv" + "sync" "testing" "time" + "github.com/stretchr/testify/require" "golang.org/x/crypto/bcrypt" "github.com/dexidp/dex/storage" @@ -26,6 +29,16 @@ func RunTransactionTests(t *testing.T, newStorage func(t *testing.T) storage.Sto }) } +// RunConcurrencyTests runs tests that verify storage implementations handle +// high-contention parallel updates correctly. Unlike RunTransactionTests, +// these tests use real goroutine-based parallelism rather than nested calls, +// and are safe to run on all storage backends (including those with non-reentrant locks). +func RunConcurrencyTests(t *testing.T, newStorage func(t *testing.T) storage.Storage) { + runTests(t, newStorage, []subTest{ + {"RefreshTokenParallelUpdate", testRefreshTokenParallelUpdate}, + }) +} + func testClientConcurrentUpdate(t *testing.T, s storage.Storage) { ctx := t.Context() c := storage.Client{ @@ -180,3 +193,111 @@ func testKeysConcurrentUpdate(t *testing.T, s storage.Storage) { } } } + +// testRefreshTokenParallelUpdate tests that many parallel updates to the same +// refresh token are serialized correctly by the storage and no updates are lost. +// +// Each goroutine atomically increments a counter stored in the Token field. +// After all goroutines finish, the counter must equal the number of successful updates. +// A mismatch indicates lost updates due to broken atomicity. 
+func testRefreshTokenParallelUpdate(t *testing.T, s storage.Storage) { + ctx := t.Context() + + id := storage.NewID() + refresh := storage.RefreshToken{ + ID: id, + Token: "0", + Nonce: "foo", + ClientID: "client_id", + ConnectorID: "connector_id", + Scopes: []string{"openid"}, + CreatedAt: time.Now().UTC().Round(time.Millisecond), + LastUsed: time.Now().UTC().Round(time.Millisecond), + Claims: storage.Claims{ + UserID: "1", + Username: "jane", + Email: "jane@example.com", + }, + } + + require.NoError(t, s.CreateRefresh(ctx, refresh)) + + const numWorkers = 100 + + type updateResult struct { + err error + newToken string // token value written by this worker's updater + } + + var wg sync.WaitGroup + results := make([]updateResult, numWorkers) + + for i := range numWorkers { + wg.Add(1) + go func() { + defer wg.Done() + results[i].err = s.UpdateRefreshToken(ctx, id, func(old storage.RefreshToken) (storage.RefreshToken, error) { + counter, _ := strconv.Atoi(old.Token) + old.Token = strconv.Itoa(counter + 1) + results[i].newToken = old.Token + return old, nil + }) + }() + } + + wg.Wait() + + errCounts := map[string]int{} + var successes int + writtenTokens := map[string]int{} + for _, r := range results { + if r.err == nil { + successes++ + writtenTokens[r.newToken]++ + } else { + errCounts[r.err.Error()]++ + } + } + + for msg, count := range errCounts { + t.Logf("error (x%d): %s", count, msg) + } + + stored, err := s.GetRefresh(ctx, id) + require.NoError(t, err) + + counter, err := strconv.Atoi(stored.Token) + require.NoError(t, err) + + t.Logf("parallel refresh token updates: %d/%d succeeded, final counter: %d", successes, numWorkers, counter) + + if successes < numWorkers { + t.Errorf("not all updates succeeded: %d/%d (some failed under contention)", successes, numWorkers) + } + + if counter != successes { + t.Errorf("lost updates detected: %d successful updates but counter is %d", successes, counter) + } + + // Each successful updater must have seen a unique 
counter value. + // Duplicates would mean two updaters read the same state — a sign of broken atomicity. + for token, count := range writtenTokens { + if count > 1 { + t.Errorf("token %q was written by %d updaters — concurrent updaters saw the same state", token, count) + } + } + + // Successful updaters must have produced a contiguous sequence 1..N. + // A gap would mean an updater saw stale state even though the write succeeded. + for i := 1; i <= successes; i++ { + if writtenTokens[strconv.Itoa(i)] != 1 { + t.Errorf("expected token %q to be written exactly once, got %d", strconv.Itoa(i), writtenTokens[strconv.Itoa(i)]) + } + } + + // The token stored in the database must match the highest value written. + // This confirms that the last successful update is the one persisted. + if stored.Token != strconv.Itoa(successes) { + t.Errorf("stored token %q does not match expected final value %q", stored.Token, strconv.Itoa(successes)) + } +} diff --git a/storage/ent/client/authsession.go b/storage/ent/client/authsession.go new file mode 100644 index 00000000..439bdbe3 --- /dev/null +++ b/storage/ent/client/authsession.go @@ -0,0 +1,108 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/dexidp/dex/storage" +) + +// CreateAuthSession saves provided auth session into the database. +func (d *Database) CreateAuthSession(ctx context.Context, session storage.AuthSession) error { + if session.ClientStates == nil { + session.ClientStates = make(map[string]*storage.ClientAuthState) + } + encodedStates, err := json.Marshal(session.ClientStates) + if err != nil { + return fmt.Errorf("encode client states auth session: %w", err) + } + + _, err = d.client.AuthSession.Create(). + SetID(session.ID). + SetClientStates(encodedStates). + SetCreatedAt(session.CreatedAt). + SetLastActivity(session.LastActivity). + SetIPAddress(session.IPAddress). + SetUserAgent(session.UserAgent). 
+ Save(ctx) + if err != nil { + return convertDBError("create auth session: %w", err) + } + return nil +} + +// GetAuthSession extracts an auth session from the database by session ID. +func (d *Database) GetAuthSession(ctx context.Context, sessionID string) (storage.AuthSession, error) { + authSession, err := d.client.AuthSession.Get(ctx, sessionID) + if err != nil { + return storage.AuthSession{}, convertDBError("get auth session: %w", err) + } + return toStorageAuthSession(authSession), nil +} + +// ListAuthSessions extracts all auth sessions from the database. +func (d *Database) ListAuthSessions(ctx context.Context) ([]storage.AuthSession, error) { + authSessions, err := d.client.AuthSession.Query().All(ctx) + if err != nil { + return nil, convertDBError("list auth sessions: %w", err) + } + + storageAuthSessions := make([]storage.AuthSession, 0, len(authSessions)) + for _, s := range authSessions { + storageAuthSessions = append(storageAuthSessions, toStorageAuthSession(s)) + } + return storageAuthSessions, nil +} + +// DeleteAuthSession deletes an auth session from the database by session ID. +func (d *Database) DeleteAuthSession(ctx context.Context, sessionID string) error { + err := d.client.AuthSession.DeleteOneID(sessionID).Exec(ctx) + if err != nil { + return convertDBError("delete auth session: %w", err) + } + return nil +} + +// UpdateAuthSession changes an auth session using an updater function. 
+func (d *Database) UpdateAuthSession(ctx context.Context, sessionID string, updater func(s storage.AuthSession) (storage.AuthSession, error)) error { + tx, err := d.BeginTx(ctx) + if err != nil { + return convertDBError("update auth session tx: %w", err) + } + + authSession, err := tx.AuthSession.Get(ctx, sessionID) + if err != nil { + return rollback(tx, "update auth session database: %w", err) + } + + newSession, err := updater(toStorageAuthSession(authSession)) + if err != nil { + return rollback(tx, "update auth session updating: %w", err) + } + + if newSession.ClientStates == nil { + newSession.ClientStates = make(map[string]*storage.ClientAuthState) + } + + encodedStates, err := json.Marshal(newSession.ClientStates) + if err != nil { + return rollback(tx, "encode client states auth session: %w", err) + } + + _, err = tx.AuthSession.UpdateOneID(sessionID). + SetClientStates(encodedStates). + SetLastActivity(newSession.LastActivity). + SetIPAddress(newSession.IPAddress). + SetUserAgent(newSession.UserAgent). + Save(ctx) + if err != nil { + return rollback(tx, "update auth session updating: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update auth session commit: %w", err) + } + + return nil +} diff --git a/storage/ent/client/client.go b/storage/ent/client/client.go index 1957a76a..a4f0d942 100644 --- a/storage/ent/client/client.go +++ b/storage/ent/client/client.go @@ -16,6 +16,7 @@ func (d *Database) CreateClient(ctx context.Context, client storage.Client) erro SetLogoURL(client.LogoURL). SetRedirectUris(client.RedirectURIs). SetTrustedPeers(client.TrustedPeers). + SetAllowedConnectors(client.AllowedConnectors). Save(ctx) if err != nil { return convertDBError("create oauth2 client: %w", err) @@ -79,6 +80,7 @@ func (d *Database) UpdateClient(ctx context.Context, id string, updater func(old SetLogoURL(newClient.LogoURL). SetRedirectUris(newClient.RedirectURIs). SetTrustedPeers(newClient.TrustedPeers). 
+ SetAllowedConnectors(newClient.AllowedConnectors). Save(ctx) if err != nil { return rollback(tx, "update client uploading: %w", err) diff --git a/storage/ent/client/connector.go b/storage/ent/client/connector.go index f0cff8ba..21e7aec2 100644 --- a/storage/ent/client/connector.go +++ b/storage/ent/client/connector.go @@ -14,6 +14,7 @@ func (d *Database) CreateConnector(ctx context.Context, connector storage.Connec SetType(connector.Type). SetResourceVersion(connector.ResourceVersion). SetConfig(connector.Config). + SetGrantTypes(connector.GrantTypes). Save(ctx) if err != nil { return convertDBError("create connector: %w", err) @@ -75,6 +76,7 @@ func (d *Database) UpdateConnector(ctx context.Context, id string, updater func( SetType(newConnector.Type). SetResourceVersion(newConnector.ResourceVersion). SetConfig(newConnector.Config). + SetGrantTypes(newConnector.GrantTypes). Save(ctx) if err != nil { return rollback(tx, "update connector uploading: %w", err) diff --git a/storage/ent/client/offlinesession.go b/storage/ent/client/offlinesession.go index 9d608cb6..c8f18433 100644 --- a/storage/ent/client/offlinesession.go +++ b/storage/ent/client/offlinesession.go @@ -15,7 +15,7 @@ func (d *Database) CreateOfflineSessions(ctx context.Context, session storage.Of return fmt.Errorf("encode refresh offline session: %w", err) } - id := offlineSessionID(session.UserID, session.ConnID, d.hasher) + id := compositeKeyID(session.UserID, session.ConnID, d.hasher) _, err = d.client.OfflineSession.Create(). SetID(id). SetUserID(session.UserID). @@ -31,7 +31,7 @@ func (d *Database) CreateOfflineSessions(ctx context.Context, session storage.Of // GetOfflineSessions extracts an offline session from the database by user id and connector id. 
func (d *Database) GetOfflineSessions(ctx context.Context, userID, connID string) (storage.OfflineSessions, error) { - id := offlineSessionID(userID, connID, d.hasher) + id := compositeKeyID(userID, connID, d.hasher) offlineSession, err := d.client.OfflineSession.Get(ctx, id) if err != nil { @@ -42,7 +42,7 @@ func (d *Database) GetOfflineSessions(ctx context.Context, userID, connID string // DeleteOfflineSessions deletes an offline session from the database by user id and connector id. func (d *Database) DeleteOfflineSessions(ctx context.Context, userID, connID string) error { - id := offlineSessionID(userID, connID, d.hasher) + id := compositeKeyID(userID, connID, d.hasher) err := d.client.OfflineSession.DeleteOneID(id).Exec(ctx) if err != nil { @@ -53,7 +53,7 @@ func (d *Database) DeleteOfflineSessions(ctx context.Context, userID, connID str // UpdateOfflineSessions changes an offline session by user id and connector id using an updater function. func (d *Database) UpdateOfflineSessions(ctx context.Context, userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error { - id := offlineSessionID(userID, connID, d.hasher) + id := compositeKeyID(userID, connID, d.hasher) tx, err := d.BeginTx(ctx) if err != nil { diff --git a/storage/ent/client/types.go b/storage/ent/client/types.go index ab8ee83f..f8e99c4a 100644 --- a/storage/ent/client/types.go +++ b/storage/ent/client/types.go @@ -76,22 +76,24 @@ func toStorageAuthCode(a *db.AuthCode) storage.AuthCode { func toStorageClient(c *db.OAuth2Client) storage.Client { return storage.Client{ - ID: c.ID, - Secret: c.Secret, - RedirectURIs: c.RedirectUris, - TrustedPeers: c.TrustedPeers, - Public: c.Public, - Name: c.Name, - LogoURL: c.LogoURL, + ID: c.ID, + Secret: c.Secret, + RedirectURIs: c.RedirectUris, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + Name: c.Name, + LogoURL: c.LogoURL, + AllowedConnectors: c.AllowedConnectors, } } func toStorageConnector(c 
*db.Connector) storage.Connector { return storage.Connector{ - ID: c.ID, - Type: c.Type, - Name: c.Name, - Config: c.Config, + ID: c.ID, + Type: c.Type, + Name: c.Name, + Config: c.Config, + GrantTypes: c.GrantTypes, } } @@ -161,6 +163,61 @@ func toStorageDeviceRequest(r *db.DeviceRequest) storage.DeviceRequest { } } +func toStorageUserIdentity(u *db.UserIdentity) storage.UserIdentity { + s := storage.UserIdentity{ + UserID: u.UserID, + ConnectorID: u.ConnectorID, + Claims: storage.Claims{ + UserID: u.ClaimsUserID, + Username: u.ClaimsUsername, + PreferredUsername: u.ClaimsPreferredUsername, + Email: u.ClaimsEmail, + EmailVerified: u.ClaimsEmailVerified, + Groups: u.ClaimsGroups, + }, + CreatedAt: u.CreatedAt, + LastLogin: u.LastLogin, + BlockedUntil: u.BlockedUntil, + } + + if u.Consents != nil { + if err := json.Unmarshal(u.Consents, &s.Consents); err != nil { + // Correctness of json structure is guaranteed on uploading + panic(err) + } + if s.Consents == nil { + // Ensure Consents is non-nil even if JSON was "null". + s.Consents = make(map[string][]string) + } + } else { + // Server code assumes this will be non-nil. 
+ s.Consents = make(map[string][]string) + } + return s +} + +func toStorageAuthSession(s *db.AuthSession) storage.AuthSession { + result := storage.AuthSession{ + ID: s.ID, + CreatedAt: s.CreatedAt, + LastActivity: s.LastActivity, + IPAddress: s.IPAddress, + UserAgent: s.UserAgent, + } + + if s.ClientStates != nil { + if err := json.Unmarshal(s.ClientStates, &result.ClientStates); err != nil { + panic(err) + } + if result.ClientStates == nil { + result.ClientStates = make(map[string]*storage.ClientAuthState) + } + } else { + result.ClientStates = make(map[string]*storage.ClientAuthState) + } + return result +} + func toStorageDeviceToken(t *db.DeviceToken) storage.DeviceToken { return storage.DeviceToken{ DeviceCode: t.DeviceCode, diff --git a/storage/ent/client/useridentity.go b/storage/ent/client/useridentity.go new file mode 100644 index 00000000..1cf87919 --- /dev/null +++ b/storage/ent/client/useridentity.go @@ -0,0 +1,130 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/dexidp/dex/storage" +) + +// CreateUserIdentity saves provided user identity into the database. +func (d *Database) CreateUserIdentity(ctx context.Context, identity storage.UserIdentity) error { + if identity.Consents == nil { + identity.Consents = make(map[string][]string) + } + encodedConsents, err := json.Marshal(identity.Consents) + if err != nil { + return fmt.Errorf("encode consents user identity: %w", err) + } + + id := compositeKeyID(identity.UserID, identity.ConnectorID, d.hasher) + _, err = d.client.UserIdentity.Create(). + SetID(id). + SetUserID(identity.UserID). + SetConnectorID(identity.ConnectorID). + SetClaimsUserID(identity.Claims.UserID). + SetClaimsUsername(identity.Claims.Username). + SetClaimsPreferredUsername(identity.Claims.PreferredUsername). + SetClaimsEmail(identity.Claims.Email). + SetClaimsEmailVerified(identity.Claims.EmailVerified). + SetClaimsGroups(identity.Claims.Groups). + SetConsents(encodedConsents). 
+ SetCreatedAt(identity.CreatedAt). + SetLastLogin(identity.LastLogin). + SetBlockedUntil(identity.BlockedUntil). + Save(ctx) + if err != nil { + return convertDBError("create user identity: %w", err) + } + return nil +} + +// GetUserIdentity extracts a user identity from the database by user id and connector id. +func (d *Database) GetUserIdentity(ctx context.Context, userID, connectorID string) (storage.UserIdentity, error) { + id := compositeKeyID(userID, connectorID, d.hasher) + + userIdentity, err := d.client.UserIdentity.Get(ctx, id) + if err != nil { + return storage.UserIdentity{}, convertDBError("get user identity: %w", err) + } + return toStorageUserIdentity(userIdentity), nil +} + +// DeleteUserIdentity deletes a user identity from the database by user id and connector id. +func (d *Database) DeleteUserIdentity(ctx context.Context, userID, connectorID string) error { + id := compositeKeyID(userID, connectorID, d.hasher) + + err := d.client.UserIdentity.DeleteOneID(id).Exec(ctx) + if err != nil { + return convertDBError("delete user identity: %w", err) + } + return nil +} + +// UpdateUserIdentity changes a user identity by user id and connector id using an updater function. 
+func (d *Database) UpdateUserIdentity(ctx context.Context, userID string, connectorID string, updater func(u storage.UserIdentity) (storage.UserIdentity, error)) error { + id := compositeKeyID(userID, connectorID, d.hasher) + + tx, err := d.BeginTx(ctx) + if err != nil { + return convertDBError("update user identity tx: %w", err) + } + + userIdentity, err := tx.UserIdentity.Get(ctx, id) + if err != nil { + return rollback(tx, "update user identity database: %w", err) + } + + newUserIdentity, err := updater(toStorageUserIdentity(userIdentity)) + if err != nil { + return rollback(tx, "update user identity updating: %w", err) + } + + if newUserIdentity.Consents == nil { + newUserIdentity.Consents = make(map[string][]string) + } + + encodedConsents, err := json.Marshal(newUserIdentity.Consents) + if err != nil { + return rollback(tx, "encode consents user identity: %w", err) + } + + _, err = tx.UserIdentity.UpdateOneID(id). + SetUserID(newUserIdentity.UserID). + SetConnectorID(newUserIdentity.ConnectorID). + SetClaimsUserID(newUserIdentity.Claims.UserID). + SetClaimsUsername(newUserIdentity.Claims.Username). + SetClaimsPreferredUsername(newUserIdentity.Claims.PreferredUsername). + SetClaimsEmail(newUserIdentity.Claims.Email). + SetClaimsEmailVerified(newUserIdentity.Claims.EmailVerified). + SetClaimsGroups(newUserIdentity.Claims.Groups). + SetConsents(encodedConsents). + SetCreatedAt(newUserIdentity.CreatedAt). + SetLastLogin(newUserIdentity.LastLogin). + SetBlockedUntil(newUserIdentity.BlockedUntil). + Save(ctx) + if err != nil { + return rollback(tx, "update user identity uploading: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update user identity commit: %w", err) + } + + return nil +} + +// ListUserIdentities lists all user identities in the database. 
+func (d *Database) ListUserIdentities(ctx context.Context) ([]storage.UserIdentity, error) { + userIdentities, err := d.client.UserIdentity.Query().All(ctx) + if err != nil { + return nil, convertDBError("list user identities: %w", err) + } + + storageUserIdentities := make([]storage.UserIdentity, 0, len(userIdentities)) + for _, u := range userIdentities { + storageUserIdentities = append(storageUserIdentities, toStorageUserIdentity(u)) + } + return storageUserIdentities, nil +} diff --git a/storage/ent/client/utils.go b/storage/ent/client/utils.go index 65c037ac..950e612c 100644 --- a/storage/ent/client/utils.go +++ b/storage/ent/client/utils.go @@ -32,13 +32,13 @@ func convertDBError(t string, err error) error { return fmt.Errorf(t, err) } -// compose hashed id from user and connection id to use it as primary key +// compositeKeyID composes a hashed id from two key parts to use as primary key. // ent doesn't support multi-key primary yet // https://github.com/facebook/ent/issues/400 -func offlineSessionID(userID string, connID string, hasher func() hash.Hash) string { +func compositeKeyID(first string, second string, hasher func() hash.Hash) string { h := hasher() - h.Write([]byte(userID)) - h.Write([]byte(connID)) + h.Write([]byte(first)) + h.Write([]byte(second)) return fmt.Sprintf("%x", h.Sum(nil)) } diff --git a/storage/ent/db/authsession.go b/storage/ent/db/authsession.go new file mode 100644 index 00000000..b81479c7 --- /dev/null +++ b/storage/ent/db/authsession.go @@ -0,0 +1,150 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/authsession" +) + +// AuthSession is the model entity for the AuthSession schema. +type AuthSession struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // ClientStates holds the value of the "client_states" field. 
+ ClientStates []byte `json:"client_states,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // LastActivity holds the value of the "last_activity" field. + LastActivity time.Time `json:"last_activity,omitempty"` + // IPAddress holds the value of the "ip_address" field. + IPAddress string `json:"ip_address,omitempty"` + // UserAgent holds the value of the "user_agent" field. + UserAgent string `json:"user_agent,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AuthSession) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case authsession.FieldClientStates: + values[i] = new([]byte) + case authsession.FieldID, authsession.FieldIPAddress, authsession.FieldUserAgent: + values[i] = new(sql.NullString) + case authsession.FieldCreatedAt, authsession.FieldLastActivity: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AuthSession fields. 
+func (_m *AuthSession) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case authsession.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + _m.ID = value.String + } + case authsession.FieldClientStates: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field client_states", values[i]) + } else if value != nil { + _m.ClientStates = *value + } + case authsession.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case authsession.FieldLastActivity: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_activity", values[i]) + } else if value.Valid { + _m.LastActivity = value.Time + } + case authsession.FieldIPAddress: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ip_address", values[i]) + } else if value.Valid { + _m.IPAddress = value.String + } + case authsession.FieldUserAgent: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field user_agent", values[i]) + } else if value.Valid { + _m.UserAgent = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AuthSession. +// This includes values selected through modifiers, order, etc. +func (_m *AuthSession) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this AuthSession. 
+// Note that you need to call AuthSession.Unwrap() before calling this method if this AuthSession +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *AuthSession) Update() *AuthSessionUpdateOne { + return NewAuthSessionClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the AuthSession entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *AuthSession) Unwrap() *AuthSession { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("db: AuthSession is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *AuthSession) String() string { + var builder strings.Builder + builder.WriteString("AuthSession(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("client_states=") + builder.WriteString(fmt.Sprintf("%v", _m.ClientStates)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("last_activity=") + builder.WriteString(_m.LastActivity.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("ip_address=") + builder.WriteString(_m.IPAddress) + builder.WriteString(", ") + builder.WriteString("user_agent=") + builder.WriteString(_m.UserAgent) + builder.WriteByte(')') + return builder.String() +} + +// AuthSessions is a parsable slice of AuthSession. +type AuthSessions []*AuthSession diff --git a/storage/ent/db/authsession/authsession.go b/storage/ent/db/authsession/authsession.go new file mode 100644 index 00000000..e2548f90 --- /dev/null +++ b/storage/ent/db/authsession/authsession.go @@ -0,0 +1,83 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package authsession + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the authsession type in the database. + Label = "auth_session" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldClientStates holds the string denoting the client_states field in the database. + FieldClientStates = "client_states" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldLastActivity holds the string denoting the last_activity field in the database. + FieldLastActivity = "last_activity" + // FieldIPAddress holds the string denoting the ip_address field in the database. + FieldIPAddress = "ip_address" + // FieldUserAgent holds the string denoting the user_agent field in the database. + FieldUserAgent = "user_agent" + // Table holds the table name of the authsession in the database. + Table = "auth_sessions" +) + +// Columns holds all SQL columns for authsession fields. +var Columns = []string{ + FieldID, + FieldClientStates, + FieldCreatedAt, + FieldLastActivity, + FieldIPAddress, + FieldUserAgent, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultIPAddress holds the default value on creation for the "ip_address" field. + DefaultIPAddress string + // DefaultUserAgent holds the default value on creation for the "user_agent" field. + DefaultUserAgent string + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the AuthSession queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByLastActivity orders the results by the last_activity field. +func ByLastActivity(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastActivity, opts...).ToFunc() +} + +// ByIPAddress orders the results by the ip_address field. +func ByIPAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPAddress, opts...).ToFunc() +} + +// ByUserAgent orders the results by the user_agent field. +func ByUserAgent(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserAgent, opts...).ToFunc() +} diff --git a/storage/ent/db/authsession/where.go b/storage/ent/db/authsession/where.go new file mode 100644 index 00000000..a4f52894 --- /dev/null +++ b/storage/ent/db/authsession/where.go @@ -0,0 +1,355 @@ +// Code generated by ent, DO NOT EDIT. + +package authsession + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldContainsFold(FieldID, id)) +} + +// ClientStates applies equality check predicate on the "client_states" field. It's identical to ClientStatesEQ. +func ClientStates(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldClientStates, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldCreatedAt, v)) +} + +// LastActivity applies equality check predicate on the "last_activity" field. It's identical to LastActivityEQ. +func LastActivity(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldLastActivity, v)) +} + +// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. 
+func IPAddress(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldIPAddress, v)) +} + +// UserAgent applies equality check predicate on the "user_agent" field. It's identical to UserAgentEQ. +func UserAgent(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldUserAgent, v)) +} + +// ClientStatesEQ applies the EQ predicate on the "client_states" field. +func ClientStatesEQ(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldClientStates, v)) +} + +// ClientStatesNEQ applies the NEQ predicate on the "client_states" field. +func ClientStatesNEQ(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNEQ(FieldClientStates, v)) +} + +// ClientStatesIn applies the In predicate on the "client_states" field. +func ClientStatesIn(vs ...[]byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldIn(FieldClientStates, vs...)) +} + +// ClientStatesNotIn applies the NotIn predicate on the "client_states" field. +func ClientStatesNotIn(vs ...[]byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNotIn(FieldClientStates, vs...)) +} + +// ClientStatesGT applies the GT predicate on the "client_states" field. +func ClientStatesGT(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGT(FieldClientStates, v)) +} + +// ClientStatesGTE applies the GTE predicate on the "client_states" field. +func ClientStatesGTE(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGTE(FieldClientStates, v)) +} + +// ClientStatesLT applies the LT predicate on the "client_states" field. +func ClientStatesLT(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLT(FieldClientStates, v)) +} + +// ClientStatesLTE applies the LTE predicate on the "client_states" field. 
+func ClientStatesLTE(v []byte) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLTE(FieldClientStates, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLTE(FieldCreatedAt, v)) +} + +// LastActivityEQ applies the EQ predicate on the "last_activity" field. +func LastActivityEQ(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldLastActivity, v)) +} + +// LastActivityNEQ applies the NEQ predicate on the "last_activity" field. 
+func LastActivityNEQ(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNEQ(FieldLastActivity, v)) +} + +// LastActivityIn applies the In predicate on the "last_activity" field. +func LastActivityIn(vs ...time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldIn(FieldLastActivity, vs...)) +} + +// LastActivityNotIn applies the NotIn predicate on the "last_activity" field. +func LastActivityNotIn(vs ...time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNotIn(FieldLastActivity, vs...)) +} + +// LastActivityGT applies the GT predicate on the "last_activity" field. +func LastActivityGT(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGT(FieldLastActivity, v)) +} + +// LastActivityGTE applies the GTE predicate on the "last_activity" field. +func LastActivityGTE(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGTE(FieldLastActivity, v)) +} + +// LastActivityLT applies the LT predicate on the "last_activity" field. +func LastActivityLT(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLT(FieldLastActivity, v)) +} + +// LastActivityLTE applies the LTE predicate on the "last_activity" field. +func LastActivityLTE(v time.Time) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLTE(FieldLastActivity, v)) +} + +// IPAddressEQ applies the EQ predicate on the "ip_address" field. +func IPAddressEQ(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldIPAddress, v)) +} + +// IPAddressNEQ applies the NEQ predicate on the "ip_address" field. +func IPAddressNEQ(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNEQ(FieldIPAddress, v)) +} + +// IPAddressIn applies the In predicate on the "ip_address" field. 
+func IPAddressIn(vs ...string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldIn(FieldIPAddress, vs...)) +} + +// IPAddressNotIn applies the NotIn predicate on the "ip_address" field. +func IPAddressNotIn(vs ...string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNotIn(FieldIPAddress, vs...)) +} + +// IPAddressGT applies the GT predicate on the "ip_address" field. +func IPAddressGT(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGT(FieldIPAddress, v)) +} + +// IPAddressGTE applies the GTE predicate on the "ip_address" field. +func IPAddressGTE(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGTE(FieldIPAddress, v)) +} + +// IPAddressLT applies the LT predicate on the "ip_address" field. +func IPAddressLT(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLT(FieldIPAddress, v)) +} + +// IPAddressLTE applies the LTE predicate on the "ip_address" field. +func IPAddressLTE(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLTE(FieldIPAddress, v)) +} + +// IPAddressContains applies the Contains predicate on the "ip_address" field. +func IPAddressContains(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldContains(FieldIPAddress, v)) +} + +// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. +func IPAddressHasPrefix(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldHasPrefix(FieldIPAddress, v)) +} + +// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. +func IPAddressHasSuffix(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldHasSuffix(FieldIPAddress, v)) +} + +// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. 
+func IPAddressEqualFold(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEqualFold(FieldIPAddress, v)) +} + +// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. +func IPAddressContainsFold(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldContainsFold(FieldIPAddress, v)) +} + +// UserAgentEQ applies the EQ predicate on the "user_agent" field. +func UserAgentEQ(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEQ(FieldUserAgent, v)) +} + +// UserAgentNEQ applies the NEQ predicate on the "user_agent" field. +func UserAgentNEQ(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNEQ(FieldUserAgent, v)) +} + +// UserAgentIn applies the In predicate on the "user_agent" field. +func UserAgentIn(vs ...string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldIn(FieldUserAgent, vs...)) +} + +// UserAgentNotIn applies the NotIn predicate on the "user_agent" field. +func UserAgentNotIn(vs ...string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldNotIn(FieldUserAgent, vs...)) +} + +// UserAgentGT applies the GT predicate on the "user_agent" field. +func UserAgentGT(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGT(FieldUserAgent, v)) +} + +// UserAgentGTE applies the GTE predicate on the "user_agent" field. +func UserAgentGTE(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldGTE(FieldUserAgent, v)) +} + +// UserAgentLT applies the LT predicate on the "user_agent" field. +func UserAgentLT(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLT(FieldUserAgent, v)) +} + +// UserAgentLTE applies the LTE predicate on the "user_agent" field. +func UserAgentLTE(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldLTE(FieldUserAgent, v)) +} + +// UserAgentContains applies the Contains predicate on the "user_agent" field. 
+func UserAgentContains(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldContains(FieldUserAgent, v)) +} + +// UserAgentHasPrefix applies the HasPrefix predicate on the "user_agent" field. +func UserAgentHasPrefix(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldHasPrefix(FieldUserAgent, v)) +} + +// UserAgentHasSuffix applies the HasSuffix predicate on the "user_agent" field. +func UserAgentHasSuffix(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldHasSuffix(FieldUserAgent, v)) +} + +// UserAgentEqualFold applies the EqualFold predicate on the "user_agent" field. +func UserAgentEqualFold(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldEqualFold(FieldUserAgent, v)) +} + +// UserAgentContainsFold applies the ContainsFold predicate on the "user_agent" field. +func UserAgentContainsFold(v string) predicate.AuthSession { + return predicate.AuthSession(sql.FieldContainsFold(FieldUserAgent, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AuthSession) predicate.AuthSession { + return predicate.AuthSession(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AuthSession) predicate.AuthSession { + return predicate.AuthSession(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AuthSession) predicate.AuthSession { + return predicate.AuthSession(sql.NotPredicates(p)) +} diff --git a/storage/ent/db/authsession_create.go b/storage/ent/db/authsession_create.go new file mode 100644 index 00000000..a680d675 --- /dev/null +++ b/storage/ent/db/authsession_create.go @@ -0,0 +1,282 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authsession" +) + +// AuthSessionCreate is the builder for creating a AuthSession entity. +type AuthSessionCreate struct { + config + mutation *AuthSessionMutation + hooks []Hook +} + +// SetClientStates sets the "client_states" field. +func (_c *AuthSessionCreate) SetClientStates(v []byte) *AuthSessionCreate { + _c.mutation.SetClientStates(v) + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AuthSessionCreate) SetCreatedAt(v time.Time) *AuthSessionCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetLastActivity sets the "last_activity" field. +func (_c *AuthSessionCreate) SetLastActivity(v time.Time) *AuthSessionCreate { + _c.mutation.SetLastActivity(v) + return _c +} + +// SetIPAddress sets the "ip_address" field. +func (_c *AuthSessionCreate) SetIPAddress(v string) *AuthSessionCreate { + _c.mutation.SetIPAddress(v) + return _c +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_c *AuthSessionCreate) SetNillableIPAddress(v *string) *AuthSessionCreate { + if v != nil { + _c.SetIPAddress(*v) + } + return _c +} + +// SetUserAgent sets the "user_agent" field. +func (_c *AuthSessionCreate) SetUserAgent(v string) *AuthSessionCreate { + _c.mutation.SetUserAgent(v) + return _c +} + +// SetNillableUserAgent sets the "user_agent" field if the given value is not nil. +func (_c *AuthSessionCreate) SetNillableUserAgent(v *string) *AuthSessionCreate { + if v != nil { + _c.SetUserAgent(*v) + } + return _c +} + +// SetID sets the "id" field. +func (_c *AuthSessionCreate) SetID(v string) *AuthSessionCreate { + _c.mutation.SetID(v) + return _c +} + +// Mutation returns the AuthSessionMutation object of the builder. 
+func (_c *AuthSessionCreate) Mutation() *AuthSessionMutation { + return _c.mutation +} + +// Save creates the AuthSession in the database. +func (_c *AuthSessionCreate) Save(ctx context.Context) (*AuthSession, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AuthSessionCreate) SaveX(ctx context.Context) *AuthSession { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AuthSessionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AuthSessionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *AuthSessionCreate) defaults() { + if _, ok := _c.mutation.IPAddress(); !ok { + v := authsession.DefaultIPAddress + _c.mutation.SetIPAddress(v) + } + if _, ok := _c.mutation.UserAgent(); !ok { + v := authsession.DefaultUserAgent + _c.mutation.SetUserAgent(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *AuthSessionCreate) check() error { + if _, ok := _c.mutation.ClientStates(); !ok { + return &ValidationError{Name: "client_states", err: errors.New(`db: missing required field "AuthSession.client_states"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "AuthSession.created_at"`)} + } + if _, ok := _c.mutation.LastActivity(); !ok { + return &ValidationError{Name: "last_activity", err: errors.New(`db: missing required field "AuthSession.last_activity"`)} + } + if _, ok := _c.mutation.IPAddress(); !ok { + return &ValidationError{Name: "ip_address", err: errors.New(`db: missing required field "AuthSession.ip_address"`)} + } + if _, ok := _c.mutation.UserAgent(); !ok { + return &ValidationError{Name: "user_agent", err: errors.New(`db: missing required field "AuthSession.user_agent"`)} + } + if v, ok := _c.mutation.ID(); ok { + if err := authsession.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "AuthSession.id": %w`, err)} + } + } + return nil +} + +func (_c *AuthSessionCreate) sqlSave(ctx context.Context) (*AuthSession, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected AuthSession.ID type: %T", _spec.ID.Value) + } + } + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *AuthSessionCreate) createSpec() (*AuthSession, *sqlgraph.CreateSpec) { + var ( + _node = &AuthSession{config: _c.config} + _spec = sqlgraph.NewCreateSpec(authsession.Table, sqlgraph.NewFieldSpec(authsession.FieldID, 
field.TypeString)) + ) + if id, ok := _c.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := _c.mutation.ClientStates(); ok { + _spec.SetField(authsession.FieldClientStates, field.TypeBytes, value) + _node.ClientStates = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(authsession.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.LastActivity(); ok { + _spec.SetField(authsession.FieldLastActivity, field.TypeTime, value) + _node.LastActivity = value + } + if value, ok := _c.mutation.IPAddress(); ok { + _spec.SetField(authsession.FieldIPAddress, field.TypeString, value) + _node.IPAddress = value + } + if value, ok := _c.mutation.UserAgent(); ok { + _spec.SetField(authsession.FieldUserAgent, field.TypeString, value) + _node.UserAgent = value + } + return _node, _spec +} + +// AuthSessionCreateBulk is the builder for creating many AuthSession entities in bulk. +type AuthSessionCreateBulk struct { + config + err error + builders []*AuthSessionCreate +} + +// Save creates the AuthSession entities in the database. 
+func (_c *AuthSessionCreateBulk) Save(ctx context.Context) ([]*AuthSession, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*AuthSession, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AuthSessionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *AuthSessionCreateBulk) SaveX(ctx context.Context) []*AuthSession { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AuthSessionCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_c *AuthSessionCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/storage/ent/db/authsession_delete.go b/storage/ent/db/authsession_delete.go new file mode 100644 index 00000000..63116fb4 --- /dev/null +++ b/storage/ent/db/authsession_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authsession" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthSessionDelete is the builder for deleting a AuthSession entity. +type AuthSessionDelete struct { + config + hooks []Hook + mutation *AuthSessionMutation +} + +// Where appends a list predicates to the AuthSessionDelete builder. +func (_d *AuthSessionDelete) Where(ps ...predicate.AuthSession) *AuthSessionDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *AuthSessionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *AuthSessionDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AuthSessionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(authsession.Table, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AuthSessionDeleteOne is the builder for deleting a single AuthSession entity. +type AuthSessionDeleteOne struct { + _d *AuthSessionDelete +} + +// Where appends a list predicates to the AuthSessionDelete builder. +func (_d *AuthSessionDeleteOne) Where(ps ...predicate.AuthSession) *AuthSessionDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AuthSessionDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{authsession.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AuthSessionDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/storage/ent/db/authsession_query.go b/storage/ent/db/authsession_query.go new file mode 100644 index 00000000..dc3528f9 --- /dev/null +++ b/storage/ent/db/authsession_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authsession" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthSessionQuery is the builder for querying AuthSession entities. +type AuthSessionQuery struct { + config + ctx *QueryContext + order []authsession.OrderOption + inters []Interceptor + predicates []predicate.AuthSession + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AuthSessionQuery builder. +func (_q *AuthSessionQuery) Where(ps ...predicate.AuthSession) *AuthSessionQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AuthSessionQuery) Limit(limit int) *AuthSessionQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AuthSessionQuery) Offset(offset int) *AuthSessionQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AuthSessionQuery) Unique(unique bool) *AuthSessionQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AuthSessionQuery) Order(o ...authsession.OrderOption) *AuthSessionQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first AuthSession entity from the query. +// Returns a *NotFoundError when no AuthSession was found. 
+func (_q *AuthSessionQuery) First(ctx context.Context) (*AuthSession, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{authsession.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AuthSessionQuery) FirstX(ctx context.Context) *AuthSession { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AuthSession ID from the query. +// Returns a *NotFoundError when no AuthSession ID was found. +func (_q *AuthSessionQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{authsession.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AuthSessionQuery) FirstIDX(ctx context.Context) string { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AuthSession entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AuthSession entity is found. +// Returns a *NotFoundError when no AuthSession entities are found. +func (_q *AuthSessionQuery) Only(ctx context.Context) (*AuthSession, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{authsession.Label} + default: + return nil, &NotSingularError{authsession.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (_q *AuthSessionQuery) OnlyX(ctx context.Context) *AuthSession { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AuthSession ID in the query. +// Returns a *NotSingularError when more than one AuthSession ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *AuthSessionQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{authsession.Label} + default: + err = &NotSingularError{authsession.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *AuthSessionQuery) OnlyIDX(ctx context.Context) string { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AuthSessions. +func (_q *AuthSessionQuery) All(ctx context.Context) ([]*AuthSession, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AuthSession, *AuthSessionQuery]() + return withInterceptors[[]*AuthSession](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AuthSessionQuery) AllX(ctx context.Context) []*AuthSession { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AuthSession IDs. +func (_q *AuthSessionQuery) IDs(ctx context.Context) (ids []string, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(authsession.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. 
+func (_q *AuthSessionQuery) IDsX(ctx context.Context) []string { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *AuthSessionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AuthSessionQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AuthSessionQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *AuthSessionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AuthSessionQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AuthSessionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AuthSessionQuery) Clone() *AuthSessionQuery { + if _q == nil { + return nil + } + return &AuthSessionQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]authsession.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.AuthSession{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. 
+// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// ClientStates []byte `json:"client_states,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AuthSession.Query(). +// GroupBy(authsession.FieldClientStates). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (_q *AuthSessionQuery) GroupBy(field string, fields ...string) *AuthSessionGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AuthSessionGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = authsession.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// ClientStates []byte `json:"client_states,omitempty"` +// } +// +// client.AuthSession.Query(). +// Select(authsession.FieldClientStates). +// Scan(ctx, &v) +func (_q *AuthSessionQuery) Select(fields ...string) *AuthSessionSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &AuthSessionSelect{AuthSessionQuery: _q} + sbuild.label = authsession.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AuthSessionSelect configured with the given aggregations. +func (_q *AuthSessionQuery) Aggregate(fns ...AggregateFunc) *AuthSessionSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *AuthSessionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !authsession.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AuthSessionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthSession, error) { + var ( + nodes = []*AuthSession{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AuthSession).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AuthSession{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *AuthSessionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AuthSessionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(authsession.Table, authsession.Columns, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + 
_spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, authsession.FieldID) + for i := range fields { + if fields[i] != authsession.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AuthSessionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(authsession.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = authsession.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AuthSessionGroupBy is the group-by builder for AuthSession entities. +type AuthSessionGroupBy struct { + selector + build *AuthSessionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AuthSessionGroupBy) Aggregate(fns ...AggregateFunc) *AuthSessionGroupBy { + _g.fns = append(_g.fns, fns...) 
+ return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *AuthSessionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AuthSessionQuery, *AuthSessionGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AuthSessionGroupBy) sqlScan(ctx context.Context, root *AuthSessionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AuthSessionSelect is the builder for selecting fields of AuthSession entities. +type AuthSessionSelect struct { + *AuthSessionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AuthSessionSelect) Aggregate(fns ...AggregateFunc) *AuthSessionSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *AuthSessionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AuthSessionQuery, *AuthSessionSelect](ctx, _s.AuthSessionQuery, _s, _s.inters, v) +} + +func (_s *AuthSessionSelect) sqlScan(ctx context.Context, root *AuthSessionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/storage/ent/db/authsession_update.go b/storage/ent/db/authsession_update.go new file mode 100644 index 00000000..e91999bd --- /dev/null +++ b/storage/ent/db/authsession_update.go @@ -0,0 +1,330 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authsession" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthSessionUpdate is the builder for updating AuthSession entities. +type AuthSessionUpdate struct { + config + hooks []Hook + mutation *AuthSessionMutation +} + +// Where appends a list predicates to the AuthSessionUpdate builder. +func (_u *AuthSessionUpdate) Where(ps ...predicate.AuthSession) *AuthSessionUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetClientStates sets the "client_states" field. 
+func (_u *AuthSessionUpdate) SetClientStates(v []byte) *AuthSessionUpdate { + _u.mutation.SetClientStates(v) + return _u +} + +// SetCreatedAt sets the "created_at" field. +func (_u *AuthSessionUpdate) SetCreatedAt(v time.Time) *AuthSessionUpdate { + _u.mutation.SetCreatedAt(v) + return _u +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_u *AuthSessionUpdate) SetNillableCreatedAt(v *time.Time) *AuthSessionUpdate { + if v != nil { + _u.SetCreatedAt(*v) + } + return _u +} + +// SetLastActivity sets the "last_activity" field. +func (_u *AuthSessionUpdate) SetLastActivity(v time.Time) *AuthSessionUpdate { + _u.mutation.SetLastActivity(v) + return _u +} + +// SetNillableLastActivity sets the "last_activity" field if the given value is not nil. +func (_u *AuthSessionUpdate) SetNillableLastActivity(v *time.Time) *AuthSessionUpdate { + if v != nil { + _u.SetLastActivity(*v) + } + return _u +} + +// SetIPAddress sets the "ip_address" field. +func (_u *AuthSessionUpdate) SetIPAddress(v string) *AuthSessionUpdate { + _u.mutation.SetIPAddress(v) + return _u +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_u *AuthSessionUpdate) SetNillableIPAddress(v *string) *AuthSessionUpdate { + if v != nil { + _u.SetIPAddress(*v) + } + return _u +} + +// SetUserAgent sets the "user_agent" field. +func (_u *AuthSessionUpdate) SetUserAgent(v string) *AuthSessionUpdate { + _u.mutation.SetUserAgent(v) + return _u +} + +// SetNillableUserAgent sets the "user_agent" field if the given value is not nil. +func (_u *AuthSessionUpdate) SetNillableUserAgent(v *string) *AuthSessionUpdate { + if v != nil { + _u.SetUserAgent(*v) + } + return _u +} + +// Mutation returns the AuthSessionMutation object of the builder. +func (_u *AuthSessionUpdate) Mutation() *AuthSessionMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (_u *AuthSessionUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AuthSessionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AuthSessionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AuthSessionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +func (_u *AuthSessionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + _spec := sqlgraph.NewUpdateSpec(authsession.Table, authsession.Columns, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.ClientStates(); ok { + _spec.SetField(authsession.FieldClientStates, field.TypeBytes, value) + } + if value, ok := _u.mutation.CreatedAt(); ok { + _spec.SetField(authsession.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.LastActivity(); ok { + _spec.SetField(authsession.FieldLastActivity, field.TypeTime, value) + } + if value, ok := _u.mutation.IPAddress(); ok { + _spec.SetField(authsession.FieldIPAddress, field.TypeString, value) + } + if value, ok := _u.mutation.UserAgent(); ok { + _spec.SetField(authsession.FieldUserAgent, field.TypeString, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{authsession.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// 
AuthSessionUpdateOne is the builder for updating a single AuthSession entity. +type AuthSessionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AuthSessionMutation +} + +// SetClientStates sets the "client_states" field. +func (_u *AuthSessionUpdateOne) SetClientStates(v []byte) *AuthSessionUpdateOne { + _u.mutation.SetClientStates(v) + return _u +} + +// SetCreatedAt sets the "created_at" field. +func (_u *AuthSessionUpdateOne) SetCreatedAt(v time.Time) *AuthSessionUpdateOne { + _u.mutation.SetCreatedAt(v) + return _u +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_u *AuthSessionUpdateOne) SetNillableCreatedAt(v *time.Time) *AuthSessionUpdateOne { + if v != nil { + _u.SetCreatedAt(*v) + } + return _u +} + +// SetLastActivity sets the "last_activity" field. +func (_u *AuthSessionUpdateOne) SetLastActivity(v time.Time) *AuthSessionUpdateOne { + _u.mutation.SetLastActivity(v) + return _u +} + +// SetNillableLastActivity sets the "last_activity" field if the given value is not nil. +func (_u *AuthSessionUpdateOne) SetNillableLastActivity(v *time.Time) *AuthSessionUpdateOne { + if v != nil { + _u.SetLastActivity(*v) + } + return _u +} + +// SetIPAddress sets the "ip_address" field. +func (_u *AuthSessionUpdateOne) SetIPAddress(v string) *AuthSessionUpdateOne { + _u.mutation.SetIPAddress(v) + return _u +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_u *AuthSessionUpdateOne) SetNillableIPAddress(v *string) *AuthSessionUpdateOne { + if v != nil { + _u.SetIPAddress(*v) + } + return _u +} + +// SetUserAgent sets the "user_agent" field. +func (_u *AuthSessionUpdateOne) SetUserAgent(v string) *AuthSessionUpdateOne { + _u.mutation.SetUserAgent(v) + return _u +} + +// SetNillableUserAgent sets the "user_agent" field if the given value is not nil. 
+func (_u *AuthSessionUpdateOne) SetNillableUserAgent(v *string) *AuthSessionUpdateOne { + if v != nil { + _u.SetUserAgent(*v) + } + return _u +} + +// Mutation returns the AuthSessionMutation object of the builder. +func (_u *AuthSessionUpdateOne) Mutation() *AuthSessionMutation { + return _u.mutation +} + +// Where appends a list predicates to the AuthSessionUpdate builder. +func (_u *AuthSessionUpdateOne) Where(ps ...predicate.AuthSession) *AuthSessionUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AuthSessionUpdateOne) Select(field string, fields ...string) *AuthSessionUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated AuthSession entity. +func (_u *AuthSessionUpdateOne) Save(ctx context.Context) (*AuthSession, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AuthSessionUpdateOne) SaveX(ctx context.Context) *AuthSession { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AuthSessionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *AuthSessionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +func (_u *AuthSessionUpdateOne) sqlSave(ctx context.Context) (_node *AuthSession, err error) { + _spec := sqlgraph.NewUpdateSpec(authsession.Table, authsession.Columns, sqlgraph.NewFieldSpec(authsession.FieldID, field.TypeString)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "AuthSession.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, authsession.FieldID) + for _, f := range fields { + if !authsession.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != authsession.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.ClientStates(); ok { + _spec.SetField(authsession.FieldClientStates, field.TypeBytes, value) + } + if value, ok := _u.mutation.CreatedAt(); ok { + _spec.SetField(authsession.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.LastActivity(); ok { + _spec.SetField(authsession.FieldLastActivity, field.TypeTime, value) + } + if value, ok := _u.mutation.IPAddress(); ok { + _spec.SetField(authsession.FieldIPAddress, field.TypeString, value) + } + if value, ok := _u.mutation.UserAgent(); ok { + _spec.SetField(authsession.FieldUserAgent, field.TypeString, value) + } + _node = &AuthSession{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = 
&NotFoundError{authsession.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/storage/ent/db/client.go b/storage/ent/db/client.go index 4fb28cb3..ffd70add 100644 --- a/storage/ent/db/client.go +++ b/storage/ent/db/client.go @@ -16,6 +16,7 @@ import ( "entgo.io/ent/dialect/sql" "github.com/dexidp/dex/storage/ent/db/authcode" "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/authsession" "github.com/dexidp/dex/storage/ent/db/connector" "github.com/dexidp/dex/storage/ent/db/devicerequest" "github.com/dexidp/dex/storage/ent/db/devicetoken" @@ -24,6 +25,7 @@ import ( "github.com/dexidp/dex/storage/ent/db/offlinesession" "github.com/dexidp/dex/storage/ent/db/password" "github.com/dexidp/dex/storage/ent/db/refreshtoken" + "github.com/dexidp/dex/storage/ent/db/useridentity" ) // Client is the client that holds all ent builders. @@ -35,6 +37,8 @@ type Client struct { AuthCode *AuthCodeClient // AuthRequest is the client for interacting with the AuthRequest builders. AuthRequest *AuthRequestClient + // AuthSession is the client for interacting with the AuthSession builders. + AuthSession *AuthSessionClient // Connector is the client for interacting with the Connector builders. Connector *ConnectorClient // DeviceRequest is the client for interacting with the DeviceRequest builders. @@ -51,6 +55,8 @@ type Client struct { Password *PasswordClient // RefreshToken is the client for interacting with the RefreshToken builders. RefreshToken *RefreshTokenClient + // UserIdentity is the client for interacting with the UserIdentity builders. + UserIdentity *UserIdentityClient } // NewClient creates a new client configured with the given options. 
@@ -64,6 +70,7 @@ func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) c.AuthCode = NewAuthCodeClient(c.config) c.AuthRequest = NewAuthRequestClient(c.config) + c.AuthSession = NewAuthSessionClient(c.config) c.Connector = NewConnectorClient(c.config) c.DeviceRequest = NewDeviceRequestClient(c.config) c.DeviceToken = NewDeviceTokenClient(c.config) @@ -72,6 +79,7 @@ func (c *Client) init() { c.OfflineSession = NewOfflineSessionClient(c.config) c.Password = NewPasswordClient(c.config) c.RefreshToken = NewRefreshTokenClient(c.config) + c.UserIdentity = NewUserIdentityClient(c.config) } type ( @@ -166,6 +174,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { config: cfg, AuthCode: NewAuthCodeClient(cfg), AuthRequest: NewAuthRequestClient(cfg), + AuthSession: NewAuthSessionClient(cfg), Connector: NewConnectorClient(cfg), DeviceRequest: NewDeviceRequestClient(cfg), DeviceToken: NewDeviceTokenClient(cfg), @@ -174,6 +183,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { OfflineSession: NewOfflineSessionClient(cfg), Password: NewPasswordClient(cfg), RefreshToken: NewRefreshTokenClient(cfg), + UserIdentity: NewUserIdentityClient(cfg), }, nil } @@ -195,6 +205,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) config: cfg, AuthCode: NewAuthCodeClient(cfg), AuthRequest: NewAuthRequestClient(cfg), + AuthSession: NewAuthSessionClient(cfg), Connector: NewConnectorClient(cfg), DeviceRequest: NewDeviceRequestClient(cfg), DeviceToken: NewDeviceTokenClient(cfg), @@ -203,6 +214,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) OfflineSession: NewOfflineSessionClient(cfg), Password: NewPasswordClient(cfg), RefreshToken: NewRefreshTokenClient(cfg), + UserIdentity: NewUserIdentityClient(cfg), }, nil } @@ -232,8 +244,9 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. 
func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.AuthCode, c.AuthRequest, c.Connector, c.DeviceRequest, c.DeviceToken, c.Keys, - c.OAuth2Client, c.OfflineSession, c.Password, c.RefreshToken, + c.AuthCode, c.AuthRequest, c.AuthSession, c.Connector, c.DeviceRequest, + c.DeviceToken, c.Keys, c.OAuth2Client, c.OfflineSession, c.Password, + c.RefreshToken, c.UserIdentity, } { n.Use(hooks...) } @@ -243,8 +256,9 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.AuthCode, c.AuthRequest, c.Connector, c.DeviceRequest, c.DeviceToken, c.Keys, - c.OAuth2Client, c.OfflineSession, c.Password, c.RefreshToken, + c.AuthCode, c.AuthRequest, c.AuthSession, c.Connector, c.DeviceRequest, + c.DeviceToken, c.Keys, c.OAuth2Client, c.OfflineSession, c.Password, + c.RefreshToken, c.UserIdentity, } { n.Intercept(interceptors...) } @@ -257,6 +271,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.AuthCode.mutate(ctx, m) case *AuthRequestMutation: return c.AuthRequest.mutate(ctx, m) + case *AuthSessionMutation: + return c.AuthSession.mutate(ctx, m) case *ConnectorMutation: return c.Connector.mutate(ctx, m) case *DeviceRequestMutation: @@ -273,6 +289,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Password.mutate(ctx, m) case *RefreshTokenMutation: return c.RefreshToken.mutate(ctx, m) + case *UserIdentityMutation: + return c.UserIdentity.mutate(ctx, m) default: return nil, fmt.Errorf("db: unknown mutation type %T", m) } @@ -544,6 +562,139 @@ func (c *AuthRequestClient) mutate(ctx context.Context, m *AuthRequestMutation) } } +// AuthSessionClient is a client for the AuthSession schema. 
+type AuthSessionClient struct { + config +} + +// NewAuthSessionClient returns a client for the AuthSession from the given config. +func NewAuthSessionClient(c config) *AuthSessionClient { + return &AuthSessionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `authsession.Hooks(f(g(h())))`. +func (c *AuthSessionClient) Use(hooks ...Hook) { + c.hooks.AuthSession = append(c.hooks.AuthSession, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `authsession.Intercept(f(g(h())))`. +func (c *AuthSessionClient) Intercept(interceptors ...Interceptor) { + c.inters.AuthSession = append(c.inters.AuthSession, interceptors...) +} + +// Create returns a builder for creating a AuthSession entity. +func (c *AuthSessionClient) Create() *AuthSessionCreate { + mutation := newAuthSessionMutation(c.config, OpCreate) + return &AuthSessionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AuthSession entities. +func (c *AuthSessionClient) CreateBulk(builders ...*AuthSessionCreate) *AuthSessionCreateBulk { + return &AuthSessionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *AuthSessionClient) MapCreateBulk(slice any, setFunc func(*AuthSessionCreate, int)) *AuthSessionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AuthSessionCreateBulk{err: fmt.Errorf("calling to AuthSessionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AuthSessionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AuthSessionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AuthSession. +func (c *AuthSessionClient) Update() *AuthSessionUpdate { + mutation := newAuthSessionMutation(c.config, OpUpdate) + return &AuthSessionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AuthSessionClient) UpdateOne(_m *AuthSession) *AuthSessionUpdateOne { + mutation := newAuthSessionMutation(c.config, OpUpdateOne, withAuthSession(_m)) + return &AuthSessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AuthSessionClient) UpdateOneID(id string) *AuthSessionUpdateOne { + mutation := newAuthSessionMutation(c.config, OpUpdateOne, withAuthSessionID(id)) + return &AuthSessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AuthSession. +func (c *AuthSessionClient) Delete() *AuthSessionDelete { + mutation := newAuthSessionMutation(c.config, OpDelete) + return &AuthSessionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AuthSessionClient) DeleteOne(_m *AuthSession) *AuthSessionDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *AuthSessionClient) DeleteOneID(id string) *AuthSessionDeleteOne { + builder := c.Delete().Where(authsession.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AuthSessionDeleteOne{builder} +} + +// Query returns a query builder for AuthSession. +func (c *AuthSessionClient) Query() *AuthSessionQuery { + return &AuthSessionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAuthSession}, + inters: c.Interceptors(), + } +} + +// Get returns a AuthSession entity by its id. +func (c *AuthSessionClient) Get(ctx context.Context, id string) (*AuthSession, error) { + return c.Query().Where(authsession.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AuthSessionClient) GetX(ctx context.Context, id string) *AuthSession { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *AuthSessionClient) Hooks() []Hook { + return c.hooks.AuthSession +} + +// Interceptors returns the client interceptors. +func (c *AuthSessionClient) Interceptors() []Interceptor { + return c.inters.AuthSession +} + +func (c *AuthSessionClient) mutate(ctx context.Context, m *AuthSessionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AuthSessionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AuthSessionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AuthSessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AuthSessionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown AuthSession mutation op: %q", m.Op()) + } +} + // ConnectorClient is a client for the Connector schema. 
type ConnectorClient struct { config @@ -1608,14 +1759,148 @@ func (c *RefreshTokenClient) mutate(ctx context.Context, m *RefreshTokenMutation } } +// UserIdentityClient is a client for the UserIdentity schema. +type UserIdentityClient struct { + config +} + +// NewUserIdentityClient returns a client for the UserIdentity from the given config. +func NewUserIdentityClient(c config) *UserIdentityClient { + return &UserIdentityClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `useridentity.Hooks(f(g(h())))`. +func (c *UserIdentityClient) Use(hooks ...Hook) { + c.hooks.UserIdentity = append(c.hooks.UserIdentity, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `useridentity.Intercept(f(g(h())))`. +func (c *UserIdentityClient) Intercept(interceptors ...Interceptor) { + c.inters.UserIdentity = append(c.inters.UserIdentity, interceptors...) +} + +// Create returns a builder for creating a UserIdentity entity. +func (c *UserIdentityClient) Create() *UserIdentityCreate { + mutation := newUserIdentityMutation(c.config, OpCreate) + return &UserIdentityCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserIdentity entities. +func (c *UserIdentityClient) CreateBulk(builders ...*UserIdentityCreate) *UserIdentityCreateBulk { + return &UserIdentityCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *UserIdentityClient) MapCreateBulk(slice any, setFunc func(*UserIdentityCreate, int)) *UserIdentityCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserIdentityCreateBulk{err: fmt.Errorf("calling to UserIdentityClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserIdentityCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserIdentityCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserIdentity. +func (c *UserIdentityClient) Update() *UserIdentityUpdate { + mutation := newUserIdentityMutation(c.config, OpUpdate) + return &UserIdentityUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserIdentityClient) UpdateOne(_m *UserIdentity) *UserIdentityUpdateOne { + mutation := newUserIdentityMutation(c.config, OpUpdateOne, withUserIdentity(_m)) + return &UserIdentityUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserIdentityClient) UpdateOneID(id string) *UserIdentityUpdateOne { + mutation := newUserIdentityMutation(c.config, OpUpdateOne, withUserIdentityID(id)) + return &UserIdentityUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserIdentity. +func (c *UserIdentityClient) Delete() *UserIdentityDelete { + mutation := newUserIdentityMutation(c.config, OpDelete) + return &UserIdentityDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserIdentityClient) DeleteOne(_m *UserIdentity) *UserIdentityDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *UserIdentityClient) DeleteOneID(id string) *UserIdentityDeleteOne { + builder := c.Delete().Where(useridentity.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserIdentityDeleteOne{builder} +} + +// Query returns a query builder for UserIdentity. +func (c *UserIdentityClient) Query() *UserIdentityQuery { + return &UserIdentityQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserIdentity}, + inters: c.Interceptors(), + } +} + +// Get returns a UserIdentity entity by its id. +func (c *UserIdentityClient) Get(ctx context.Context, id string) (*UserIdentity, error) { + return c.Query().Where(useridentity.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserIdentityClient) GetX(ctx context.Context, id string) *UserIdentity { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *UserIdentityClient) Hooks() []Hook { + return c.hooks.UserIdentity +} + +// Interceptors returns the client interceptors. +func (c *UserIdentityClient) Interceptors() []Interceptor { + return c.inters.UserIdentity +} + +func (c *UserIdentityClient) mutate(ctx context.Context, m *UserIdentityMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserIdentityCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserIdentityUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserIdentityUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserIdentityDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown UserIdentity mutation op: %q", m.Op()) + } +} + // hooks and interceptors per client, for fast access. 
type ( hooks struct { - AuthCode, AuthRequest, Connector, DeviceRequest, DeviceToken, Keys, - OAuth2Client, OfflineSession, Password, RefreshToken []ent.Hook + AuthCode, AuthRequest, AuthSession, Connector, DeviceRequest, DeviceToken, Keys, + OAuth2Client, OfflineSession, Password, RefreshToken, UserIdentity []ent.Hook } inters struct { - AuthCode, AuthRequest, Connector, DeviceRequest, DeviceToken, Keys, - OAuth2Client, OfflineSession, Password, RefreshToken []ent.Interceptor + AuthCode, AuthRequest, AuthSession, Connector, DeviceRequest, DeviceToken, Keys, + OAuth2Client, OfflineSession, Password, RefreshToken, + UserIdentity []ent.Interceptor } ) diff --git a/storage/ent/db/connector.go b/storage/ent/db/connector.go index 2071d14e..812b45f7 100644 --- a/storage/ent/db/connector.go +++ b/storage/ent/db/connector.go @@ -3,6 +3,7 @@ package db import ( + "encoding/json" "fmt" "strings" @@ -23,7 +24,9 @@ type Connector struct { // ResourceVersion holds the value of the "resource_version" field. ResourceVersion string `json:"resource_version,omitempty"` // Config holds the value of the "config" field. - Config []byte `json:"config,omitempty"` + Config []byte `json:"config,omitempty"` + // GrantTypes holds the value of the "grant_types" field. 
+ GrantTypes []string `json:"grant_types,omitempty"` selectValues sql.SelectValues } @@ -32,7 +35,7 @@ func (*Connector) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case connector.FieldConfig: + case connector.FieldConfig, connector.FieldGrantTypes: values[i] = new([]byte) case connector.FieldID, connector.FieldType, connector.FieldName, connector.FieldResourceVersion: values[i] = new(sql.NullString) @@ -81,6 +84,14 @@ func (_m *Connector) assignValues(columns []string, values []any) error { } else if value != nil { _m.Config = *value } + case connector.FieldGrantTypes: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field grant_types", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.GrantTypes); err != nil { + return fmt.Errorf("unmarshal field grant_types: %w", err) + } + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -128,6 +139,9 @@ func (_m *Connector) String() string { builder.WriteString(", ") builder.WriteString("config=") builder.WriteString(fmt.Sprintf("%v", _m.Config)) + builder.WriteString(", ") + builder.WriteString("grant_types=") + builder.WriteString(fmt.Sprintf("%v", _m.GrantTypes)) builder.WriteByte(')') return builder.String() } diff --git a/storage/ent/db/connector/connector.go b/storage/ent/db/connector/connector.go index 996328c1..ca603f4d 100644 --- a/storage/ent/db/connector/connector.go +++ b/storage/ent/db/connector/connector.go @@ -19,6 +19,8 @@ const ( FieldResourceVersion = "resource_version" // FieldConfig holds the string denoting the config field in the database. FieldConfig = "config" + // FieldGrantTypes holds the string denoting the grant_types field in the database. + FieldGrantTypes = "grant_types" // Table holds the table name of the connector in the database. 
Table = "connectors" ) @@ -30,6 +32,7 @@ var Columns = []string{ FieldName, FieldResourceVersion, FieldConfig, + FieldGrantTypes, } // ValidColumn reports if the column name is valid (part of the table columns). diff --git a/storage/ent/db/connector/where.go b/storage/ent/db/connector/where.go index 39cc477f..a09bec60 100644 --- a/storage/ent/db/connector/where.go +++ b/storage/ent/db/connector/where.go @@ -317,6 +317,16 @@ func ConfigLTE(v []byte) predicate.Connector { return predicate.Connector(sql.FieldLTE(FieldConfig, v)) } +// GrantTypesIsNil applies the IsNil predicate on the "grant_types" field. +func GrantTypesIsNil() predicate.Connector { + return predicate.Connector(sql.FieldIsNull(FieldGrantTypes)) +} + +// GrantTypesNotNil applies the NotNil predicate on the "grant_types" field. +func GrantTypesNotNil() predicate.Connector { + return predicate.Connector(sql.FieldNotNull(FieldGrantTypes)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Connector) predicate.Connector { return predicate.Connector(sql.AndPredicates(predicates...)) diff --git a/storage/ent/db/connector_create.go b/storage/ent/db/connector_create.go index 42da769e..4e9cc35b 100644 --- a/storage/ent/db/connector_create.go +++ b/storage/ent/db/connector_create.go @@ -43,6 +43,12 @@ func (_c *ConnectorCreate) SetConfig(v []byte) *ConnectorCreate { return _c } +// SetGrantTypes sets the "grant_types" field. +func (_c *ConnectorCreate) SetGrantTypes(v []string) *ConnectorCreate { + _c.mutation.SetGrantTypes(v) + return _c +} + // SetID sets the "id" field. 
func (_c *ConnectorCreate) SetID(v string) *ConnectorCreate { _c.mutation.SetID(v) @@ -161,6 +167,10 @@ func (_c *ConnectorCreate) createSpec() (*Connector, *sqlgraph.CreateSpec) { _spec.SetField(connector.FieldConfig, field.TypeBytes, value) _node.Config = value } + if value, ok := _c.mutation.GrantTypes(); ok { + _spec.SetField(connector.FieldGrantTypes, field.TypeJSON, value) + _node.GrantTypes = value + } return _node, _spec } diff --git a/storage/ent/db/connector_update.go b/storage/ent/db/connector_update.go index bbcfd565..d9b58d04 100644 --- a/storage/ent/db/connector_update.go +++ b/storage/ent/db/connector_update.go @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/dexidp/dex/storage/ent/db/connector" "github.com/dexidp/dex/storage/ent/db/predicate" @@ -75,6 +76,24 @@ func (_u *ConnectorUpdate) SetConfig(v []byte) *ConnectorUpdate { return _u } +// SetGrantTypes sets the "grant_types" field. +func (_u *ConnectorUpdate) SetGrantTypes(v []string) *ConnectorUpdate { + _u.mutation.SetGrantTypes(v) + return _u +} + +// AppendGrantTypes appends value to the "grant_types" field. +func (_u *ConnectorUpdate) AppendGrantTypes(v []string) *ConnectorUpdate { + _u.mutation.AppendGrantTypes(v) + return _u +} + +// ClearGrantTypes clears the value of the "grant_types" field. +func (_u *ConnectorUpdate) ClearGrantTypes() *ConnectorUpdate { + _u.mutation.ClearGrantTypes() + return _u +} + // Mutation returns the ConnectorMutation object of the builder. 
func (_u *ConnectorUpdate) Mutation() *ConnectorMutation { return _u.mutation @@ -146,6 +165,17 @@ func (_u *ConnectorUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.Config(); ok { _spec.SetField(connector.FieldConfig, field.TypeBytes, value) } + if value, ok := _u.mutation.GrantTypes(); ok { + _spec.SetField(connector.FieldGrantTypes, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedGrantTypes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, connector.FieldGrantTypes, value) + }) + } + if _u.mutation.GrantTypesCleared() { + _spec.ClearField(connector.FieldGrantTypes, field.TypeJSON) + } if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{connector.Label} @@ -214,6 +244,24 @@ func (_u *ConnectorUpdateOne) SetConfig(v []byte) *ConnectorUpdateOne { return _u } +// SetGrantTypes sets the "grant_types" field. +func (_u *ConnectorUpdateOne) SetGrantTypes(v []string) *ConnectorUpdateOne { + _u.mutation.SetGrantTypes(v) + return _u +} + +// AppendGrantTypes appends value to the "grant_types" field. +func (_u *ConnectorUpdateOne) AppendGrantTypes(v []string) *ConnectorUpdateOne { + _u.mutation.AppendGrantTypes(v) + return _u +} + +// ClearGrantTypes clears the value of the "grant_types" field. +func (_u *ConnectorUpdateOne) ClearGrantTypes() *ConnectorUpdateOne { + _u.mutation.ClearGrantTypes() + return _u +} + // Mutation returns the ConnectorMutation object of the builder. 
func (_u *ConnectorUpdateOne) Mutation() *ConnectorMutation { return _u.mutation @@ -315,6 +363,17 @@ func (_u *ConnectorUpdateOne) sqlSave(ctx context.Context) (_node *Connector, er if value, ok := _u.mutation.Config(); ok { _spec.SetField(connector.FieldConfig, field.TypeBytes, value) } + if value, ok := _u.mutation.GrantTypes(); ok { + _spec.SetField(connector.FieldGrantTypes, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedGrantTypes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, connector.FieldGrantTypes, value) + }) + } + if _u.mutation.GrantTypesCleared() { + _spec.ClearField(connector.FieldGrantTypes, field.TypeJSON) + } _node = &Connector{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/storage/ent/db/ent.go b/storage/ent/db/ent.go index 06bee261..f73e0090 100644 --- a/storage/ent/db/ent.go +++ b/storage/ent/db/ent.go @@ -14,6 +14,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "github.com/dexidp/dex/storage/ent/db/authcode" "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/authsession" "github.com/dexidp/dex/storage/ent/db/connector" "github.com/dexidp/dex/storage/ent/db/devicerequest" "github.com/dexidp/dex/storage/ent/db/devicetoken" @@ -22,6 +23,7 @@ import ( "github.com/dexidp/dex/storage/ent/db/offlinesession" "github.com/dexidp/dex/storage/ent/db/password" "github.com/dexidp/dex/storage/ent/db/refreshtoken" + "github.com/dexidp/dex/storage/ent/db/useridentity" ) // ent aliases to avoid import conflicts in user's code. 
@@ -84,6 +86,7 @@ func checkColumn(t, c string) error { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ authcode.Table: authcode.ValidColumn, authrequest.Table: authrequest.ValidColumn, + authsession.Table: authsession.ValidColumn, connector.Table: connector.ValidColumn, devicerequest.Table: devicerequest.ValidColumn, devicetoken.Table: devicetoken.ValidColumn, @@ -92,6 +95,7 @@ func checkColumn(t, c string) error { offlinesession.Table: offlinesession.ValidColumn, password.Table: password.ValidColumn, refreshtoken.Table: refreshtoken.ValidColumn, + useridentity.Table: useridentity.ValidColumn, }) }) return columnCheck(t, c) diff --git a/storage/ent/db/hook/hook.go b/storage/ent/db/hook/hook.go index 12cb91c6..1c8780d7 100644 --- a/storage/ent/db/hook/hook.go +++ b/storage/ent/db/hook/hook.go @@ -33,6 +33,18 @@ func (f AuthRequestFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, e return nil, fmt.Errorf("unexpected mutation type %T. expect *db.AuthRequestMutation", m) } +// The AuthSessionFunc type is an adapter to allow the use of ordinary +// function as AuthSession mutator. +type AuthSessionFunc func(context.Context, *db.AuthSessionMutation) (db.Value, error) + +// Mutate calls f(ctx, m). +func (f AuthSessionFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { + if mv, ok := m.(*db.AuthSessionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *db.AuthSessionMutation", m) +} + // The ConnectorFunc type is an adapter to allow the use of ordinary // function as Connector mutator. type ConnectorFunc func(context.Context, *db.ConnectorMutation) (db.Value, error) @@ -129,6 +141,18 @@ func (f RefreshTokenFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, return nil, fmt.Errorf("unexpected mutation type %T. expect *db.RefreshTokenMutation", m) } +// The UserIdentityFunc type is an adapter to allow the use of ordinary +// function as UserIdentity mutator. 
+type UserIdentityFunc func(context.Context, *db.UserIdentityMutation) (db.Value, error) + +// Mutate calls f(ctx, m). +func (f UserIdentityFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { + if mv, ok := m.(*db.UserIdentityMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *db.UserIdentityMutation", m) +} + // Condition is a hook condition function. type Condition func(context.Context, db.Mutation) bool diff --git a/storage/ent/db/migrate/schema.go b/storage/ent/db/migrate/schema.go index b61d4697..786598c0 100644 --- a/storage/ent/db/migrate/schema.go +++ b/storage/ent/db/migrate/schema.go @@ -63,6 +63,21 @@ var ( Columns: AuthRequestsColumns, PrimaryKey: []*schema.Column{AuthRequestsColumns[0]}, } + // AuthSessionsColumns holds the columns for the "auth_sessions" table. + AuthSessionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "client_states", Type: field.TypeBytes}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "last_activity", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "ip_address", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "user_agent", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + } + // AuthSessionsTable holds the schema information for the "auth_sessions" table. 
+ AuthSessionsTable = &schema.Table{ + Name: "auth_sessions", + Columns: AuthSessionsColumns, + PrimaryKey: []*schema.Column{AuthSessionsColumns[0]}, + } // ConnectorsColumns holds the columns for the "connectors" table. ConnectorsColumns = []*schema.Column{ {Name: "id", Type: field.TypeString, Unique: true, Size: 100, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, @@ -70,6 +85,7 @@ var ( {Name: "name", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, {Name: "resource_version", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, {Name: "config", Type: field.TypeBytes}, + {Name: "grant_types", Type: field.TypeJSON, Nullable: true}, } // ConnectorsTable holds the schema information for the "connectors" table. ConnectorsTable = &schema.Table{ @@ -134,6 +150,7 @@ var ( {Name: "public", Type: field.TypeBool}, {Name: "name", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, {Name: "logo_url", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "allowed_connectors", Type: field.TypeJSON, Nullable: true}, } // Oauth2clientsTable holds the schema information for the "oauth2clients" table. Oauth2clientsTable = &schema.Table{ @@ -198,10 +215,33 @@ var ( Columns: RefreshTokensColumns, PrimaryKey: []*schema.Column{RefreshTokensColumns[0]}, } + // UserIdentitiesColumns holds the columns for the "user_identities" table. 
+ UserIdentitiesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_user_id", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_preferred_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email_verified", Type: field.TypeBool, Default: false}, + {Name: "claims_groups", Type: field.TypeJSON, Nullable: true}, + {Name: "consents", Type: field.TypeBytes}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "last_login", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "blocked_until", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + } + // UserIdentitiesTable holds the schema information for the "user_identities" table. 
+ UserIdentitiesTable = &schema.Table{ + Name: "user_identities", + Columns: UserIdentitiesColumns, + PrimaryKey: []*schema.Column{UserIdentitiesColumns[0]}, + } // Tables holds all the tables in the schema. Tables = []*schema.Table{ AuthCodesTable, AuthRequestsTable, + AuthSessionsTable, ConnectorsTable, DeviceRequestsTable, DeviceTokensTable, @@ -210,6 +250,7 @@ var ( OfflineSessionsTable, PasswordsTable, RefreshTokensTable, + UserIdentitiesTable, } ) diff --git a/storage/ent/db/mutation.go b/storage/ent/db/mutation.go index a5cb61d2..748022c9 100644 --- a/storage/ent/db/mutation.go +++ b/storage/ent/db/mutation.go @@ -14,6 +14,7 @@ import ( "github.com/dexidp/dex/storage" "github.com/dexidp/dex/storage/ent/db/authcode" "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/authsession" "github.com/dexidp/dex/storage/ent/db/connector" "github.com/dexidp/dex/storage/ent/db/devicerequest" "github.com/dexidp/dex/storage/ent/db/devicetoken" @@ -23,6 +24,7 @@ import ( "github.com/dexidp/dex/storage/ent/db/password" "github.com/dexidp/dex/storage/ent/db/predicate" "github.com/dexidp/dex/storage/ent/db/refreshtoken" + "github.com/dexidp/dex/storage/ent/db/useridentity" jose "github.com/go-jose/go-jose/v4" ) @@ -37,6 +39,7 @@ const ( // Node types. TypeAuthCode = "AuthCode" TypeAuthRequest = "AuthRequest" + TypeAuthSession = "AuthSession" TypeConnector = "Connector" TypeDeviceRequest = "DeviceRequest" TypeDeviceToken = "DeviceToken" @@ -45,6 +48,7 @@ const ( TypeOfflineSession = "OfflineSession" TypePassword = "Password" TypeRefreshToken = "RefreshToken" + TypeUserIdentity = "UserIdentity" ) // AuthCodeMutation represents an operation that mutates the AuthCode nodes in the graph. @@ -2717,33 +2721,34 @@ func (m *AuthRequestMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AuthRequest edge %s", name) } -// ConnectorMutation represents an operation that mutates the Connector nodes in the graph. 
-type ConnectorMutation struct { +// AuthSessionMutation represents an operation that mutates the AuthSession nodes in the graph. +type AuthSessionMutation struct { config - op Op - typ string - id *string - _type *string - name *string - resource_version *string - _config *[]byte - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*Connector, error) - predicates []predicate.Connector + op Op + typ string + id *string + client_states *[]byte + created_at *time.Time + last_activity *time.Time + ip_address *string + user_agent *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*AuthSession, error) + predicates []predicate.AuthSession } -var _ ent.Mutation = (*ConnectorMutation)(nil) +var _ ent.Mutation = (*AuthSessionMutation)(nil) -// connectorOption allows management of the mutation configuration using functional options. -type connectorOption func(*ConnectorMutation) +// authsessionOption allows management of the mutation configuration using functional options. +type authsessionOption func(*AuthSessionMutation) -// newConnectorMutation creates new mutation for the Connector entity. -func newConnectorMutation(c config, op Op, opts ...connectorOption) *ConnectorMutation { - m := &ConnectorMutation{ +// newAuthSessionMutation creates new mutation for the AuthSession entity. +func newAuthSessionMutation(c config, op Op, opts ...authsessionOption) *AuthSessionMutation { + m := &AuthSessionMutation{ config: c, op: op, - typ: TypeConnector, + typ: TypeAuthSession, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -2752,20 +2757,20 @@ func newConnectorMutation(c config, op Op, opts ...connectorOption) *ConnectorMu return m } -// withConnectorID sets the ID field of the mutation. -func withConnectorID(id string) connectorOption { - return func(m *ConnectorMutation) { +// withAuthSessionID sets the ID field of the mutation. 
+func withAuthSessionID(id string) authsessionOption { + return func(m *AuthSessionMutation) { var ( err error once sync.Once - value *Connector + value *AuthSession ) - m.oldValue = func(ctx context.Context) (*Connector, error) { + m.oldValue = func(ctx context.Context) (*AuthSession, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().Connector.Get(ctx, id) + value, err = m.Client().AuthSession.Get(ctx, id) } }) return value, err @@ -2774,10 +2779,10 @@ func withConnectorID(id string) connectorOption { } } -// withConnector sets the old Connector of the mutation. -func withConnector(node *Connector) connectorOption { - return func(m *ConnectorMutation) { - m.oldValue = func(context.Context) (*Connector, error) { +// withAuthSession sets the old AuthSession of the mutation. +func withAuthSession(node *AuthSession) authsessionOption { + return func(m *AuthSessionMutation) { + m.oldValue = func(context.Context) (*AuthSession, error) { return node, nil } m.id = &node.ID @@ -2786,7 +2791,7 @@ func withConnector(node *Connector) connectorOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m ConnectorMutation) Client() *Client { +func (m AuthSessionMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -2794,7 +2799,7 @@ func (m ConnectorMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m ConnectorMutation) Tx() (*Tx, error) { +func (m AuthSessionMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -2804,14 +2809,14 @@ func (m ConnectorMutation) Tx() (*Tx, error) { } // SetID sets the value of the id field. 
Note that this -// operation is only accepted on creation of Connector entities. -func (m *ConnectorMutation) SetID(id string) { +// operation is only accepted on creation of AuthSession entities. +func (m *AuthSessionMutation) SetID(id string) { m.id = &id } // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *ConnectorMutation) ID() (id string, exists bool) { +func (m *AuthSessionMutation) ID() (id string, exists bool) { if m.id == nil { return } @@ -2822,7 +2827,7 @@ func (m *ConnectorMutation) ID() (id string, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *ConnectorMutation) IDs(ctx context.Context) ([]string, error) { +func (m *AuthSessionMutation) IDs(ctx context.Context) ([]string, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -2831,165 +2836,201 @@ func (m *ConnectorMutation) IDs(ctx context.Context) ([]string, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().Connector.Query().Where(m.predicates...).IDs(ctx) + return m.Client().AuthSession.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetType sets the "type" field. -func (m *ConnectorMutation) SetType(s string) { - m._type = &s +// SetClientStates sets the "client_states" field. +func (m *AuthSessionMutation) SetClientStates(b []byte) { + m.client_states = &b } -// GetType returns the value of the "type" field in the mutation. -func (m *ConnectorMutation) GetType() (r string, exists bool) { - v := m._type +// ClientStates returns the value of the "client_states" field in the mutation. 
+func (m *AuthSessionMutation) ClientStates() (r []byte, exists bool) { + v := m.client_states if v == nil { return } return *v, true } -// OldType returns the old "type" field's value of the Connector entity. -// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// OldClientStates returns the old "client_states" field's value of the AuthSession entity. +// If the AuthSession object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConnectorMutation) OldType(ctx context.Context) (v string, err error) { +func (m *AuthSessionMutation) OldClientStates(ctx context.Context) (v []byte, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldType is only allowed on UpdateOne operations") + return v, errors.New("OldClientStates is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldType requires an ID field in the mutation") + return v, errors.New("OldClientStates requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldType: %w", err) + return v, fmt.Errorf("querying old value for OldClientStates: %w", err) } - return oldValue.Type, nil + return oldValue.ClientStates, nil } -// ResetType resets all changes to the "type" field. -func (m *ConnectorMutation) ResetType() { - m._type = nil +// ResetClientStates resets all changes to the "client_states" field. +func (m *AuthSessionMutation) ResetClientStates() { + m.client_states = nil } -// SetName sets the "name" field. -func (m *ConnectorMutation) SetName(s string) { - m.name = &s +// SetCreatedAt sets the "created_at" field. +func (m *AuthSessionMutation) SetCreatedAt(t time.Time) { + m.created_at = &t } -// Name returns the value of the "name" field in the mutation. 
-func (m *ConnectorMutation) Name() (r string, exists bool) { - v := m.name +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AuthSessionMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at if v == nil { return } return *v, true } -// OldName returns the old "name" field's value of the Connector entity. -// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// OldCreatedAt returns the old "created_at" field's value of the AuthSession entity. +// If the AuthSession object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConnectorMutation) OldName(ctx context.Context) (v string, err error) { +func (m *AuthSessionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") + return v, errors.New("OldCreatedAt requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) } - return oldValue.Name, nil + return oldValue.CreatedAt, nil } -// ResetName resets all changes to the "name" field. -func (m *ConnectorMutation) ResetName() { - m.name = nil +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AuthSessionMutation) ResetCreatedAt() { + m.created_at = nil } -// SetResourceVersion sets the "resource_version" field. -func (m *ConnectorMutation) SetResourceVersion(s string) { - m.resource_version = &s +// SetLastActivity sets the "last_activity" field. 
+func (m *AuthSessionMutation) SetLastActivity(t time.Time) { + m.last_activity = &t } -// ResourceVersion returns the value of the "resource_version" field in the mutation. -func (m *ConnectorMutation) ResourceVersion() (r string, exists bool) { - v := m.resource_version +// LastActivity returns the value of the "last_activity" field in the mutation. +func (m *AuthSessionMutation) LastActivity() (r time.Time, exists bool) { + v := m.last_activity if v == nil { return } return *v, true } -// OldResourceVersion returns the old "resource_version" field's value of the Connector entity. -// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// OldLastActivity returns the old "last_activity" field's value of the AuthSession entity. +// If the AuthSession object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConnectorMutation) OldResourceVersion(ctx context.Context) (v string, err error) { +func (m *AuthSessionMutation) OldLastActivity(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldResourceVersion is only allowed on UpdateOne operations") + return v, errors.New("OldLastActivity is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldResourceVersion requires an ID field in the mutation") + return v, errors.New("OldLastActivity requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldResourceVersion: %w", err) + return v, fmt.Errorf("querying old value for OldLastActivity: %w", err) } - return oldValue.ResourceVersion, nil + return oldValue.LastActivity, nil } -// ResetResourceVersion resets all changes to the "resource_version" field. 
-func (m *ConnectorMutation) ResetResourceVersion() { - m.resource_version = nil +// ResetLastActivity resets all changes to the "last_activity" field. +func (m *AuthSessionMutation) ResetLastActivity() { + m.last_activity = nil } -// SetConfig sets the "config" field. -func (m *ConnectorMutation) SetConfig(b []byte) { - m._config = &b +// SetIPAddress sets the "ip_address" field. +func (m *AuthSessionMutation) SetIPAddress(s string) { + m.ip_address = &s } -// Config returns the value of the "config" field in the mutation. -func (m *ConnectorMutation) Config() (r []byte, exists bool) { - v := m._config +// IPAddress returns the value of the "ip_address" field in the mutation. +func (m *AuthSessionMutation) IPAddress() (r string, exists bool) { + v := m.ip_address if v == nil { return } return *v, true } -// OldConfig returns the old "config" field's value of the Connector entity. -// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// OldIPAddress returns the old "ip_address" field's value of the AuthSession entity. +// If the AuthSession object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *ConnectorMutation) OldConfig(ctx context.Context) (v []byte, err error) { +func (m *AuthSessionMutation) OldIPAddress(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldConfig is only allowed on UpdateOne operations") + return v, errors.New("OldIPAddress is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldConfig requires an ID field in the mutation") + return v, errors.New("OldIPAddress requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldConfig: %w", err) + return v, fmt.Errorf("querying old value for OldIPAddress: %w", err) } - return oldValue.Config, nil + return oldValue.IPAddress, nil } -// ResetConfig resets all changes to the "config" field. -func (m *ConnectorMutation) ResetConfig() { - m._config = nil +// ResetIPAddress resets all changes to the "ip_address" field. +func (m *AuthSessionMutation) ResetIPAddress() { + m.ip_address = nil } -// Where appends a list predicates to the ConnectorMutation builder. -func (m *ConnectorMutation) Where(ps ...predicate.Connector) { +// SetUserAgent sets the "user_agent" field. +func (m *AuthSessionMutation) SetUserAgent(s string) { + m.user_agent = &s +} + +// UserAgent returns the value of the "user_agent" field in the mutation. +func (m *AuthSessionMutation) UserAgent() (r string, exists bool) { + v := m.user_agent + if v == nil { + return + } + return *v, true +} + +// OldUserAgent returns the old "user_agent" field's value of the AuthSession entity. +// If the AuthSession object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthSessionMutation) OldUserAgent(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserAgent is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserAgent requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserAgent: %w", err) + } + return oldValue.UserAgent, nil +} + +// ResetUserAgent resets all changes to the "user_agent" field. +func (m *AuthSessionMutation) ResetUserAgent() { + m.user_agent = nil +} + +// Where appends a list predicates to the AuthSessionMutation builder. +func (m *AuthSessionMutation) Where(ps ...predicate.AuthSession) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the ConnectorMutation builder. Using this method, +// WhereP appends storage-level predicates to the AuthSessionMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *ConnectorMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.Connector, len(ps)) +func (m *AuthSessionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AuthSession, len(ps)) for i := range ps { p[i] = ps[i] } @@ -2997,36 +3038,39 @@ func (m *ConnectorMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *ConnectorMutation) Op() Op { +func (m *AuthSessionMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *ConnectorMutation) SetOp(op Op) { +func (m *AuthSessionMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (Connector). -func (m *ConnectorMutation) Type() string { +// Type returns the node type of this mutation (AuthSession). 
+func (m *AuthSessionMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). -func (m *ConnectorMutation) Fields() []string { - fields := make([]string, 0, 4) - if m._type != nil { - fields = append(fields, connector.FieldType) +func (m *AuthSessionMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.client_states != nil { + fields = append(fields, authsession.FieldClientStates) } - if m.name != nil { - fields = append(fields, connector.FieldName) + if m.created_at != nil { + fields = append(fields, authsession.FieldCreatedAt) } - if m.resource_version != nil { - fields = append(fields, connector.FieldResourceVersion) + if m.last_activity != nil { + fields = append(fields, authsession.FieldLastActivity) } - if m._config != nil { - fields = append(fields, connector.FieldConfig) + if m.ip_address != nil { + fields = append(fields, authsession.FieldIPAddress) + } + if m.user_agent != nil { + fields = append(fields, authsession.FieldUserAgent) } return fields } @@ -3034,16 +3078,18 @@ func (m *ConnectorMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. 
-func (m *ConnectorMutation) Field(name string) (ent.Value, bool) { +func (m *AuthSessionMutation) Field(name string) (ent.Value, bool) { switch name { - case connector.FieldType: - return m.GetType() - case connector.FieldName: - return m.Name() - case connector.FieldResourceVersion: - return m.ResourceVersion() - case connector.FieldConfig: - return m.Config() + case authsession.FieldClientStates: + return m.ClientStates() + case authsession.FieldCreatedAt: + return m.CreatedAt() + case authsession.FieldLastActivity: + return m.LastActivity() + case authsession.FieldIPAddress: + return m.IPAddress() + case authsession.FieldUserAgent: + return m.UserAgent() } return nil, false } @@ -3051,196 +3097,207 @@ func (m *ConnectorMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. -func (m *ConnectorMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *AuthSessionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case connector.FieldType: - return m.OldType(ctx) - case connector.FieldName: - return m.OldName(ctx) - case connector.FieldResourceVersion: - return m.OldResourceVersion(ctx) - case connector.FieldConfig: - return m.OldConfig(ctx) + case authsession.FieldClientStates: + return m.OldClientStates(ctx) + case authsession.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case authsession.FieldLastActivity: + return m.OldLastActivity(ctx) + case authsession.FieldIPAddress: + return m.OldIPAddress(ctx) + case authsession.FieldUserAgent: + return m.OldUserAgent(ctx) } - return nil, fmt.Errorf("unknown Connector field %s", name) + return nil, fmt.Errorf("unknown AuthSession field %s", name) } // SetField sets the value of a field with the given name. 
It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *ConnectorMutation) SetField(name string, value ent.Value) error { +func (m *AuthSessionMutation) SetField(name string, value ent.Value) error { switch name { - case connector.FieldType: - v, ok := value.(string) + case authsession.FieldClientStates: + v, ok := value.([]byte) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetType(v) + m.SetClientStates(v) return nil - case connector.FieldName: - v, ok := value.(string) + case authsession.FieldCreatedAt: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetName(v) + m.SetCreatedAt(v) return nil - case connector.FieldResourceVersion: + case authsession.FieldLastActivity: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastActivity(v) + return nil + case authsession.FieldIPAddress: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetResourceVersion(v) + m.SetIPAddress(v) return nil - case connector.FieldConfig: - v, ok := value.([]byte) + case authsession.FieldUserAgent: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetConfig(v) + m.SetUserAgent(v) return nil } - return fmt.Errorf("unknown Connector field %s", name) + return fmt.Errorf("unknown AuthSession field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *ConnectorMutation) AddedFields() []string { +func (m *AuthSessionMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. 
-func (m *ConnectorMutation) AddedField(name string) (ent.Value, bool) { +func (m *AuthSessionMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *ConnectorMutation) AddField(name string, value ent.Value) error { +func (m *AuthSessionMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown Connector numeric field %s", name) + return fmt.Errorf("unknown AuthSession numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *ConnectorMutation) ClearedFields() []string { +func (m *AuthSessionMutation) ClearedFields() []string { return nil } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *ConnectorMutation) FieldCleared(name string) bool { +func (m *AuthSessionMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *ConnectorMutation) ClearField(name string) error { - return fmt.Errorf("unknown Connector nullable field %s", name) +func (m *AuthSessionMutation) ClearField(name string) error { + return fmt.Errorf("unknown AuthSession nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. 
-func (m *ConnectorMutation) ResetField(name string) error { +func (m *AuthSessionMutation) ResetField(name string) error { switch name { - case connector.FieldType: - m.ResetType() + case authsession.FieldClientStates: + m.ResetClientStates() return nil - case connector.FieldName: - m.ResetName() + case authsession.FieldCreatedAt: + m.ResetCreatedAt() return nil - case connector.FieldResourceVersion: - m.ResetResourceVersion() + case authsession.FieldLastActivity: + m.ResetLastActivity() return nil - case connector.FieldConfig: - m.ResetConfig() + case authsession.FieldIPAddress: + m.ResetIPAddress() + return nil + case authsession.FieldUserAgent: + m.ResetUserAgent() return nil } - return fmt.Errorf("unknown Connector field %s", name) + return fmt.Errorf("unknown AuthSession field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *ConnectorMutation) AddedEdges() []string { +func (m *AuthSessionMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *ConnectorMutation) AddedIDs(name string) []ent.Value { +func (m *AuthSessionMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *ConnectorMutation) RemovedEdges() []string { +func (m *AuthSessionMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *ConnectorMutation) RemovedIDs(name string) []ent.Value { +func (m *AuthSessionMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. 
-func (m *ConnectorMutation) ClearedEdges() []string { +func (m *AuthSessionMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *ConnectorMutation) EdgeCleared(name string) bool { +func (m *AuthSessionMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *ConnectorMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown Connector unique edge %s", name) +func (m *AuthSessionMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown AuthSession unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *ConnectorMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown Connector edge %s", name) +func (m *AuthSessionMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown AuthSession edge %s", name) } -// DeviceRequestMutation represents an operation that mutates the DeviceRequest nodes in the graph. -type DeviceRequestMutation struct { +// ConnectorMutation represents an operation that mutates the Connector nodes in the graph. 
+type ConnectorMutation struct { config - op Op - typ string - id *int - user_code *string - device_code *string - client_id *string - client_secret *string - scopes *[]string - appendscopes []string - expiry *time.Time - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*DeviceRequest, error) - predicates []predicate.DeviceRequest + op Op + typ string + id *string + _type *string + name *string + resource_version *string + _config *[]byte + grant_types *[]string + appendgrant_types []string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Connector, error) + predicates []predicate.Connector } -var _ ent.Mutation = (*DeviceRequestMutation)(nil) +var _ ent.Mutation = (*ConnectorMutation)(nil) -// devicerequestOption allows management of the mutation configuration using functional options. -type devicerequestOption func(*DeviceRequestMutation) +// connectorOption allows management of the mutation configuration using functional options. +type connectorOption func(*ConnectorMutation) -// newDeviceRequestMutation creates new mutation for the DeviceRequest entity. -func newDeviceRequestMutation(c config, op Op, opts ...devicerequestOption) *DeviceRequestMutation { - m := &DeviceRequestMutation{ +// newConnectorMutation creates new mutation for the Connector entity. +func newConnectorMutation(c config, op Op, opts ...connectorOption) *ConnectorMutation { + m := &ConnectorMutation{ config: c, op: op, - typ: TypeDeviceRequest, + typ: TypeConnector, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -3249,20 +3306,20 @@ func newDeviceRequestMutation(c config, op Op, opts ...devicerequestOption) *Dev return m } -// withDeviceRequestID sets the ID field of the mutation. -func withDeviceRequestID(id int) devicerequestOption { - return func(m *DeviceRequestMutation) { +// withConnectorID sets the ID field of the mutation. 
+func withConnectorID(id string) connectorOption { + return func(m *ConnectorMutation) { var ( err error once sync.Once - value *DeviceRequest + value *Connector ) - m.oldValue = func(ctx context.Context) (*DeviceRequest, error) { + m.oldValue = func(ctx context.Context) (*Connector, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().DeviceRequest.Get(ctx, id) + value, err = m.Client().Connector.Get(ctx, id) } }) return value, err @@ -3271,10 +3328,10 @@ func withDeviceRequestID(id int) devicerequestOption { } } -// withDeviceRequest sets the old DeviceRequest of the mutation. -func withDeviceRequest(node *DeviceRequest) devicerequestOption { - return func(m *DeviceRequestMutation) { - m.oldValue = func(context.Context) (*DeviceRequest, error) { +// withConnector sets the old Connector of the mutation. +func withConnector(node *Connector) connectorOption { + return func(m *ConnectorMutation) { + m.oldValue = func(context.Context) (*Connector, error) { return node, nil } m.id = &node.ID @@ -3283,7 +3340,7 @@ func withDeviceRequest(node *DeviceRequest) devicerequestOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m DeviceRequestMutation) Client() *Client { +func (m ConnectorMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -3291,7 +3348,7 @@ func (m DeviceRequestMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. 
-func (m DeviceRequestMutation) Tx() (*Tx, error) { +func (m ConnectorMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -3300,9 +3357,15 @@ func (m DeviceRequestMutation) Tx() (*Tx, error) { return tx, nil } +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Connector entities. +func (m *ConnectorMutation) SetID(id string) { + m.id = &id +} + // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *DeviceRequestMutation) ID() (id int, exists bool) { +func (m *ConnectorMutation) ID() (id string, exists bool) { if m.id == nil { return } @@ -3313,275 +3376,239 @@ func (m *DeviceRequestMutation) ID() (id int, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *DeviceRequestMutation) IDs(ctx context.Context) ([]int, error) { +func (m *ConnectorMutation) IDs(ctx context.Context) ([]string, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() if exists { - return []int{id}, nil + return []string{id}, nil } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().DeviceRequest.Query().Where(m.predicates...).IDs(ctx) + return m.Client().Connector.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetUserCode sets the "user_code" field. -func (m *DeviceRequestMutation) SetUserCode(s string) { - m.user_code = &s +// SetType sets the "type" field. +func (m *ConnectorMutation) SetType(s string) { + m._type = &s } -// UserCode returns the value of the "user_code" field in the mutation. 
-func (m *DeviceRequestMutation) UserCode() (r string, exists bool) { - v := m.user_code +// GetType returns the value of the "type" field in the mutation. +func (m *ConnectorMutation) GetType() (r string, exists bool) { + v := m._type if v == nil { return } return *v, true } -// OldUserCode returns the old "user_code" field's value of the DeviceRequest entity. -// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// OldType returns the old "type" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceRequestMutation) OldUserCode(ctx context.Context) (v string, err error) { +func (m *ConnectorMutation) OldType(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUserCode is only allowed on UpdateOne operations") + return v, errors.New("OldType is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUserCode requires an ID field in the mutation") + return v, errors.New("OldType requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldUserCode: %w", err) + return v, fmt.Errorf("querying old value for OldType: %w", err) } - return oldValue.UserCode, nil + return oldValue.Type, nil } -// ResetUserCode resets all changes to the "user_code" field. -func (m *DeviceRequestMutation) ResetUserCode() { - m.user_code = nil +// ResetType resets all changes to the "type" field. +func (m *ConnectorMutation) ResetType() { + m._type = nil } -// SetDeviceCode sets the "device_code" field. -func (m *DeviceRequestMutation) SetDeviceCode(s string) { - m.device_code = &s +// SetName sets the "name" field. 
+func (m *ConnectorMutation) SetName(s string) { + m.name = &s } -// DeviceCode returns the value of the "device_code" field in the mutation. -func (m *DeviceRequestMutation) DeviceCode() (r string, exists bool) { - v := m.device_code +// Name returns the value of the "name" field in the mutation. +func (m *ConnectorMutation) Name() (r string, exists bool) { + v := m.name if v == nil { return } return *v, true } -// OldDeviceCode returns the old "device_code" field's value of the DeviceRequest entity. -// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// OldName returns the old "name" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceRequestMutation) OldDeviceCode(ctx context.Context) (v string, err error) { +func (m *ConnectorMutation) OldName(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations") + return v, errors.New("OldName is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldDeviceCode requires an ID field in the mutation") + return v, errors.New("OldName requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err) + return v, fmt.Errorf("querying old value for OldName: %w", err) } - return oldValue.DeviceCode, nil + return oldValue.Name, nil } -// ResetDeviceCode resets all changes to the "device_code" field. -func (m *DeviceRequestMutation) ResetDeviceCode() { - m.device_code = nil +// ResetName resets all changes to the "name" field. +func (m *ConnectorMutation) ResetName() { + m.name = nil } -// SetClientID sets the "client_id" field. 
-func (m *DeviceRequestMutation) SetClientID(s string) { - m.client_id = &s +// SetResourceVersion sets the "resource_version" field. +func (m *ConnectorMutation) SetResourceVersion(s string) { + m.resource_version = &s } -// ClientID returns the value of the "client_id" field in the mutation. -func (m *DeviceRequestMutation) ClientID() (r string, exists bool) { - v := m.client_id +// ResourceVersion returns the value of the "resource_version" field in the mutation. +func (m *ConnectorMutation) ResourceVersion() (r string, exists bool) { + v := m.resource_version if v == nil { return } return *v, true } -// OldClientID returns the old "client_id" field's value of the DeviceRequest entity. -// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// OldResourceVersion returns the old "resource_version" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *DeviceRequestMutation) OldClientID(ctx context.Context) (v string, err error) { +func (m *ConnectorMutation) OldResourceVersion(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldClientID is only allowed on UpdateOne operations") + return v, errors.New("OldResourceVersion is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldClientID requires an ID field in the mutation") + return v, errors.New("OldResourceVersion requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldClientID: %w", err) + return v, fmt.Errorf("querying old value for OldResourceVersion: %w", err) } - return oldValue.ClientID, nil + return oldValue.ResourceVersion, nil } -// ResetClientID resets all changes to the "client_id" field. -func (m *DeviceRequestMutation) ResetClientID() { - m.client_id = nil +// ResetResourceVersion resets all changes to the "resource_version" field. +func (m *ConnectorMutation) ResetResourceVersion() { + m.resource_version = nil } -// SetClientSecret sets the "client_secret" field. -func (m *DeviceRequestMutation) SetClientSecret(s string) { - m.client_secret = &s +// SetConfig sets the "config" field. +func (m *ConnectorMutation) SetConfig(b []byte) { + m._config = &b } -// ClientSecret returns the value of the "client_secret" field in the mutation. -func (m *DeviceRequestMutation) ClientSecret() (r string, exists bool) { - v := m.client_secret +// Config returns the value of the "config" field in the mutation. +func (m *ConnectorMutation) Config() (r []byte, exists bool) { + v := m._config if v == nil { return } return *v, true } -// OldClientSecret returns the old "client_secret" field's value of the DeviceRequest entity. -// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. 
+// OldConfig returns the old "config" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceRequestMutation) OldClientSecret(ctx context.Context) (v string, err error) { +func (m *ConnectorMutation) OldConfig(ctx context.Context) (v []byte, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldClientSecret is only allowed on UpdateOne operations") + return v, errors.New("OldConfig is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldClientSecret requires an ID field in the mutation") + return v, errors.New("OldConfig requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldClientSecret: %w", err) + return v, fmt.Errorf("querying old value for OldConfig: %w", err) } - return oldValue.ClientSecret, nil + return oldValue.Config, nil } -// ResetClientSecret resets all changes to the "client_secret" field. -func (m *DeviceRequestMutation) ResetClientSecret() { - m.client_secret = nil +// ResetConfig resets all changes to the "config" field. +func (m *ConnectorMutation) ResetConfig() { + m._config = nil } -// SetScopes sets the "scopes" field. -func (m *DeviceRequestMutation) SetScopes(s []string) { - m.scopes = &s - m.appendscopes = nil +// SetGrantTypes sets the "grant_types" field. +func (m *ConnectorMutation) SetGrantTypes(s []string) { + m.grant_types = &s + m.appendgrant_types = nil } -// Scopes returns the value of the "scopes" field in the mutation. -func (m *DeviceRequestMutation) Scopes() (r []string, exists bool) { - v := m.scopes +// GrantTypes returns the value of the "grant_types" field in the mutation. 
+func (m *ConnectorMutation) GrantTypes() (r []string, exists bool) { + v := m.grant_types if v == nil { return } return *v, true } -// OldScopes returns the old "scopes" field's value of the DeviceRequest entity. -// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// OldGrantTypes returns the old "grant_types" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceRequestMutation) OldScopes(ctx context.Context) (v []string, err error) { +func (m *ConnectorMutation) OldGrantTypes(ctx context.Context) (v []string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldScopes is only allowed on UpdateOne operations") + return v, errors.New("OldGrantTypes is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldScopes requires an ID field in the mutation") + return v, errors.New("OldGrantTypes requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldScopes: %w", err) + return v, fmt.Errorf("querying old value for OldGrantTypes: %w", err) } - return oldValue.Scopes, nil + return oldValue.GrantTypes, nil } -// AppendScopes adds s to the "scopes" field. -func (m *DeviceRequestMutation) AppendScopes(s []string) { - m.appendscopes = append(m.appendscopes, s...) +// AppendGrantTypes adds s to the "grant_types" field. +func (m *ConnectorMutation) AppendGrantTypes(s []string) { + m.appendgrant_types = append(m.appendgrant_types, s...) } -// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. 
-func (m *DeviceRequestMutation) AppendedScopes() ([]string, bool) { - if len(m.appendscopes) == 0 { +// AppendedGrantTypes returns the list of values that were appended to the "grant_types" field in this mutation. +func (m *ConnectorMutation) AppendedGrantTypes() ([]string, bool) { + if len(m.appendgrant_types) == 0 { return nil, false } - return m.appendscopes, true + return m.appendgrant_types, true } -// ClearScopes clears the value of the "scopes" field. -func (m *DeviceRequestMutation) ClearScopes() { - m.scopes = nil - m.appendscopes = nil - m.clearedFields[devicerequest.FieldScopes] = struct{}{} +// ClearGrantTypes clears the value of the "grant_types" field. +func (m *ConnectorMutation) ClearGrantTypes() { + m.grant_types = nil + m.appendgrant_types = nil + m.clearedFields[connector.FieldGrantTypes] = struct{}{} } -// ScopesCleared returns if the "scopes" field was cleared in this mutation. -func (m *DeviceRequestMutation) ScopesCleared() bool { - _, ok := m.clearedFields[devicerequest.FieldScopes] +// GrantTypesCleared returns if the "grant_types" field was cleared in this mutation. +func (m *ConnectorMutation) GrantTypesCleared() bool { + _, ok := m.clearedFields[connector.FieldGrantTypes] return ok } -// ResetScopes resets all changes to the "scopes" field. -func (m *DeviceRequestMutation) ResetScopes() { - m.scopes = nil - m.appendscopes = nil - delete(m.clearedFields, devicerequest.FieldScopes) -} - -// SetExpiry sets the "expiry" field. -func (m *DeviceRequestMutation) SetExpiry(t time.Time) { - m.expiry = &t -} - -// Expiry returns the value of the "expiry" field in the mutation. -func (m *DeviceRequestMutation) Expiry() (r time.Time, exists bool) { - v := m.expiry - if v == nil { - return - } - return *v, true -} - -// OldExpiry returns the old "expiry" field's value of the DeviceRequest entity. -// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. 
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceRequestMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldExpiry is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldExpiry requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldExpiry: %w", err) - } - return oldValue.Expiry, nil -} - -// ResetExpiry resets all changes to the "expiry" field. -func (m *DeviceRequestMutation) ResetExpiry() { - m.expiry = nil +// ResetGrantTypes resets all changes to the "grant_types" field. +func (m *ConnectorMutation) ResetGrantTypes() { + m.grant_types = nil + m.appendgrant_types = nil + delete(m.clearedFields, connector.FieldGrantTypes) } -// Where appends a list predicates to the DeviceRequestMutation builder. -func (m *DeviceRequestMutation) Where(ps ...predicate.DeviceRequest) { +// Where appends a list predicates to the ConnectorMutation builder. +func (m *ConnectorMutation) Where(ps ...predicate.Connector) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the DeviceRequestMutation builder. Using this method, +// WhereP appends storage-level predicates to the ConnectorMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *DeviceRequestMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.DeviceRequest, len(ps)) +func (m *ConnectorMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Connector, len(ps)) for i := range ps { p[i] = ps[i] } @@ -3589,42 +3616,39 @@ func (m *DeviceRequestMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. 
-func (m *DeviceRequestMutation) Op() Op { +func (m *ConnectorMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *DeviceRequestMutation) SetOp(op Op) { +func (m *ConnectorMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (DeviceRequest). -func (m *DeviceRequestMutation) Type() string { +// Type returns the node type of this mutation (Connector). +func (m *ConnectorMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). -func (m *DeviceRequestMutation) Fields() []string { - fields := make([]string, 0, 6) - if m.user_code != nil { - fields = append(fields, devicerequest.FieldUserCode) +func (m *ConnectorMutation) Fields() []string { + fields := make([]string, 0, 5) + if m._type != nil { + fields = append(fields, connector.FieldType) } - if m.device_code != nil { - fields = append(fields, devicerequest.FieldDeviceCode) - } - if m.client_id != nil { - fields = append(fields, devicerequest.FieldClientID) + if m.name != nil { + fields = append(fields, connector.FieldName) } - if m.client_secret != nil { - fields = append(fields, devicerequest.FieldClientSecret) + if m.resource_version != nil { + fields = append(fields, connector.FieldResourceVersion) } - if m.scopes != nil { - fields = append(fields, devicerequest.FieldScopes) + if m._config != nil { + fields = append(fields, connector.FieldConfig) } - if m.expiry != nil { - fields = append(fields, devicerequest.FieldExpiry) + if m.grant_types != nil { + fields = append(fields, connector.FieldGrantTypes) } return fields } @@ -3632,20 +3656,18 @@ func (m *DeviceRequestMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. 
-func (m *DeviceRequestMutation) Field(name string) (ent.Value, bool) { +func (m *ConnectorMutation) Field(name string) (ent.Value, bool) { switch name { - case devicerequest.FieldUserCode: - return m.UserCode() - case devicerequest.FieldDeviceCode: - return m.DeviceCode() - case devicerequest.FieldClientID: - return m.ClientID() - case devicerequest.FieldClientSecret: - return m.ClientSecret() - case devicerequest.FieldScopes: - return m.Scopes() - case devicerequest.FieldExpiry: - return m.Expiry() + case connector.FieldType: + return m.GetType() + case connector.FieldName: + return m.Name() + case connector.FieldResourceVersion: + return m.ResourceVersion() + case connector.FieldConfig: + return m.Config() + case connector.FieldGrantTypes: + return m.GrantTypes() } return nil, false } @@ -3653,231 +3675,217 @@ func (m *DeviceRequestMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *DeviceRequestMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *ConnectorMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case devicerequest.FieldUserCode: - return m.OldUserCode(ctx) - case devicerequest.FieldDeviceCode: - return m.OldDeviceCode(ctx) - case devicerequest.FieldClientID: - return m.OldClientID(ctx) - case devicerequest.FieldClientSecret: - return m.OldClientSecret(ctx) - case devicerequest.FieldScopes: - return m.OldScopes(ctx) - case devicerequest.FieldExpiry: - return m.OldExpiry(ctx) + case connector.FieldType: + return m.OldType(ctx) + case connector.FieldName: + return m.OldName(ctx) + case connector.FieldResourceVersion: + return m.OldResourceVersion(ctx) + case connector.FieldConfig: + return m.OldConfig(ctx) + case connector.FieldGrantTypes: + return m.OldGrantTypes(ctx) } - return nil, fmt.Errorf("unknown DeviceRequest field %s", name) + return nil, fmt.Errorf("unknown Connector field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *DeviceRequestMutation) SetField(name string, value ent.Value) error { +func (m *ConnectorMutation) SetField(name string, value ent.Value) error { switch name { - case devicerequest.FieldUserCode: + case connector.FieldType: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetUserCode(v) + m.SetType(v) return nil - case devicerequest.FieldDeviceCode: + case connector.FieldName: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetDeviceCode(v) + m.SetName(v) return nil - case devicerequest.FieldClientID: + case connector.FieldResourceVersion: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetClientID(v) + m.SetResourceVersion(v) return nil - case devicerequest.FieldClientSecret: - v, ok := value.(string) + case connector.FieldConfig: + v, ok := value.([]byte) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetClientSecret(v) + m.SetConfig(v) return nil - case devicerequest.FieldScopes: + case connector.FieldGrantTypes: v, ok := value.([]string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetScopes(v) - return nil - case devicerequest.FieldExpiry: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetExpiry(v) + m.SetGrantTypes(v) return nil } - return fmt.Errorf("unknown DeviceRequest field %s", name) + return fmt.Errorf("unknown Connector field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *DeviceRequestMutation) AddedFields() []string { +func (m *ConnectorMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. 
The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *DeviceRequestMutation) AddedField(name string) (ent.Value, bool) { +func (m *ConnectorMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *DeviceRequestMutation) AddField(name string, value ent.Value) error { +func (m *ConnectorMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown DeviceRequest numeric field %s", name) + return fmt.Errorf("unknown Connector numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *DeviceRequestMutation) ClearedFields() []string { +func (m *ConnectorMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(devicerequest.FieldScopes) { - fields = append(fields, devicerequest.FieldScopes) + if m.FieldCleared(connector.FieldGrantTypes) { + fields = append(fields, connector.FieldGrantTypes) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *DeviceRequestMutation) FieldCleared(name string) bool { +func (m *ConnectorMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *DeviceRequestMutation) ClearField(name string) error { +func (m *ConnectorMutation) ClearField(name string) error { switch name { - case devicerequest.FieldScopes: - m.ClearScopes() + case connector.FieldGrantTypes: + m.ClearGrantTypes() return nil } - return fmt.Errorf("unknown DeviceRequest nullable field %s", name) + return fmt.Errorf("unknown Connector nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *DeviceRequestMutation) ResetField(name string) error { +func (m *ConnectorMutation) ResetField(name string) error { switch name { - case devicerequest.FieldUserCode: - m.ResetUserCode() - return nil - case devicerequest.FieldDeviceCode: - m.ResetDeviceCode() + case connector.FieldType: + m.ResetType() return nil - case devicerequest.FieldClientID: - m.ResetClientID() + case connector.FieldName: + m.ResetName() return nil - case devicerequest.FieldClientSecret: - m.ResetClientSecret() + case connector.FieldResourceVersion: + m.ResetResourceVersion() return nil - case devicerequest.FieldScopes: - m.ResetScopes() + case connector.FieldConfig: + m.ResetConfig() return nil - case devicerequest.FieldExpiry: - m.ResetExpiry() + case connector.FieldGrantTypes: + m.ResetGrantTypes() return nil } - return fmt.Errorf("unknown DeviceRequest field %s", name) + return fmt.Errorf("unknown Connector field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *DeviceRequestMutation) AddedEdges() []string { +func (m *ConnectorMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. 
-func (m *DeviceRequestMutation) AddedIDs(name string) []ent.Value { +func (m *ConnectorMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *DeviceRequestMutation) RemovedEdges() []string { +func (m *ConnectorMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *DeviceRequestMutation) RemovedIDs(name string) []ent.Value { +func (m *ConnectorMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *DeviceRequestMutation) ClearedEdges() []string { +func (m *ConnectorMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *DeviceRequestMutation) EdgeCleared(name string) bool { +func (m *ConnectorMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *DeviceRequestMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown DeviceRequest unique edge %s", name) +func (m *ConnectorMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Connector unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. 
-func (m *DeviceRequestMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown DeviceRequest edge %s", name) +func (m *ConnectorMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Connector edge %s", name) } -// DeviceTokenMutation represents an operation that mutates the DeviceToken nodes in the graph. -type DeviceTokenMutation struct { +// DeviceRequestMutation represents an operation that mutates the DeviceRequest nodes in the graph. +type DeviceRequestMutation struct { config - op Op - typ string - id *int - device_code *string - status *string - token *[]byte - expiry *time.Time - last_request *time.Time - poll_interval *int - addpoll_interval *int - code_challenge *string - code_challenge_method *string - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*DeviceToken, error) - predicates []predicate.DeviceToken + op Op + typ string + id *int + user_code *string + device_code *string + client_id *string + client_secret *string + scopes *[]string + appendscopes []string + expiry *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*DeviceRequest, error) + predicates []predicate.DeviceRequest } -var _ ent.Mutation = (*DeviceTokenMutation)(nil) +var _ ent.Mutation = (*DeviceRequestMutation)(nil) -// devicetokenOption allows management of the mutation configuration using functional options. -type devicetokenOption func(*DeviceTokenMutation) +// devicerequestOption allows management of the mutation configuration using functional options. +type devicerequestOption func(*DeviceRequestMutation) -// newDeviceTokenMutation creates new mutation for the DeviceToken entity. -func newDeviceTokenMutation(c config, op Op, opts ...devicetokenOption) *DeviceTokenMutation { - m := &DeviceTokenMutation{ +// newDeviceRequestMutation creates new mutation for the DeviceRequest entity. 
+func newDeviceRequestMutation(c config, op Op, opts ...devicerequestOption) *DeviceRequestMutation { + m := &DeviceRequestMutation{ config: c, op: op, - typ: TypeDeviceToken, + typ: TypeDeviceRequest, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -3886,20 +3894,20 @@ func newDeviceTokenMutation(c config, op Op, opts ...devicetokenOption) *DeviceT return m } -// withDeviceTokenID sets the ID field of the mutation. -func withDeviceTokenID(id int) devicetokenOption { - return func(m *DeviceTokenMutation) { +// withDeviceRequestID sets the ID field of the mutation. +func withDeviceRequestID(id int) devicerequestOption { + return func(m *DeviceRequestMutation) { var ( err error once sync.Once - value *DeviceToken + value *DeviceRequest ) - m.oldValue = func(ctx context.Context) (*DeviceToken, error) { + m.oldValue = func(ctx context.Context) (*DeviceRequest, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().DeviceToken.Get(ctx, id) + value, err = m.Client().DeviceRequest.Get(ctx, id) } }) return value, err @@ -3908,10 +3916,10 @@ func withDeviceTokenID(id int) devicetokenOption { } } -// withDeviceToken sets the old DeviceToken of the mutation. -func withDeviceToken(node *DeviceToken) devicetokenOption { - return func(m *DeviceTokenMutation) { - m.oldValue = func(context.Context) (*DeviceToken, error) { +// withDeviceRequest sets the old DeviceRequest of the mutation. +func withDeviceRequest(node *DeviceRequest) devicerequestOption { + return func(m *DeviceRequestMutation) { + m.oldValue = func(context.Context) (*DeviceRequest, error) { return node, nil } m.id = &node.ID @@ -3920,7 +3928,7 @@ func withDeviceToken(node *DeviceToken) devicetokenOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. 
-func (m DeviceTokenMutation) Client() *Client { +func (m DeviceRequestMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -3928,7 +3936,7 @@ func (m DeviceTokenMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m DeviceTokenMutation) Tx() (*Tx, error) { +func (m DeviceRequestMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -3939,7 +3947,7 @@ func (m DeviceTokenMutation) Tx() (*Tx, error) { // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *DeviceTokenMutation) ID() (id int, exists bool) { +func (m *DeviceRequestMutation) ID() (id int, exists bool) { if m.id == nil { return } @@ -3950,7 +3958,7 @@ func (m *DeviceTokenMutation) ID() (id int, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *DeviceTokenMutation) IDs(ctx context.Context) ([]int, error) { +func (m *DeviceRequestMutation) IDs(ctx context.Context) ([]int, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -3959,342 +3967,266 @@ func (m *DeviceTokenMutation) IDs(ctx context.Context) ([]int, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().DeviceToken.Query().Where(m.predicates...).IDs(ctx) + return m.Client().DeviceRequest.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetDeviceCode sets the "device_code" field. -func (m *DeviceTokenMutation) SetDeviceCode(s string) { - m.device_code = &s +// SetUserCode sets the "user_code" field. 
+func (m *DeviceRequestMutation) SetUserCode(s string) { + m.user_code = &s } -// DeviceCode returns the value of the "device_code" field in the mutation. -func (m *DeviceTokenMutation) DeviceCode() (r string, exists bool) { - v := m.device_code +// UserCode returns the value of the "user_code" field in the mutation. +func (m *DeviceRequestMutation) UserCode() (r string, exists bool) { + v := m.user_code if v == nil { return } return *v, true } -// OldDeviceCode returns the old "device_code" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// OldUserCode returns the old "user_code" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceTokenMutation) OldDeviceCode(ctx context.Context) (v string, err error) { +func (m *DeviceRequestMutation) OldUserCode(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations") + return v, errors.New("OldUserCode is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldDeviceCode requires an ID field in the mutation") + return v, errors.New("OldUserCode requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err) + return v, fmt.Errorf("querying old value for OldUserCode: %w", err) } - return oldValue.DeviceCode, nil + return oldValue.UserCode, nil } -// ResetDeviceCode resets all changes to the "device_code" field. -func (m *DeviceTokenMutation) ResetDeviceCode() { - m.device_code = nil +// ResetUserCode resets all changes to the "user_code" field. 
+func (m *DeviceRequestMutation) ResetUserCode() { + m.user_code = nil } -// SetStatus sets the "status" field. -func (m *DeviceTokenMutation) SetStatus(s string) { - m.status = &s +// SetDeviceCode sets the "device_code" field. +func (m *DeviceRequestMutation) SetDeviceCode(s string) { + m.device_code = &s } -// Status returns the value of the "status" field in the mutation. -func (m *DeviceTokenMutation) Status() (r string, exists bool) { - v := m.status +// DeviceCode returns the value of the "device_code" field in the mutation. +func (m *DeviceRequestMutation) DeviceCode() (r string, exists bool) { + v := m.device_code if v == nil { return } return *v, true } -// OldStatus returns the old "status" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// OldDeviceCode returns the old "device_code" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *DeviceTokenMutation) OldStatus(ctx context.Context) (v string, err error) { +func (m *DeviceRequestMutation) OldDeviceCode(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStatus is only allowed on UpdateOne operations") + return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStatus requires an ID field in the mutation") + return v, errors.New("OldDeviceCode requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldStatus: %w", err) + return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err) } - return oldValue.Status, nil + return oldValue.DeviceCode, nil } -// ResetStatus resets all changes to the "status" field. -func (m *DeviceTokenMutation) ResetStatus() { - m.status = nil +// ResetDeviceCode resets all changes to the "device_code" field. +func (m *DeviceRequestMutation) ResetDeviceCode() { + m.device_code = nil } -// SetToken sets the "token" field. -func (m *DeviceTokenMutation) SetToken(b []byte) { - m.token = &b +// SetClientID sets the "client_id" field. +func (m *DeviceRequestMutation) SetClientID(s string) { + m.client_id = &s } -// Token returns the value of the "token" field in the mutation. -func (m *DeviceTokenMutation) Token() (r []byte, exists bool) { - v := m.token +// ClientID returns the value of the "client_id" field in the mutation. +func (m *DeviceRequestMutation) ClientID() (r string, exists bool) { + v := m.client_id if v == nil { return } return *v, true } -// OldToken returns the old "token" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// OldClientID returns the old "client_id" field's value of the DeviceRequest entity. 
+// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceTokenMutation) OldToken(ctx context.Context) (v *[]byte, err error) { +func (m *DeviceRequestMutation) OldClientID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldToken is only allowed on UpdateOne operations") + return v, errors.New("OldClientID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldToken requires an ID field in the mutation") + return v, errors.New("OldClientID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldToken: %w", err) + return v, fmt.Errorf("querying old value for OldClientID: %w", err) } - return oldValue.Token, nil -} - -// ClearToken clears the value of the "token" field. -func (m *DeviceTokenMutation) ClearToken() { - m.token = nil - m.clearedFields[devicetoken.FieldToken] = struct{}{} -} - -// TokenCleared returns if the "token" field was cleared in this mutation. -func (m *DeviceTokenMutation) TokenCleared() bool { - _, ok := m.clearedFields[devicetoken.FieldToken] - return ok + return oldValue.ClientID, nil } -// ResetToken resets all changes to the "token" field. -func (m *DeviceTokenMutation) ResetToken() { - m.token = nil - delete(m.clearedFields, devicetoken.FieldToken) +// ResetClientID resets all changes to the "client_id" field. +func (m *DeviceRequestMutation) ResetClientID() { + m.client_id = nil } -// SetExpiry sets the "expiry" field. -func (m *DeviceTokenMutation) SetExpiry(t time.Time) { - m.expiry = &t +// SetClientSecret sets the "client_secret" field. +func (m *DeviceRequestMutation) SetClientSecret(s string) { + m.client_secret = &s } -// Expiry returns the value of the "expiry" field in the mutation. 
-func (m *DeviceTokenMutation) Expiry() (r time.Time, exists bool) { - v := m.expiry +// ClientSecret returns the value of the "client_secret" field in the mutation. +func (m *DeviceRequestMutation) ClientSecret() (r string, exists bool) { + v := m.client_secret if v == nil { return } return *v, true } -// OldExpiry returns the old "expiry" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// OldClientSecret returns the old "client_secret" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceTokenMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { +func (m *DeviceRequestMutation) OldClientSecret(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldExpiry is only allowed on UpdateOne operations") + return v, errors.New("OldClientSecret is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldExpiry requires an ID field in the mutation") + return v, errors.New("OldClientSecret requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldExpiry: %w", err) + return v, fmt.Errorf("querying old value for OldClientSecret: %w", err) } - return oldValue.Expiry, nil + return oldValue.ClientSecret, nil } -// ResetExpiry resets all changes to the "expiry" field. -func (m *DeviceTokenMutation) ResetExpiry() { - m.expiry = nil +// ResetClientSecret resets all changes to the "client_secret" field. +func (m *DeviceRequestMutation) ResetClientSecret() { + m.client_secret = nil } -// SetLastRequest sets the "last_request" field. 
-func (m *DeviceTokenMutation) SetLastRequest(t time.Time) { - m.last_request = &t +// SetScopes sets the "scopes" field. +func (m *DeviceRequestMutation) SetScopes(s []string) { + m.scopes = &s + m.appendscopes = nil } -// LastRequest returns the value of the "last_request" field in the mutation. -func (m *DeviceTokenMutation) LastRequest() (r time.Time, exists bool) { - v := m.last_request +// Scopes returns the value of the "scopes" field in the mutation. +func (m *DeviceRequestMutation) Scopes() (r []string, exists bool) { + v := m.scopes if v == nil { return } return *v, true } -// OldLastRequest returns the old "last_request" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// OldScopes returns the old "scopes" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceTokenMutation) OldLastRequest(ctx context.Context) (v time.Time, err error) { +func (m *DeviceRequestMutation) OldScopes(ctx context.Context) (v []string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldLastRequest is only allowed on UpdateOne operations") + return v, errors.New("OldScopes is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldLastRequest requires an ID field in the mutation") + return v, errors.New("OldScopes requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldLastRequest: %w", err) + return v, fmt.Errorf("querying old value for OldScopes: %w", err) } - return oldValue.LastRequest, nil -} - -// ResetLastRequest resets all changes to the "last_request" field. 
-func (m *DeviceTokenMutation) ResetLastRequest() { - m.last_request = nil -} - -// SetPollInterval sets the "poll_interval" field. -func (m *DeviceTokenMutation) SetPollInterval(i int) { - m.poll_interval = &i - m.addpoll_interval = nil + return oldValue.Scopes, nil } -// PollInterval returns the value of the "poll_interval" field in the mutation. -func (m *DeviceTokenMutation) PollInterval() (r int, exists bool) { - v := m.poll_interval - if v == nil { - return - } - return *v, true +// AppendScopes adds s to the "scopes" field. +func (m *DeviceRequestMutation) AppendScopes(s []string) { + m.appendscopes = append(m.appendscopes, s...) } -// OldPollInterval returns the old "poll_interval" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceTokenMutation) OldPollInterval(ctx context.Context) (v int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPollInterval is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPollInterval requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPollInterval: %w", err) +// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. +func (m *DeviceRequestMutation) AppendedScopes() ([]string, bool) { + if len(m.appendscopes) == 0 { + return nil, false } - return oldValue.PollInterval, nil + return m.appendscopes, true } -// AddPollInterval adds i to the "poll_interval" field. -func (m *DeviceTokenMutation) AddPollInterval(i int) { - if m.addpoll_interval != nil { - *m.addpoll_interval += i - } else { - m.addpoll_interval = &i - } +// ClearScopes clears the value of the "scopes" field. 
+func (m *DeviceRequestMutation) ClearScopes() { + m.scopes = nil + m.appendscopes = nil + m.clearedFields[devicerequest.FieldScopes] = struct{}{} } -// AddedPollInterval returns the value that was added to the "poll_interval" field in this mutation. -func (m *DeviceTokenMutation) AddedPollInterval() (r int, exists bool) { - v := m.addpoll_interval - if v == nil { - return - } - return *v, true +// ScopesCleared returns if the "scopes" field was cleared in this mutation. +func (m *DeviceRequestMutation) ScopesCleared() bool { + _, ok := m.clearedFields[devicerequest.FieldScopes] + return ok } -// ResetPollInterval resets all changes to the "poll_interval" field. -func (m *DeviceTokenMutation) ResetPollInterval() { - m.poll_interval = nil - m.addpoll_interval = nil +// ResetScopes resets all changes to the "scopes" field. +func (m *DeviceRequestMutation) ResetScopes() { + m.scopes = nil + m.appendscopes = nil + delete(m.clearedFields, devicerequest.FieldScopes) } -// SetCodeChallenge sets the "code_challenge" field. -func (m *DeviceTokenMutation) SetCodeChallenge(s string) { - m.code_challenge = &s +// SetExpiry sets the "expiry" field. +func (m *DeviceRequestMutation) SetExpiry(t time.Time) { + m.expiry = &t } -// CodeChallenge returns the value of the "code_challenge" field in the mutation. -func (m *DeviceTokenMutation) CodeChallenge() (r string, exists bool) { - v := m.code_challenge +// Expiry returns the value of the "expiry" field in the mutation. +func (m *DeviceRequestMutation) Expiry() (r time.Time, exists bool) { + v := m.expiry if v == nil { return } return *v, true } -// OldCodeChallenge returns the old "code_challenge" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// OldExpiry returns the old "expiry" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. 
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DeviceTokenMutation) OldCodeChallenge(ctx context.Context) (v string, err error) { +func (m *DeviceRequestMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCodeChallenge is only allowed on UpdateOne operations") + return v, errors.New("OldExpiry is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCodeChallenge requires an ID field in the mutation") + return v, errors.New("OldExpiry requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldCodeChallenge: %w", err) + return v, fmt.Errorf("querying old value for OldExpiry: %w", err) } - return oldValue.CodeChallenge, nil + return oldValue.Expiry, nil } -// ResetCodeChallenge resets all changes to the "code_challenge" field. -func (m *DeviceTokenMutation) ResetCodeChallenge() { - m.code_challenge = nil +// ResetExpiry resets all changes to the "expiry" field. +func (m *DeviceRequestMutation) ResetExpiry() { + m.expiry = nil } -// SetCodeChallengeMethod sets the "code_challenge_method" field. -func (m *DeviceTokenMutation) SetCodeChallengeMethod(s string) { - m.code_challenge_method = &s -} - -// CodeChallengeMethod returns the value of the "code_challenge_method" field in the mutation. -func (m *DeviceTokenMutation) CodeChallengeMethod() (r string, exists bool) { - v := m.code_challenge_method - if v == nil { - return - } - return *v, true -} - -// OldCodeChallengeMethod returns the old "code_challenge_method" field's value of the DeviceToken entity. -// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *DeviceTokenMutation) OldCodeChallengeMethod(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCodeChallengeMethod is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCodeChallengeMethod requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCodeChallengeMethod: %w", err) - } - return oldValue.CodeChallengeMethod, nil -} - -// ResetCodeChallengeMethod resets all changes to the "code_challenge_method" field. -func (m *DeviceTokenMutation) ResetCodeChallengeMethod() { - m.code_challenge_method = nil -} - -// Where appends a list predicates to the DeviceTokenMutation builder. -func (m *DeviceTokenMutation) Where(ps ...predicate.DeviceToken) { +// Where appends a list predicates to the DeviceRequestMutation builder. +func (m *DeviceRequestMutation) Where(ps ...predicate.DeviceRequest) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the DeviceTokenMutation builder. Using this method, +// WhereP appends storage-level predicates to the DeviceRequestMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *DeviceTokenMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.DeviceToken, len(ps)) +func (m *DeviceRequestMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DeviceRequest, len(ps)) for i := range ps { p[i] = ps[i] } @@ -4302,48 +4234,42 @@ func (m *DeviceTokenMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *DeviceTokenMutation) Op() Op { +func (m *DeviceRequestMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. 
-func (m *DeviceTokenMutation) SetOp(op Op) { +func (m *DeviceRequestMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (DeviceToken). -func (m *DeviceTokenMutation) Type() string { +// Type returns the node type of this mutation (DeviceRequest). +func (m *DeviceRequestMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). -func (m *DeviceTokenMutation) Fields() []string { - fields := make([]string, 0, 8) - if m.device_code != nil { - fields = append(fields, devicetoken.FieldDeviceCode) - } - if m.status != nil { - fields = append(fields, devicetoken.FieldStatus) - } - if m.token != nil { - fields = append(fields, devicetoken.FieldToken) +func (m *DeviceRequestMutation) Fields() []string { + fields := make([]string, 0, 6) + if m.user_code != nil { + fields = append(fields, devicerequest.FieldUserCode) } - if m.expiry != nil { - fields = append(fields, devicetoken.FieldExpiry) + if m.device_code != nil { + fields = append(fields, devicerequest.FieldDeviceCode) } - if m.last_request != nil { - fields = append(fields, devicetoken.FieldLastRequest) + if m.client_id != nil { + fields = append(fields, devicerequest.FieldClientID) } - if m.poll_interval != nil { - fields = append(fields, devicetoken.FieldPollInterval) + if m.client_secret != nil { + fields = append(fields, devicerequest.FieldClientSecret) } - if m.code_challenge != nil { - fields = append(fields, devicetoken.FieldCodeChallenge) + if m.scopes != nil { + fields = append(fields, devicerequest.FieldScopes) } - if m.code_challenge_method != nil { - fields = append(fields, devicetoken.FieldCodeChallengeMethod) + if m.expiry != nil { + fields = append(fields, devicerequest.FieldExpiry) } return fields } @@ -4351,24 +4277,20 @@ func (m *DeviceTokenMutation) Fields() []string { // Field returns the value of a field with 
the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *DeviceTokenMutation) Field(name string) (ent.Value, bool) { +func (m *DeviceRequestMutation) Field(name string) (ent.Value, bool) { switch name { - case devicetoken.FieldDeviceCode: + case devicerequest.FieldUserCode: + return m.UserCode() + case devicerequest.FieldDeviceCode: return m.DeviceCode() - case devicetoken.FieldStatus: - return m.Status() - case devicetoken.FieldToken: - return m.Token() - case devicetoken.FieldExpiry: + case devicerequest.FieldClientID: + return m.ClientID() + case devicerequest.FieldClientSecret: + return m.ClientSecret() + case devicerequest.FieldScopes: + return m.Scopes() + case devicerequest.FieldExpiry: return m.Expiry() - case devicetoken.FieldLastRequest: - return m.LastRequest() - case devicetoken.FieldPollInterval: - return m.PollInterval() - case devicetoken.FieldCodeChallenge: - return m.CodeChallenge() - case devicetoken.FieldCodeChallengeMethod: - return m.CodeChallengeMethod() } return nil, false } @@ -4376,266 +4298,231 @@ func (m *DeviceTokenMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *DeviceTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *DeviceRequestMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case devicetoken.FieldDeviceCode: + case devicerequest.FieldUserCode: + return m.OldUserCode(ctx) + case devicerequest.FieldDeviceCode: return m.OldDeviceCode(ctx) - case devicetoken.FieldStatus: - return m.OldStatus(ctx) - case devicetoken.FieldToken: - return m.OldToken(ctx) - case devicetoken.FieldExpiry: + case devicerequest.FieldClientID: + return m.OldClientID(ctx) + case devicerequest.FieldClientSecret: + return m.OldClientSecret(ctx) + case devicerequest.FieldScopes: + return m.OldScopes(ctx) + case devicerequest.FieldExpiry: return m.OldExpiry(ctx) - case devicetoken.FieldLastRequest: - return m.OldLastRequest(ctx) - case devicetoken.FieldPollInterval: - return m.OldPollInterval(ctx) - case devicetoken.FieldCodeChallenge: - return m.OldCodeChallenge(ctx) - case devicetoken.FieldCodeChallengeMethod: - return m.OldCodeChallengeMethod(ctx) } - return nil, fmt.Errorf("unknown DeviceToken field %s", name) + return nil, fmt.Errorf("unknown DeviceRequest field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *DeviceTokenMutation) SetField(name string, value ent.Value) error { +func (m *DeviceRequestMutation) SetField(name string, value ent.Value) error { switch name { - case devicetoken.FieldDeviceCode: + case devicerequest.FieldUserCode: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetDeviceCode(v) + m.SetUserCode(v) return nil - case devicetoken.FieldStatus: + case devicerequest.FieldDeviceCode: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetStatus(v) - return nil - case devicetoken.FieldToken: - v, ok := value.([]byte) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetToken(v) - return nil - case devicetoken.FieldExpiry: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetExpiry(v) + m.SetDeviceCode(v) return nil - case devicetoken.FieldLastRequest: - v, ok := value.(time.Time) + case devicerequest.FieldClientID: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetLastRequest(v) + m.SetClientID(v) return nil - case devicetoken.FieldPollInterval: - v, ok := value.(int) + case devicerequest.FieldClientSecret: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetPollInterval(v) + m.SetClientSecret(v) return nil - case devicetoken.FieldCodeChallenge: - v, ok := value.(string) + case devicerequest.FieldScopes: + v, ok := value.([]string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCodeChallenge(v) + m.SetScopes(v) return nil - case devicetoken.FieldCodeChallengeMethod: - v, ok := value.(string) + case devicerequest.FieldExpiry: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCodeChallengeMethod(v) + m.SetExpiry(v) return 
nil } - return fmt.Errorf("unknown DeviceToken field %s", name) + return fmt.Errorf("unknown DeviceRequest field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *DeviceTokenMutation) AddedFields() []string { - var fields []string - if m.addpoll_interval != nil { - fields = append(fields, devicetoken.FieldPollInterval) - } - return fields +func (m *DeviceRequestMutation) AddedFields() []string { + return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *DeviceTokenMutation) AddedField(name string) (ent.Value, bool) { - switch name { - case devicetoken.FieldPollInterval: - return m.AddedPollInterval() - } +func (m *DeviceRequestMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *DeviceTokenMutation) AddField(name string, value ent.Value) error { +func (m *DeviceRequestMutation) AddField(name string, value ent.Value) error { switch name { - case devicetoken.FieldPollInterval: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddPollInterval(v) - return nil } - return fmt.Errorf("unknown DeviceToken numeric field %s", name) + return fmt.Errorf("unknown DeviceRequest numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. 
-func (m *DeviceTokenMutation) ClearedFields() []string { +func (m *DeviceRequestMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(devicetoken.FieldToken) { - fields = append(fields, devicetoken.FieldToken) + if m.FieldCleared(devicerequest.FieldScopes) { + fields = append(fields, devicerequest.FieldScopes) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *DeviceTokenMutation) FieldCleared(name string) bool { +func (m *DeviceRequestMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *DeviceTokenMutation) ClearField(name string) error { +func (m *DeviceRequestMutation) ClearField(name string) error { switch name { - case devicetoken.FieldToken: - m.ClearToken() + case devicerequest.FieldScopes: + m.ClearScopes() return nil } - return fmt.Errorf("unknown DeviceToken nullable field %s", name) + return fmt.Errorf("unknown DeviceRequest nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. 
-func (m *DeviceTokenMutation) ResetField(name string) error { +func (m *DeviceRequestMutation) ResetField(name string) error { switch name { - case devicetoken.FieldDeviceCode: - m.ResetDeviceCode() + case devicerequest.FieldUserCode: + m.ResetUserCode() return nil - case devicetoken.FieldStatus: - m.ResetStatus() + case devicerequest.FieldDeviceCode: + m.ResetDeviceCode() return nil - case devicetoken.FieldToken: - m.ResetToken() + case devicerequest.FieldClientID: + m.ResetClientID() return nil - case devicetoken.FieldExpiry: - m.ResetExpiry() + case devicerequest.FieldClientSecret: + m.ResetClientSecret() return nil - case devicetoken.FieldLastRequest: - m.ResetLastRequest() + case devicerequest.FieldScopes: + m.ResetScopes() return nil - case devicetoken.FieldPollInterval: - m.ResetPollInterval() - return nil - case devicetoken.FieldCodeChallenge: - m.ResetCodeChallenge() - return nil - case devicetoken.FieldCodeChallengeMethod: - m.ResetCodeChallengeMethod() + case devicerequest.FieldExpiry: + m.ResetExpiry() return nil } - return fmt.Errorf("unknown DeviceToken field %s", name) + return fmt.Errorf("unknown DeviceRequest field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *DeviceTokenMutation) AddedEdges() []string { +func (m *DeviceRequestMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *DeviceTokenMutation) AddedIDs(name string) []ent.Value { +func (m *DeviceRequestMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
-func (m *DeviceTokenMutation) RemovedEdges() []string { +func (m *DeviceRequestMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *DeviceTokenMutation) RemovedIDs(name string) []ent.Value { +func (m *DeviceRequestMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *DeviceTokenMutation) ClearedEdges() []string { +func (m *DeviceRequestMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *DeviceTokenMutation) EdgeCleared(name string) bool { +func (m *DeviceRequestMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *DeviceTokenMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown DeviceToken unique edge %s", name) +func (m *DeviceRequestMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown DeviceRequest unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *DeviceTokenMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown DeviceToken edge %s", name) +func (m *DeviceRequestMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown DeviceRequest edge %s", name) } -// KeysMutation represents an operation that mutates the Keys nodes in the graph. -type KeysMutation struct { +// DeviceTokenMutation represents an operation that mutates the DeviceToken nodes in the graph. 
+type DeviceTokenMutation struct { config - op Op - typ string - id *string - verification_keys *[]storage.VerificationKey - appendverification_keys []storage.VerificationKey - signing_key *jose.JSONWebKey - signing_key_pub *jose.JSONWebKey - next_rotation *time.Time - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*Keys, error) - predicates []predicate.Keys + op Op + typ string + id *int + device_code *string + status *string + token *[]byte + expiry *time.Time + last_request *time.Time + poll_interval *int + addpoll_interval *int + code_challenge *string + code_challenge_method *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*DeviceToken, error) + predicates []predicate.DeviceToken } -var _ ent.Mutation = (*KeysMutation)(nil) +var _ ent.Mutation = (*DeviceTokenMutation)(nil) -// keysOption allows management of the mutation configuration using functional options. -type keysOption func(*KeysMutation) +// devicetokenOption allows management of the mutation configuration using functional options. +type devicetokenOption func(*DeviceTokenMutation) -// newKeysMutation creates new mutation for the Keys entity. -func newKeysMutation(c config, op Op, opts ...keysOption) *KeysMutation { - m := &KeysMutation{ +// newDeviceTokenMutation creates new mutation for the DeviceToken entity. +func newDeviceTokenMutation(c config, op Op, opts ...devicetokenOption) *DeviceTokenMutation { + m := &DeviceTokenMutation{ config: c, op: op, - typ: TypeKeys, + typ: TypeDeviceToken, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -4644,20 +4531,20 @@ func newKeysMutation(c config, op Op, opts ...keysOption) *KeysMutation { return m } -// withKeysID sets the ID field of the mutation. -func withKeysID(id string) keysOption { - return func(m *KeysMutation) { +// withDeviceTokenID sets the ID field of the mutation. 
+func withDeviceTokenID(id int) devicetokenOption { + return func(m *DeviceTokenMutation) { var ( err error once sync.Once - value *Keys + value *DeviceToken ) - m.oldValue = func(ctx context.Context) (*Keys, error) { + m.oldValue = func(ctx context.Context) (*DeviceToken, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().Keys.Get(ctx, id) + value, err = m.Client().DeviceToken.Get(ctx, id) } }) return value, err @@ -4666,10 +4553,10 @@ func withKeysID(id string) keysOption { } } -// withKeys sets the old Keys of the mutation. -func withKeys(node *Keys) keysOption { - return func(m *KeysMutation) { - m.oldValue = func(context.Context) (*Keys, error) { +// withDeviceToken sets the old DeviceToken of the mutation. +func withDeviceToken(node *DeviceToken) devicetokenOption { + return func(m *DeviceTokenMutation) { + m.oldValue = func(context.Context) (*DeviceToken, error) { return node, nil } m.id = &node.ID @@ -4678,7 +4565,7 @@ func withKeys(node *Keys) keysOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m KeysMutation) Client() *Client { +func (m DeviceTokenMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -4686,7 +4573,7 @@ func (m KeysMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m KeysMutation) Tx() (*Tx, error) { +func (m DeviceTokenMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -4695,15 +4582,9 @@ func (m KeysMutation) Tx() (*Tx, error) { return tx, nil } -// SetID sets the value of the id field. Note that this -// operation is only accepted on creation of Keys entities. 
-func (m *KeysMutation) SetID(id string) { - m.id = &id -} - // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *KeysMutation) ID() (id string, exists bool) { +func (m *DeviceTokenMutation) ID() (id int, exists bool) { if m.id == nil { return } @@ -4714,189 +4595,351 @@ func (m *KeysMutation) ID() (id string, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *KeysMutation) IDs(ctx context.Context) ([]string, error) { +func (m *DeviceTokenMutation) IDs(ctx context.Context) ([]int, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() if exists { - return []string{id}, nil + return []int{id}, nil } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().Keys.Query().Where(m.predicates...).IDs(ctx) + return m.Client().DeviceToken.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetVerificationKeys sets the "verification_keys" field. -func (m *KeysMutation) SetVerificationKeys(sk []storage.VerificationKey) { - m.verification_keys = &sk - m.appendverification_keys = nil +// SetDeviceCode sets the "device_code" field. +func (m *DeviceTokenMutation) SetDeviceCode(s string) { + m.device_code = &s } -// VerificationKeys returns the value of the "verification_keys" field in the mutation. -func (m *KeysMutation) VerificationKeys() (r []storage.VerificationKey, exists bool) { - v := m.verification_keys +// DeviceCode returns the value of the "device_code" field in the mutation. 
+func (m *DeviceTokenMutation) DeviceCode() (r string, exists bool) { + v := m.device_code if v == nil { return } return *v, true } -// OldVerificationKeys returns the old "verification_keys" field's value of the Keys entity. -// If the Keys object wasn't provided to the builder, the object is fetched from the database. +// OldDeviceCode returns the old "device_code" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *KeysMutation) OldVerificationKeys(ctx context.Context) (v []storage.VerificationKey, err error) { +func (m *DeviceTokenMutation) OldDeviceCode(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldVerificationKeys is only allowed on UpdateOne operations") + return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldVerificationKeys requires an ID field in the mutation") + return v, errors.New("OldDeviceCode requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldVerificationKeys: %w", err) - } - return oldValue.VerificationKeys, nil -} - -// AppendVerificationKeys adds sk to the "verification_keys" field. -func (m *KeysMutation) AppendVerificationKeys(sk []storage.VerificationKey) { - m.appendverification_keys = append(m.appendverification_keys, sk...) -} - -// AppendedVerificationKeys returns the list of values that were appended to the "verification_keys" field in this mutation. 
-func (m *KeysMutation) AppendedVerificationKeys() ([]storage.VerificationKey, bool) { - if len(m.appendverification_keys) == 0 { - return nil, false + return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err) } - return m.appendverification_keys, true + return oldValue.DeviceCode, nil } -// ResetVerificationKeys resets all changes to the "verification_keys" field. -func (m *KeysMutation) ResetVerificationKeys() { - m.verification_keys = nil - m.appendverification_keys = nil +// ResetDeviceCode resets all changes to the "device_code" field. +func (m *DeviceTokenMutation) ResetDeviceCode() { + m.device_code = nil } -// SetSigningKey sets the "signing_key" field. -func (m *KeysMutation) SetSigningKey(jwk jose.JSONWebKey) { - m.signing_key = &jwk +// SetStatus sets the "status" field. +func (m *DeviceTokenMutation) SetStatus(s string) { + m.status = &s } -// SigningKey returns the value of the "signing_key" field in the mutation. -func (m *KeysMutation) SigningKey() (r jose.JSONWebKey, exists bool) { - v := m.signing_key +// Status returns the value of the "status" field in the mutation. +func (m *DeviceTokenMutation) Status() (r string, exists bool) { + v := m.status if v == nil { return } return *v, true } -// OldSigningKey returns the old "signing_key" field's value of the Keys entity. -// If the Keys object wasn't provided to the builder, the object is fetched from the database. +// OldStatus returns the old "status" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *KeysMutation) OldSigningKey(ctx context.Context) (v jose.JSONWebKey, err error) { +func (m *DeviceTokenMutation) OldStatus(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSigningKey is only allowed on UpdateOne operations") + return v, errors.New("OldStatus is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSigningKey requires an ID field in the mutation") + return v, errors.New("OldStatus requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldSigningKey: %w", err) + return v, fmt.Errorf("querying old value for OldStatus: %w", err) } - return oldValue.SigningKey, nil + return oldValue.Status, nil } -// ResetSigningKey resets all changes to the "signing_key" field. -func (m *KeysMutation) ResetSigningKey() { - m.signing_key = nil +// ResetStatus resets all changes to the "status" field. +func (m *DeviceTokenMutation) ResetStatus() { + m.status = nil } -// SetSigningKeyPub sets the "signing_key_pub" field. -func (m *KeysMutation) SetSigningKeyPub(jwk jose.JSONWebKey) { - m.signing_key_pub = &jwk +// SetToken sets the "token" field. +func (m *DeviceTokenMutation) SetToken(b []byte) { + m.token = &b } -// SigningKeyPub returns the value of the "signing_key_pub" field in the mutation. -func (m *KeysMutation) SigningKeyPub() (r jose.JSONWebKey, exists bool) { - v := m.signing_key_pub +// Token returns the value of the "token" field in the mutation. +func (m *DeviceTokenMutation) Token() (r []byte, exists bool) { + v := m.token if v == nil { return } return *v, true } -// OldSigningKeyPub returns the old "signing_key_pub" field's value of the Keys entity. -// If the Keys object wasn't provided to the builder, the object is fetched from the database. +// OldToken returns the old "token" field's value of the DeviceToken entity. 
+// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *KeysMutation) OldSigningKeyPub(ctx context.Context) (v jose.JSONWebKey, err error) { +func (m *DeviceTokenMutation) OldToken(ctx context.Context) (v *[]byte, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSigningKeyPub is only allowed on UpdateOne operations") + return v, errors.New("OldToken is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSigningKeyPub requires an ID field in the mutation") + return v, errors.New("OldToken requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldSigningKeyPub: %w", err) + return v, fmt.Errorf("querying old value for OldToken: %w", err) } - return oldValue.SigningKeyPub, nil + return oldValue.Token, nil } -// ResetSigningKeyPub resets all changes to the "signing_key_pub" field. -func (m *KeysMutation) ResetSigningKeyPub() { - m.signing_key_pub = nil +// ClearToken clears the value of the "token" field. +func (m *DeviceTokenMutation) ClearToken() { + m.token = nil + m.clearedFields[devicetoken.FieldToken] = struct{}{} } -// SetNextRotation sets the "next_rotation" field. -func (m *KeysMutation) SetNextRotation(t time.Time) { - m.next_rotation = &t +// TokenCleared returns if the "token" field was cleared in this mutation. +func (m *DeviceTokenMutation) TokenCleared() bool { + _, ok := m.clearedFields[devicetoken.FieldToken] + return ok } -// NextRotation returns the value of the "next_rotation" field in the mutation. -func (m *KeysMutation) NextRotation() (r time.Time, exists bool) { - v := m.next_rotation +// ResetToken resets all changes to the "token" field. 
+func (m *DeviceTokenMutation) ResetToken() { + m.token = nil + delete(m.clearedFields, devicetoken.FieldToken) +} + +// SetExpiry sets the "expiry" field. +func (m *DeviceTokenMutation) SetExpiry(t time.Time) { + m.expiry = &t +} + +// Expiry returns the value of the "expiry" field in the mutation. +func (m *DeviceTokenMutation) Expiry() (r time.Time, exists bool) { + v := m.expiry if v == nil { return } return *v, true } -// OldNextRotation returns the old "next_rotation" field's value of the Keys entity. -// If the Keys object wasn't provided to the builder, the object is fetched from the database. +// OldExpiry returns the old "expiry" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *KeysMutation) OldNextRotation(ctx context.Context) (v time.Time, err error) { +func (m *DeviceTokenMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldNextRotation is only allowed on UpdateOne operations") + return v, errors.New("OldExpiry is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldNextRotation requires an ID field in the mutation") + return v, errors.New("OldExpiry requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldNextRotation: %w", err) + return v, fmt.Errorf("querying old value for OldExpiry: %w", err) } - return oldValue.NextRotation, nil + return oldValue.Expiry, nil } -// ResetNextRotation resets all changes to the "next_rotation" field. -func (m *KeysMutation) ResetNextRotation() { - m.next_rotation = nil +// ResetExpiry resets all changes to the "expiry" field. 
+func (m *DeviceTokenMutation) ResetExpiry() { + m.expiry = nil } -// Where appends a list predicates to the KeysMutation builder. -func (m *KeysMutation) Where(ps ...predicate.Keys) { +// SetLastRequest sets the "last_request" field. +func (m *DeviceTokenMutation) SetLastRequest(t time.Time) { + m.last_request = &t +} + +// LastRequest returns the value of the "last_request" field in the mutation. +func (m *DeviceTokenMutation) LastRequest() (r time.Time, exists bool) { + v := m.last_request + if v == nil { + return + } + return *v, true +} + +// OldLastRequest returns the old "last_request" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldLastRequest(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastRequest is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastRequest requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastRequest: %w", err) + } + return oldValue.LastRequest, nil +} + +// ResetLastRequest resets all changes to the "last_request" field. +func (m *DeviceTokenMutation) ResetLastRequest() { + m.last_request = nil +} + +// SetPollInterval sets the "poll_interval" field. +func (m *DeviceTokenMutation) SetPollInterval(i int) { + m.poll_interval = &i + m.addpoll_interval = nil +} + +// PollInterval returns the value of the "poll_interval" field in the mutation. +func (m *DeviceTokenMutation) PollInterval() (r int, exists bool) { + v := m.poll_interval + if v == nil { + return + } + return *v, true +} + +// OldPollInterval returns the old "poll_interval" field's value of the DeviceToken entity. 
+// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldPollInterval(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPollInterval is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPollInterval requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPollInterval: %w", err) + } + return oldValue.PollInterval, nil +} + +// AddPollInterval adds i to the "poll_interval" field. +func (m *DeviceTokenMutation) AddPollInterval(i int) { + if m.addpoll_interval != nil { + *m.addpoll_interval += i + } else { + m.addpoll_interval = &i + } +} + +// AddedPollInterval returns the value that was added to the "poll_interval" field in this mutation. +func (m *DeviceTokenMutation) AddedPollInterval() (r int, exists bool) { + v := m.addpoll_interval + if v == nil { + return + } + return *v, true +} + +// ResetPollInterval resets all changes to the "poll_interval" field. +func (m *DeviceTokenMutation) ResetPollInterval() { + m.poll_interval = nil + m.addpoll_interval = nil +} + +// SetCodeChallenge sets the "code_challenge" field. +func (m *DeviceTokenMutation) SetCodeChallenge(s string) { + m.code_challenge = &s +} + +// CodeChallenge returns the value of the "code_challenge" field in the mutation. +func (m *DeviceTokenMutation) CodeChallenge() (r string, exists bool) { + v := m.code_challenge + if v == nil { + return + } + return *v, true +} + +// OldCodeChallenge returns the old "code_challenge" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldCodeChallenge(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCodeChallenge is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCodeChallenge requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCodeChallenge: %w", err) + } + return oldValue.CodeChallenge, nil +} + +// ResetCodeChallenge resets all changes to the "code_challenge" field. +func (m *DeviceTokenMutation) ResetCodeChallenge() { + m.code_challenge = nil +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (m *DeviceTokenMutation) SetCodeChallengeMethod(s string) { + m.code_challenge_method = &s +} + +// CodeChallengeMethod returns the value of the "code_challenge_method" field in the mutation. +func (m *DeviceTokenMutation) CodeChallengeMethod() (r string, exists bool) { + v := m.code_challenge_method + if v == nil { + return + } + return *v, true +} + +// OldCodeChallengeMethod returns the old "code_challenge_method" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DeviceTokenMutation) OldCodeChallengeMethod(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCodeChallengeMethod is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCodeChallengeMethod requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCodeChallengeMethod: %w", err) + } + return oldValue.CodeChallengeMethod, nil +} + +// ResetCodeChallengeMethod resets all changes to the "code_challenge_method" field. +func (m *DeviceTokenMutation) ResetCodeChallengeMethod() { + m.code_challenge_method = nil +} + +// Where appends a list predicates to the DeviceTokenMutation builder. +func (m *DeviceTokenMutation) Where(ps ...predicate.DeviceToken) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the KeysMutation builder. Using this method, +// WhereP appends storage-level predicates to the DeviceTokenMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *KeysMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.Keys, len(ps)) +func (m *DeviceTokenMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DeviceToken, len(ps)) for i := range ps { p[i] = ps[i] } @@ -4904,36 +4947,48 @@ func (m *KeysMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *KeysMutation) Op() Op { +func (m *DeviceTokenMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *KeysMutation) SetOp(op Op) { +func (m *DeviceTokenMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (Keys). -func (m *KeysMutation) Type() string { +// Type returns the node type of this mutation (DeviceToken). 
+func (m *DeviceTokenMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). -func (m *KeysMutation) Fields() []string { - fields := make([]string, 0, 4) - if m.verification_keys != nil { - fields = append(fields, keys.FieldVerificationKeys) +func (m *DeviceTokenMutation) Fields() []string { + fields := make([]string, 0, 8) + if m.device_code != nil { + fields = append(fields, devicetoken.FieldDeviceCode) } - if m.signing_key != nil { - fields = append(fields, keys.FieldSigningKey) + if m.status != nil { + fields = append(fields, devicetoken.FieldStatus) } - if m.signing_key_pub != nil { - fields = append(fields, keys.FieldSigningKeyPub) + if m.token != nil { + fields = append(fields, devicetoken.FieldToken) } - if m.next_rotation != nil { - fields = append(fields, keys.FieldNextRotation) + if m.expiry != nil { + fields = append(fields, devicetoken.FieldExpiry) + } + if m.last_request != nil { + fields = append(fields, devicetoken.FieldLastRequest) + } + if m.poll_interval != nil { + fields = append(fields, devicetoken.FieldPollInterval) + } + if m.code_challenge != nil { + fields = append(fields, devicetoken.FieldCodeChallenge) + } + if m.code_challenge_method != nil { + fields = append(fields, devicetoken.FieldCodeChallengeMethod) } return fields } @@ -4941,16 +4996,24 @@ func (m *KeysMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. 
-func (m *KeysMutation) Field(name string) (ent.Value, bool) { +func (m *DeviceTokenMutation) Field(name string) (ent.Value, bool) { switch name { - case keys.FieldVerificationKeys: - return m.VerificationKeys() - case keys.FieldSigningKey: - return m.SigningKey() - case keys.FieldSigningKeyPub: - return m.SigningKeyPub() - case keys.FieldNextRotation: - return m.NextRotation() + case devicetoken.FieldDeviceCode: + return m.DeviceCode() + case devicetoken.FieldStatus: + return m.Status() + case devicetoken.FieldToken: + return m.Token() + case devicetoken.FieldExpiry: + return m.Expiry() + case devicetoken.FieldLastRequest: + return m.LastRequest() + case devicetoken.FieldPollInterval: + return m.PollInterval() + case devicetoken.FieldCodeChallenge: + return m.CodeChallenge() + case devicetoken.FieldCodeChallengeMethod: + return m.CodeChallengeMethod() } return nil, false } @@ -4958,197 +5021,266 @@ func (m *KeysMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *KeysMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *DeviceTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case keys.FieldVerificationKeys: - return m.OldVerificationKeys(ctx) - case keys.FieldSigningKey: - return m.OldSigningKey(ctx) - case keys.FieldSigningKeyPub: - return m.OldSigningKeyPub(ctx) - case keys.FieldNextRotation: - return m.OldNextRotation(ctx) + case devicetoken.FieldDeviceCode: + return m.OldDeviceCode(ctx) + case devicetoken.FieldStatus: + return m.OldStatus(ctx) + case devicetoken.FieldToken: + return m.OldToken(ctx) + case devicetoken.FieldExpiry: + return m.OldExpiry(ctx) + case devicetoken.FieldLastRequest: + return m.OldLastRequest(ctx) + case devicetoken.FieldPollInterval: + return m.OldPollInterval(ctx) + case devicetoken.FieldCodeChallenge: + return m.OldCodeChallenge(ctx) + case devicetoken.FieldCodeChallengeMethod: + return m.OldCodeChallengeMethod(ctx) } - return nil, fmt.Errorf("unknown Keys field %s", name) + return nil, fmt.Errorf("unknown DeviceToken field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *KeysMutation) SetField(name string, value ent.Value) error { +func (m *DeviceTokenMutation) SetField(name string, value ent.Value) error { switch name { - case keys.FieldVerificationKeys: - v, ok := value.([]storage.VerificationKey) + case devicetoken.FieldDeviceCode: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetVerificationKeys(v) + m.SetDeviceCode(v) return nil - case keys.FieldSigningKey: - v, ok := value.(jose.JSONWebKey) + case devicetoken.FieldStatus: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetSigningKey(v) + m.SetStatus(v) return nil - case keys.FieldSigningKeyPub: - v, ok := value.(jose.JSONWebKey) + case devicetoken.FieldToken: + v, ok := value.([]byte) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetSigningKeyPub(v) + m.SetToken(v) return nil - case keys.FieldNextRotation: + case devicetoken.FieldExpiry: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetNextRotation(v) + m.SetExpiry(v) + return nil + case devicetoken.FieldLastRequest: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastRequest(v) + return nil + case devicetoken.FieldPollInterval: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPollInterval(v) + return nil + case devicetoken.FieldCodeChallenge: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallenge(v) + return nil + case devicetoken.FieldCodeChallengeMethod: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallengeMethod(v) return nil } - return fmt.Errorf("unknown Keys field %s", name) + return fmt.Errorf("unknown DeviceToken 
field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *KeysMutation) AddedFields() []string { - return nil +func (m *DeviceTokenMutation) AddedFields() []string { + var fields []string + if m.addpoll_interval != nil { + fields = append(fields, devicetoken.FieldPollInterval) + } + return fields } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *KeysMutation) AddedField(name string) (ent.Value, bool) { +func (m *DeviceTokenMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case devicetoken.FieldPollInterval: + return m.AddedPollInterval() + } return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *KeysMutation) AddField(name string, value ent.Value) error { +func (m *DeviceTokenMutation) AddField(name string, value ent.Value) error { switch name { + case devicetoken.FieldPollInterval: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPollInterval(v) + return nil } - return fmt.Errorf("unknown Keys numeric field %s", name) + return fmt.Errorf("unknown DeviceToken numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *KeysMutation) ClearedFields() []string { - return nil +func (m *DeviceTokenMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(devicetoken.FieldToken) { + fields = append(fields, devicetoken.FieldToken) + } + return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. 
-func (m *KeysMutation) FieldCleared(name string) bool { +func (m *DeviceTokenMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *KeysMutation) ClearField(name string) error { - return fmt.Errorf("unknown Keys nullable field %s", name) +func (m *DeviceTokenMutation) ClearField(name string) error { + switch name { + case devicetoken.FieldToken: + m.ClearToken() + return nil + } + return fmt.Errorf("unknown DeviceToken nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *KeysMutation) ResetField(name string) error { +func (m *DeviceTokenMutation) ResetField(name string) error { switch name { - case keys.FieldVerificationKeys: - m.ResetVerificationKeys() + case devicetoken.FieldDeviceCode: + m.ResetDeviceCode() return nil - case keys.FieldSigningKey: - m.ResetSigningKey() + case devicetoken.FieldStatus: + m.ResetStatus() return nil - case keys.FieldSigningKeyPub: - m.ResetSigningKeyPub() + case devicetoken.FieldToken: + m.ResetToken() return nil - case keys.FieldNextRotation: - m.ResetNextRotation() + case devicetoken.FieldExpiry: + m.ResetExpiry() + return nil + case devicetoken.FieldLastRequest: + m.ResetLastRequest() + return nil + case devicetoken.FieldPollInterval: + m.ResetPollInterval() + return nil + case devicetoken.FieldCodeChallenge: + m.ResetCodeChallenge() + return nil + case devicetoken.FieldCodeChallengeMethod: + m.ResetCodeChallengeMethod() return nil } - return fmt.Errorf("unknown Keys field %s", name) + return fmt.Errorf("unknown DeviceToken field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. 
-func (m *KeysMutation) AddedEdges() []string { +func (m *DeviceTokenMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *KeysMutation) AddedIDs(name string) []ent.Value { +func (m *DeviceTokenMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *KeysMutation) RemovedEdges() []string { +func (m *DeviceTokenMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *KeysMutation) RemovedIDs(name string) []ent.Value { +func (m *DeviceTokenMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *KeysMutation) ClearedEdges() []string { +func (m *DeviceTokenMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *KeysMutation) EdgeCleared(name string) bool { +func (m *DeviceTokenMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *KeysMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown Keys unique edge %s", name) +func (m *DeviceTokenMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown DeviceToken unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. 
-func (m *KeysMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown Keys edge %s", name) +func (m *DeviceTokenMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown DeviceToken edge %s", name) } -// OAuth2ClientMutation represents an operation that mutates the OAuth2Client nodes in the graph. -type OAuth2ClientMutation struct { +// KeysMutation represents an operation that mutates the Keys nodes in the graph. +type KeysMutation struct { config - op Op - typ string - id *string - secret *string - redirect_uris *[]string - appendredirect_uris []string - trusted_peers *[]string - appendtrusted_peers []string - public *bool - name *string - logo_url *string - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*OAuth2Client, error) - predicates []predicate.OAuth2Client + op Op + typ string + id *string + verification_keys *[]storage.VerificationKey + appendverification_keys []storage.VerificationKey + signing_key *jose.JSONWebKey + signing_key_pub *jose.JSONWebKey + next_rotation *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Keys, error) + predicates []predicate.Keys } -var _ ent.Mutation = (*OAuth2ClientMutation)(nil) +var _ ent.Mutation = (*KeysMutation)(nil) -// oauth2clientOption allows management of the mutation configuration using functional options. -type oauth2clientOption func(*OAuth2ClientMutation) +// keysOption allows management of the mutation configuration using functional options. +type keysOption func(*KeysMutation) -// newOAuth2ClientMutation creates new mutation for the OAuth2Client entity. -func newOAuth2ClientMutation(c config, op Op, opts ...oauth2clientOption) *OAuth2ClientMutation { - m := &OAuth2ClientMutation{ +// newKeysMutation creates new mutation for the Keys entity. 
+func newKeysMutation(c config, op Op, opts ...keysOption) *KeysMutation { + m := &KeysMutation{ config: c, op: op, - typ: TypeOAuth2Client, + typ: TypeKeys, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -5157,20 +5289,20 @@ func newOAuth2ClientMutation(c config, op Op, opts ...oauth2clientOption) *OAuth return m } -// withOAuth2ClientID sets the ID field of the mutation. -func withOAuth2ClientID(id string) oauth2clientOption { - return func(m *OAuth2ClientMutation) { +// withKeysID sets the ID field of the mutation. +func withKeysID(id string) keysOption { + return func(m *KeysMutation) { var ( err error once sync.Once - value *OAuth2Client + value *Keys ) - m.oldValue = func(ctx context.Context) (*OAuth2Client, error) { + m.oldValue = func(ctx context.Context) (*Keys, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().OAuth2Client.Get(ctx, id) + value, err = m.Client().Keys.Get(ctx, id) } }) return value, err @@ -5179,10 +5311,10 @@ func withOAuth2ClientID(id string) oauth2clientOption { } } -// withOAuth2Client sets the old OAuth2Client of the mutation. -func withOAuth2Client(node *OAuth2Client) oauth2clientOption { - return func(m *OAuth2ClientMutation) { - m.oldValue = func(context.Context) (*OAuth2Client, error) { +// withKeys sets the old Keys of the mutation. +func withKeys(node *Keys) keysOption { + return func(m *KeysMutation) { + m.oldValue = func(context.Context) (*Keys, error) { return node, nil } m.id = &node.ID @@ -5191,7 +5323,7 @@ func withOAuth2Client(node *OAuth2Client) oauth2clientOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. 
-func (m OAuth2ClientMutation) Client() *Client { +func (m KeysMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -5199,7 +5331,7 @@ func (m OAuth2ClientMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m OAuth2ClientMutation) Tx() (*Tx, error) { +func (m KeysMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -5209,14 +5341,14 @@ func (m OAuth2ClientMutation) Tx() (*Tx, error) { } // SetID sets the value of the id field. Note that this -// operation is only accepted on creation of OAuth2Client entities. -func (m *OAuth2ClientMutation) SetID(id string) { +// operation is only accepted on creation of Keys entities. +func (m *KeysMutation) SetID(id string) { m.id = &id } // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *OAuth2ClientMutation) ID() (id string, exists bool) { +func (m *KeysMutation) ID() (id string, exists bool) { if m.id == nil { return } @@ -5227,7 +5359,7 @@ func (m *OAuth2ClientMutation) ID() (id string, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. 
-func (m *OAuth2ClientMutation) IDs(ctx context.Context) ([]string, error) { +func (m *KeysMutation) IDs(ctx context.Context) ([]string, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -5236,295 +5368,180 @@ func (m *OAuth2ClientMutation) IDs(ctx context.Context) ([]string, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().OAuth2Client.Query().Where(m.predicates...).IDs(ctx) + return m.Client().Keys.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetSecret sets the "secret" field. -func (m *OAuth2ClientMutation) SetSecret(s string) { - m.secret = &s +// SetVerificationKeys sets the "verification_keys" field. +func (m *KeysMutation) SetVerificationKeys(sk []storage.VerificationKey) { + m.verification_keys = &sk + m.appendverification_keys = nil } -// Secret returns the value of the "secret" field in the mutation. -func (m *OAuth2ClientMutation) Secret() (r string, exists bool) { - v := m.secret +// VerificationKeys returns the value of the "verification_keys" field in the mutation. +func (m *KeysMutation) VerificationKeys() (r []storage.VerificationKey, exists bool) { + v := m.verification_keys if v == nil { return } return *v, true } -// OldSecret returns the old "secret" field's value of the OAuth2Client entity. -// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// OldVerificationKeys returns the old "verification_keys" field's value of the Keys entity. +// If the Keys object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *OAuth2ClientMutation) OldSecret(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSecret is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSecret requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldSecret: %w", err) - } - return oldValue.Secret, nil -} - -// ResetSecret resets all changes to the "secret" field. -func (m *OAuth2ClientMutation) ResetSecret() { - m.secret = nil -} - -// SetRedirectUris sets the "redirect_uris" field. -func (m *OAuth2ClientMutation) SetRedirectUris(s []string) { - m.redirect_uris = &s - m.appendredirect_uris = nil -} - -// RedirectUris returns the value of the "redirect_uris" field in the mutation. -func (m *OAuth2ClientMutation) RedirectUris() (r []string, exists bool) { - v := m.redirect_uris - if v == nil { - return - } - return *v, true -} - -// OldRedirectUris returns the old "redirect_uris" field's value of the OAuth2Client entity. -// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OAuth2ClientMutation) OldRedirectUris(ctx context.Context) (v []string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRedirectUris is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRedirectUris requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldRedirectUris: %w", err) - } - return oldValue.RedirectUris, nil -} - -// AppendRedirectUris adds s to the "redirect_uris" field. -func (m *OAuth2ClientMutation) AppendRedirectUris(s []string) { - m.appendredirect_uris = append(m.appendredirect_uris, s...) 
-} - -// AppendedRedirectUris returns the list of values that were appended to the "redirect_uris" field in this mutation. -func (m *OAuth2ClientMutation) AppendedRedirectUris() ([]string, bool) { - if len(m.appendredirect_uris) == 0 { - return nil, false - } - return m.appendredirect_uris, true -} - -// ClearRedirectUris clears the value of the "redirect_uris" field. -func (m *OAuth2ClientMutation) ClearRedirectUris() { - m.redirect_uris = nil - m.appendredirect_uris = nil - m.clearedFields[oauth2client.FieldRedirectUris] = struct{}{} -} - -// RedirectUrisCleared returns if the "redirect_uris" field was cleared in this mutation. -func (m *OAuth2ClientMutation) RedirectUrisCleared() bool { - _, ok := m.clearedFields[oauth2client.FieldRedirectUris] - return ok -} - -// ResetRedirectUris resets all changes to the "redirect_uris" field. -func (m *OAuth2ClientMutation) ResetRedirectUris() { - m.redirect_uris = nil - m.appendredirect_uris = nil - delete(m.clearedFields, oauth2client.FieldRedirectUris) -} - -// SetTrustedPeers sets the "trusted_peers" field. -func (m *OAuth2ClientMutation) SetTrustedPeers(s []string) { - m.trusted_peers = &s - m.appendtrusted_peers = nil -} - -// TrustedPeers returns the value of the "trusted_peers" field in the mutation. -func (m *OAuth2ClientMutation) TrustedPeers() (r []string, exists bool) { - v := m.trusted_peers - if v == nil { - return - } - return *v, true -} - -// OldTrustedPeers returns the old "trusted_peers" field's value of the OAuth2Client entity. -// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *OAuth2ClientMutation) OldTrustedPeers(ctx context.Context) (v []string, err error) { +func (m *KeysMutation) OldVerificationKeys(ctx context.Context) (v []storage.VerificationKey, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldTrustedPeers is only allowed on UpdateOne operations") + return v, errors.New("OldVerificationKeys is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldTrustedPeers requires an ID field in the mutation") + return v, errors.New("OldVerificationKeys requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldTrustedPeers: %w", err) + return v, fmt.Errorf("querying old value for OldVerificationKeys: %w", err) } - return oldValue.TrustedPeers, nil + return oldValue.VerificationKeys, nil } -// AppendTrustedPeers adds s to the "trusted_peers" field. -func (m *OAuth2ClientMutation) AppendTrustedPeers(s []string) { - m.appendtrusted_peers = append(m.appendtrusted_peers, s...) +// AppendVerificationKeys adds sk to the "verification_keys" field. +func (m *KeysMutation) AppendVerificationKeys(sk []storage.VerificationKey) { + m.appendverification_keys = append(m.appendverification_keys, sk...) } -// AppendedTrustedPeers returns the list of values that were appended to the "trusted_peers" field in this mutation. -func (m *OAuth2ClientMutation) AppendedTrustedPeers() ([]string, bool) { - if len(m.appendtrusted_peers) == 0 { +// AppendedVerificationKeys returns the list of values that were appended to the "verification_keys" field in this mutation. +func (m *KeysMutation) AppendedVerificationKeys() ([]storage.VerificationKey, bool) { + if len(m.appendverification_keys) == 0 { return nil, false } - return m.appendtrusted_peers, true -} - -// ClearTrustedPeers clears the value of the "trusted_peers" field. 
-func (m *OAuth2ClientMutation) ClearTrustedPeers() { - m.trusted_peers = nil - m.appendtrusted_peers = nil - m.clearedFields[oauth2client.FieldTrustedPeers] = struct{}{} -} - -// TrustedPeersCleared returns if the "trusted_peers" field was cleared in this mutation. -func (m *OAuth2ClientMutation) TrustedPeersCleared() bool { - _, ok := m.clearedFields[oauth2client.FieldTrustedPeers] - return ok + return m.appendverification_keys, true } -// ResetTrustedPeers resets all changes to the "trusted_peers" field. -func (m *OAuth2ClientMutation) ResetTrustedPeers() { - m.trusted_peers = nil - m.appendtrusted_peers = nil - delete(m.clearedFields, oauth2client.FieldTrustedPeers) +// ResetVerificationKeys resets all changes to the "verification_keys" field. +func (m *KeysMutation) ResetVerificationKeys() { + m.verification_keys = nil + m.appendverification_keys = nil } -// SetPublic sets the "public" field. -func (m *OAuth2ClientMutation) SetPublic(b bool) { - m.public = &b +// SetSigningKey sets the "signing_key" field. +func (m *KeysMutation) SetSigningKey(jwk jose.JSONWebKey) { + m.signing_key = &jwk } -// Public returns the value of the "public" field in the mutation. -func (m *OAuth2ClientMutation) Public() (r bool, exists bool) { - v := m.public +// SigningKey returns the value of the "signing_key" field in the mutation. +func (m *KeysMutation) SigningKey() (r jose.JSONWebKey, exists bool) { + v := m.signing_key if v == nil { return } return *v, true } -// OldPublic returns the old "public" field's value of the OAuth2Client entity. -// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// OldSigningKey returns the old "signing_key" field's value of the Keys entity. +// If the Keys object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *OAuth2ClientMutation) OldPublic(ctx context.Context) (v bool, err error) { +func (m *KeysMutation) OldSigningKey(ctx context.Context) (v jose.JSONWebKey, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPublic is only allowed on UpdateOne operations") + return v, errors.New("OldSigningKey is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPublic requires an ID field in the mutation") + return v, errors.New("OldSigningKey requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldPublic: %w", err) + return v, fmt.Errorf("querying old value for OldSigningKey: %w", err) } - return oldValue.Public, nil + return oldValue.SigningKey, nil } -// ResetPublic resets all changes to the "public" field. -func (m *OAuth2ClientMutation) ResetPublic() { - m.public = nil +// ResetSigningKey resets all changes to the "signing_key" field. +func (m *KeysMutation) ResetSigningKey() { + m.signing_key = nil } -// SetName sets the "name" field. -func (m *OAuth2ClientMutation) SetName(s string) { - m.name = &s +// SetSigningKeyPub sets the "signing_key_pub" field. +func (m *KeysMutation) SetSigningKeyPub(jwk jose.JSONWebKey) { + m.signing_key_pub = &jwk } -// Name returns the value of the "name" field in the mutation. -func (m *OAuth2ClientMutation) Name() (r string, exists bool) { - v := m.name +// SigningKeyPub returns the value of the "signing_key_pub" field in the mutation. +func (m *KeysMutation) SigningKeyPub() (r jose.JSONWebKey, exists bool) { + v := m.signing_key_pub if v == nil { return } return *v, true } -// OldName returns the old "name" field's value of the OAuth2Client entity. -// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// OldSigningKeyPub returns the old "signing_key_pub" field's value of the Keys entity. 
+// If the Keys object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OAuth2ClientMutation) OldName(ctx context.Context) (v string, err error) { +func (m *KeysMutation) OldSigningKeyPub(ctx context.Context) (v jose.JSONWebKey, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") + return v, errors.New("OldSigningKeyPub is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") + return v, errors.New("OldSigningKeyPub requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) + return v, fmt.Errorf("querying old value for OldSigningKeyPub: %w", err) } - return oldValue.Name, nil + return oldValue.SigningKeyPub, nil } -// ResetName resets all changes to the "name" field. -func (m *OAuth2ClientMutation) ResetName() { - m.name = nil +// ResetSigningKeyPub resets all changes to the "signing_key_pub" field. +func (m *KeysMutation) ResetSigningKeyPub() { + m.signing_key_pub = nil } -// SetLogoURL sets the "logo_url" field. -func (m *OAuth2ClientMutation) SetLogoURL(s string) { - m.logo_url = &s +// SetNextRotation sets the "next_rotation" field. +func (m *KeysMutation) SetNextRotation(t time.Time) { + m.next_rotation = &t } -// LogoURL returns the value of the "logo_url" field in the mutation. -func (m *OAuth2ClientMutation) LogoURL() (r string, exists bool) { - v := m.logo_url +// NextRotation returns the value of the "next_rotation" field in the mutation. +func (m *KeysMutation) NextRotation() (r time.Time, exists bool) { + v := m.next_rotation if v == nil { return } return *v, true } -// OldLogoURL returns the old "logo_url" field's value of the OAuth2Client entity. 
-// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// OldNextRotation returns the old "next_rotation" field's value of the Keys entity. +// If the Keys object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OAuth2ClientMutation) OldLogoURL(ctx context.Context) (v string, err error) { +func (m *KeysMutation) OldNextRotation(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldLogoURL is only allowed on UpdateOne operations") + return v, errors.New("OldNextRotation is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldLogoURL requires an ID field in the mutation") + return v, errors.New("OldNextRotation requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldLogoURL: %w", err) + return v, fmt.Errorf("querying old value for OldNextRotation: %w", err) } - return oldValue.LogoURL, nil + return oldValue.NextRotation, nil } -// ResetLogoURL resets all changes to the "logo_url" field. -func (m *OAuth2ClientMutation) ResetLogoURL() { - m.logo_url = nil +// ResetNextRotation resets all changes to the "next_rotation" field. +func (m *KeysMutation) ResetNextRotation() { + m.next_rotation = nil } -// Where appends a list predicates to the OAuth2ClientMutation builder. -func (m *OAuth2ClientMutation) Where(ps ...predicate.OAuth2Client) { +// Where appends a list predicates to the KeysMutation builder. +func (m *KeysMutation) Where(ps ...predicate.Keys) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the OAuth2ClientMutation builder. Using this method, +// WhereP appends storage-level predicates to the KeysMutation builder. 
Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *OAuth2ClientMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.OAuth2Client, len(ps)) +func (m *KeysMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Keys, len(ps)) for i := range ps { p[i] = ps[i] } @@ -5532,42 +5549,36 @@ func (m *OAuth2ClientMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *OAuth2ClientMutation) Op() Op { +func (m *KeysMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *OAuth2ClientMutation) SetOp(op Op) { +func (m *KeysMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (OAuth2Client). -func (m *OAuth2ClientMutation) Type() string { +// Type returns the node type of this mutation (Keys). +func (m *KeysMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *OAuth2ClientMutation) Fields() []string { - fields := make([]string, 0, 6) - if m.secret != nil { - fields = append(fields, oauth2client.FieldSecret) - } - if m.redirect_uris != nil { - fields = append(fields, oauth2client.FieldRedirectUris) - } - if m.trusted_peers != nil { - fields = append(fields, oauth2client.FieldTrustedPeers) +func (m *KeysMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.verification_keys != nil { + fields = append(fields, keys.FieldVerificationKeys) } - if m.public != nil { - fields = append(fields, oauth2client.FieldPublic) + if m.signing_key != nil { + fields = append(fields, keys.FieldSigningKey) } - if m.name != nil { - fields = append(fields, oauth2client.FieldName) + if m.signing_key_pub != nil { + fields = append(fields, keys.FieldSigningKeyPub) } - if m.logo_url != nil { - fields = append(fields, oauth2client.FieldLogoURL) + if m.next_rotation != nil { + fields = append(fields, keys.FieldNextRotation) } return fields } @@ -5575,20 +5586,16 @@ func (m *OAuth2ClientMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. 
-func (m *OAuth2ClientMutation) Field(name string) (ent.Value, bool) { +func (m *KeysMutation) Field(name string) (ent.Value, bool) { switch name { - case oauth2client.FieldSecret: - return m.Secret() - case oauth2client.FieldRedirectUris: - return m.RedirectUris() - case oauth2client.FieldTrustedPeers: - return m.TrustedPeers() - case oauth2client.FieldPublic: - return m.Public() - case oauth2client.FieldName: - return m.Name() - case oauth2client.FieldLogoURL: - return m.LogoURL() + case keys.FieldVerificationKeys: + return m.VerificationKeys() + case keys.FieldSigningKey: + return m.SigningKey() + case keys.FieldSigningKeyPub: + return m.SigningKeyPub() + case keys.FieldNextRotation: + return m.NextRotation() } return nil, false } @@ -5596,232 +5603,199 @@ func (m *OAuth2ClientMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *OAuth2ClientMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *KeysMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case oauth2client.FieldSecret: - return m.OldSecret(ctx) - case oauth2client.FieldRedirectUris: - return m.OldRedirectUris(ctx) - case oauth2client.FieldTrustedPeers: - return m.OldTrustedPeers(ctx) - case oauth2client.FieldPublic: - return m.OldPublic(ctx) - case oauth2client.FieldName: - return m.OldName(ctx) - case oauth2client.FieldLogoURL: - return m.OldLogoURL(ctx) + case keys.FieldVerificationKeys: + return m.OldVerificationKeys(ctx) + case keys.FieldSigningKey: + return m.OldSigningKey(ctx) + case keys.FieldSigningKeyPub: + return m.OldSigningKeyPub(ctx) + case keys.FieldNextRotation: + return m.OldNextRotation(ctx) } - return nil, fmt.Errorf("unknown OAuth2Client field %s", name) + return nil, fmt.Errorf("unknown Keys field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *OAuth2ClientMutation) SetField(name string, value ent.Value) error { +func (m *KeysMutation) SetField(name string, value ent.Value) error { switch name { - case oauth2client.FieldSecret: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetSecret(v) - return nil - case oauth2client.FieldRedirectUris: - v, ok := value.([]string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRedirectUris(v) - return nil - case oauth2client.FieldTrustedPeers: - v, ok := value.([]string) + case keys.FieldVerificationKeys: + v, ok := value.([]storage.VerificationKey) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetTrustedPeers(v) + m.SetVerificationKeys(v) return nil - case oauth2client.FieldPublic: - v, ok := value.(bool) + case keys.FieldSigningKey: + v, ok := value.(jose.JSONWebKey) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetPublic(v) + m.SetSigningKey(v) return nil - case oauth2client.FieldName: - v, ok := value.(string) + case keys.FieldSigningKeyPub: + v, ok := value.(jose.JSONWebKey) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetName(v) + m.SetSigningKeyPub(v) return nil - case oauth2client.FieldLogoURL: - v, ok := value.(string) + case keys.FieldNextRotation: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetLogoURL(v) + m.SetNextRotation(v) return nil } - return fmt.Errorf("unknown OAuth2Client field %s", name) + return fmt.Errorf("unknown Keys field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *OAuth2ClientMutation) AddedFields() []string { +func (m *KeysMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. 
The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *OAuth2ClientMutation) AddedField(name string) (ent.Value, bool) { +func (m *KeysMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *OAuth2ClientMutation) AddField(name string, value ent.Value) error { +func (m *KeysMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown OAuth2Client numeric field %s", name) + return fmt.Errorf("unknown Keys numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *OAuth2ClientMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(oauth2client.FieldRedirectUris) { - fields = append(fields, oauth2client.FieldRedirectUris) - } - if m.FieldCleared(oauth2client.FieldTrustedPeers) { - fields = append(fields, oauth2client.FieldTrustedPeers) - } - return fields +func (m *KeysMutation) ClearedFields() []string { + return nil } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *OAuth2ClientMutation) FieldCleared(name string) bool { +func (m *KeysMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *OAuth2ClientMutation) ClearField(name string) error { - switch name { - case oauth2client.FieldRedirectUris: - m.ClearRedirectUris() - return nil - case oauth2client.FieldTrustedPeers: - m.ClearTrustedPeers() - return nil - } - return fmt.Errorf("unknown OAuth2Client nullable field %s", name) +func (m *KeysMutation) ClearField(name string) error { + return fmt.Errorf("unknown Keys nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *OAuth2ClientMutation) ResetField(name string) error { +func (m *KeysMutation) ResetField(name string) error { switch name { - case oauth2client.FieldSecret: - m.ResetSecret() - return nil - case oauth2client.FieldRedirectUris: - m.ResetRedirectUris() - return nil - case oauth2client.FieldTrustedPeers: - m.ResetTrustedPeers() + case keys.FieldVerificationKeys: + m.ResetVerificationKeys() return nil - case oauth2client.FieldPublic: - m.ResetPublic() + case keys.FieldSigningKey: + m.ResetSigningKey() return nil - case oauth2client.FieldName: - m.ResetName() + case keys.FieldSigningKeyPub: + m.ResetSigningKeyPub() return nil - case oauth2client.FieldLogoURL: - m.ResetLogoURL() + case keys.FieldNextRotation: + m.ResetNextRotation() return nil } - return fmt.Errorf("unknown OAuth2Client field %s", name) + return fmt.Errorf("unknown Keys field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *OAuth2ClientMutation) AddedEdges() []string { +func (m *KeysMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *OAuth2ClientMutation) AddedIDs(name string) []ent.Value { +func (m *KeysMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
-func (m *OAuth2ClientMutation) RemovedEdges() []string { +func (m *KeysMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *OAuth2ClientMutation) RemovedIDs(name string) []ent.Value { +func (m *KeysMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *OAuth2ClientMutation) ClearedEdges() []string { +func (m *KeysMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *OAuth2ClientMutation) EdgeCleared(name string) bool { +func (m *KeysMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *OAuth2ClientMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown OAuth2Client unique edge %s", name) +func (m *KeysMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Keys unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *OAuth2ClientMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown OAuth2Client edge %s", name) +func (m *KeysMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Keys edge %s", name) } -// OfflineSessionMutation represents an operation that mutates the OfflineSession nodes in the graph. -type OfflineSessionMutation struct { +// OAuth2ClientMutation represents an operation that mutates the OAuth2Client nodes in the graph. 
+type OAuth2ClientMutation struct { config - op Op - typ string - id *string - user_id *string - conn_id *string - refresh *[]byte - connector_data *[]byte - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*OfflineSession, error) - predicates []predicate.OfflineSession + op Op + typ string + id *string + secret *string + redirect_uris *[]string + appendredirect_uris []string + trusted_peers *[]string + appendtrusted_peers []string + public *bool + name *string + logo_url *string + allowed_connectors *[]string + appendallowed_connectors []string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*OAuth2Client, error) + predicates []predicate.OAuth2Client } -var _ ent.Mutation = (*OfflineSessionMutation)(nil) +var _ ent.Mutation = (*OAuth2ClientMutation)(nil) -// offlinesessionOption allows management of the mutation configuration using functional options. -type offlinesessionOption func(*OfflineSessionMutation) +// oauth2clientOption allows management of the mutation configuration using functional options. +type oauth2clientOption func(*OAuth2ClientMutation) -// newOfflineSessionMutation creates new mutation for the OfflineSession entity. -func newOfflineSessionMutation(c config, op Op, opts ...offlinesessionOption) *OfflineSessionMutation { - m := &OfflineSessionMutation{ +// newOAuth2ClientMutation creates new mutation for the OAuth2Client entity. +func newOAuth2ClientMutation(c config, op Op, opts ...oauth2clientOption) *OAuth2ClientMutation { + m := &OAuth2ClientMutation{ config: c, op: op, - typ: TypeOfflineSession, + typ: TypeOAuth2Client, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -5830,20 +5804,20 @@ func newOfflineSessionMutation(c config, op Op, opts ...offlinesessionOption) *O return m } -// withOfflineSessionID sets the ID field of the mutation. 
-func withOfflineSessionID(id string) offlinesessionOption { - return func(m *OfflineSessionMutation) { +// withOAuth2ClientID sets the ID field of the mutation. +func withOAuth2ClientID(id string) oauth2clientOption { + return func(m *OAuth2ClientMutation) { var ( err error once sync.Once - value *OfflineSession + value *OAuth2Client ) - m.oldValue = func(ctx context.Context) (*OfflineSession, error) { + m.oldValue = func(ctx context.Context) (*OAuth2Client, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().OfflineSession.Get(ctx, id) + value, err = m.Client().OAuth2Client.Get(ctx, id) } }) return value, err @@ -5852,10 +5826,10 @@ func withOfflineSessionID(id string) offlinesessionOption { } } -// withOfflineSession sets the old OfflineSession of the mutation. -func withOfflineSession(node *OfflineSession) offlinesessionOption { - return func(m *OfflineSessionMutation) { - m.oldValue = func(context.Context) (*OfflineSession, error) { +// withOAuth2Client sets the old OAuth2Client of the mutation. +func withOAuth2Client(node *OAuth2Client) oauth2clientOption { + return func(m *OAuth2ClientMutation) { + m.oldValue = func(context.Context) (*OAuth2Client, error) { return node, nil } m.id = &node.ID @@ -5864,7 +5838,7 @@ func withOfflineSession(node *OfflineSession) offlinesessionOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m OfflineSessionMutation) Client() *Client { +func (m OAuth2ClientMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -5872,7 +5846,7 @@ func (m OfflineSessionMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. 
-func (m OfflineSessionMutation) Tx() (*Tx, error) { +func (m OAuth2ClientMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -5882,14 +5856,14 @@ func (m OfflineSessionMutation) Tx() (*Tx, error) { } // SetID sets the value of the id field. Note that this -// operation is only accepted on creation of OfflineSession entities. -func (m *OfflineSessionMutation) SetID(id string) { +// operation is only accepted on creation of OAuth2Client entities. +func (m *OAuth2ClientMutation) SetID(id string) { m.id = &id } // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *OfflineSessionMutation) ID() (id string, exists bool) { +func (m *OAuth2ClientMutation) ID() (id string, exists bool) { if m.id == nil { return } @@ -5900,7 +5874,7 @@ func (m *OfflineSessionMutation) ID() (id string, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *OfflineSessionMutation) IDs(ctx context.Context) ([]string, error) { +func (m *OAuth2ClientMutation) IDs(ctx context.Context) ([]string, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -5909,215 +5883,406 @@ func (m *OfflineSessionMutation) IDs(ctx context.Context) ([]string, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().OfflineSession.Query().Where(m.predicates...).IDs(ctx) + return m.Client().OAuth2Client.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetUserID sets the "user_id" field. -func (m *OfflineSessionMutation) SetUserID(s string) { - m.user_id = &s +// SetSecret sets the "secret" field. 
+func (m *OAuth2ClientMutation) SetSecret(s string) { + m.secret = &s } -// UserID returns the value of the "user_id" field in the mutation. -func (m *OfflineSessionMutation) UserID() (r string, exists bool) { - v := m.user_id +// Secret returns the value of the "secret" field in the mutation. +func (m *OAuth2ClientMutation) Secret() (r string, exists bool) { + v := m.secret if v == nil { return } return *v, true } -// OldUserID returns the old "user_id" field's value of the OfflineSession entity. -// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// OldSecret returns the old "secret" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OfflineSessionMutation) OldUserID(ctx context.Context) (v string, err error) { +func (m *OAuth2ClientMutation) OldSecret(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUserID is only allowed on UpdateOne operations") + return v, errors.New("OldSecret is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUserID requires an ID field in the mutation") + return v, errors.New("OldSecret requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldUserID: %w", err) + return v, fmt.Errorf("querying old value for OldSecret: %w", err) } - return oldValue.UserID, nil + return oldValue.Secret, nil } -// ResetUserID resets all changes to the "user_id" field. -func (m *OfflineSessionMutation) ResetUserID() { - m.user_id = nil +// ResetSecret resets all changes to the "secret" field. +func (m *OAuth2ClientMutation) ResetSecret() { + m.secret = nil } -// SetConnID sets the "conn_id" field. 
-func (m *OfflineSessionMutation) SetConnID(s string) { - m.conn_id = &s +// SetRedirectUris sets the "redirect_uris" field. +func (m *OAuth2ClientMutation) SetRedirectUris(s []string) { + m.redirect_uris = &s + m.appendredirect_uris = nil } -// ConnID returns the value of the "conn_id" field in the mutation. -func (m *OfflineSessionMutation) ConnID() (r string, exists bool) { - v := m.conn_id +// RedirectUris returns the value of the "redirect_uris" field in the mutation. +func (m *OAuth2ClientMutation) RedirectUris() (r []string, exists bool) { + v := m.redirect_uris if v == nil { return } return *v, true } -// OldConnID returns the old "conn_id" field's value of the OfflineSession entity. -// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// OldRedirectUris returns the old "redirect_uris" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OfflineSessionMutation) OldConnID(ctx context.Context) (v string, err error) { +func (m *OAuth2ClientMutation) OldRedirectUris(ctx context.Context) (v []string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldConnID is only allowed on UpdateOne operations") + return v, errors.New("OldRedirectUris is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldConnID requires an ID field in the mutation") + return v, errors.New("OldRedirectUris requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldConnID: %w", err) + return v, fmt.Errorf("querying old value for OldRedirectUris: %w", err) } - return oldValue.ConnID, nil + return oldValue.RedirectUris, nil } -// ResetConnID resets all changes to the "conn_id" field. 
-func (m *OfflineSessionMutation) ResetConnID() { - m.conn_id = nil +// AppendRedirectUris adds s to the "redirect_uris" field. +func (m *OAuth2ClientMutation) AppendRedirectUris(s []string) { + m.appendredirect_uris = append(m.appendredirect_uris, s...) } -// SetRefresh sets the "refresh" field. -func (m *OfflineSessionMutation) SetRefresh(b []byte) { - m.refresh = &b +// AppendedRedirectUris returns the list of values that were appended to the "redirect_uris" field in this mutation. +func (m *OAuth2ClientMutation) AppendedRedirectUris() ([]string, bool) { + if len(m.appendredirect_uris) == 0 { + return nil, false + } + return m.appendredirect_uris, true } -// Refresh returns the value of the "refresh" field in the mutation. -func (m *OfflineSessionMutation) Refresh() (r []byte, exists bool) { - v := m.refresh +// ClearRedirectUris clears the value of the "redirect_uris" field. +func (m *OAuth2ClientMutation) ClearRedirectUris() { + m.redirect_uris = nil + m.appendredirect_uris = nil + m.clearedFields[oauth2client.FieldRedirectUris] = struct{}{} +} + +// RedirectUrisCleared returns if the "redirect_uris" field was cleared in this mutation. +func (m *OAuth2ClientMutation) RedirectUrisCleared() bool { + _, ok := m.clearedFields[oauth2client.FieldRedirectUris] + return ok +} + +// ResetRedirectUris resets all changes to the "redirect_uris" field. +func (m *OAuth2ClientMutation) ResetRedirectUris() { + m.redirect_uris = nil + m.appendredirect_uris = nil + delete(m.clearedFields, oauth2client.FieldRedirectUris) +} + +// SetTrustedPeers sets the "trusted_peers" field. +func (m *OAuth2ClientMutation) SetTrustedPeers(s []string) { + m.trusted_peers = &s + m.appendtrusted_peers = nil +} + +// TrustedPeers returns the value of the "trusted_peers" field in the mutation. 
+func (m *OAuth2ClientMutation) TrustedPeers() (r []string, exists bool) { + v := m.trusted_peers if v == nil { return } return *v, true } -// OldRefresh returns the old "refresh" field's value of the OfflineSession entity. -// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// OldTrustedPeers returns the old "trusted_peers" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OfflineSessionMutation) OldRefresh(ctx context.Context) (v []byte, err error) { +func (m *OAuth2ClientMutation) OldTrustedPeers(ctx context.Context) (v []string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRefresh is only allowed on UpdateOne operations") + return v, errors.New("OldTrustedPeers is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRefresh requires an ID field in the mutation") + return v, errors.New("OldTrustedPeers requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldRefresh: %w", err) + return v, fmt.Errorf("querying old value for OldTrustedPeers: %w", err) } - return oldValue.Refresh, nil + return oldValue.TrustedPeers, nil } -// ResetRefresh resets all changes to the "refresh" field. -func (m *OfflineSessionMutation) ResetRefresh() { - m.refresh = nil +// AppendTrustedPeers adds s to the "trusted_peers" field. +func (m *OAuth2ClientMutation) AppendTrustedPeers(s []string) { + m.appendtrusted_peers = append(m.appendtrusted_peers, s...) } -// SetConnectorData sets the "connector_data" field. 
-func (m *OfflineSessionMutation) SetConnectorData(b []byte) { - m.connector_data = &b +// AppendedTrustedPeers returns the list of values that were appended to the "trusted_peers" field in this mutation. +func (m *OAuth2ClientMutation) AppendedTrustedPeers() ([]string, bool) { + if len(m.appendtrusted_peers) == 0 { + return nil, false + } + return m.appendtrusted_peers, true } -// ConnectorData returns the value of the "connector_data" field in the mutation. -func (m *OfflineSessionMutation) ConnectorData() (r []byte, exists bool) { - v := m.connector_data +// ClearTrustedPeers clears the value of the "trusted_peers" field. +func (m *OAuth2ClientMutation) ClearTrustedPeers() { + m.trusted_peers = nil + m.appendtrusted_peers = nil + m.clearedFields[oauth2client.FieldTrustedPeers] = struct{}{} +} + +// TrustedPeersCleared returns if the "trusted_peers" field was cleared in this mutation. +func (m *OAuth2ClientMutation) TrustedPeersCleared() bool { + _, ok := m.clearedFields[oauth2client.FieldTrustedPeers] + return ok +} + +// ResetTrustedPeers resets all changes to the "trusted_peers" field. +func (m *OAuth2ClientMutation) ResetTrustedPeers() { + m.trusted_peers = nil + m.appendtrusted_peers = nil + delete(m.clearedFields, oauth2client.FieldTrustedPeers) +} + +// SetPublic sets the "public" field. +func (m *OAuth2ClientMutation) SetPublic(b bool) { + m.public = &b +} + +// Public returns the value of the "public" field in the mutation. +func (m *OAuth2ClientMutation) Public() (r bool, exists bool) { + v := m.public if v == nil { return } return *v, true } -// OldConnectorData returns the old "connector_data" field's value of the OfflineSession entity. -// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// OldPublic returns the old "public" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. 
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *OfflineSessionMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { +func (m *OAuth2ClientMutation) OldPublic(ctx context.Context) (v bool, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") + return v, errors.New("OldPublic is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldConnectorData requires an ID field in the mutation") + return v, errors.New("OldPublic requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) + return v, fmt.Errorf("querying old value for OldPublic: %w", err) } - return oldValue.ConnectorData, nil + return oldValue.Public, nil } -// ClearConnectorData clears the value of the "connector_data" field. -func (m *OfflineSessionMutation) ClearConnectorData() { - m.connector_data = nil - m.clearedFields[offlinesession.FieldConnectorData] = struct{}{} +// ResetPublic resets all changes to the "public" field. +func (m *OAuth2ClientMutation) ResetPublic() { + m.public = nil } -// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. -func (m *OfflineSessionMutation) ConnectorDataCleared() bool { - _, ok := m.clearedFields[offlinesession.FieldConnectorData] - return ok +// SetName sets the "name" field. +func (m *OAuth2ClientMutation) SetName(s string) { + m.name = &s } -// ResetConnectorData resets all changes to the "connector_data" field. -func (m *OfflineSessionMutation) ResetConnectorData() { - m.connector_data = nil - delete(m.clearedFields, offlinesession.FieldConnectorData) +// Name returns the value of the "name" field in the mutation. 
+func (m *OAuth2ClientMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true } -// Where appends a list predicates to the OfflineSessionMutation builder. -func (m *OfflineSessionMutation) Where(ps ...predicate.OfflineSession) { - m.predicates = append(m.predicates, ps...) +// OldName returns the old "name" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OAuth2ClientMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil } -// WhereP appends storage-level predicates to the OfflineSessionMutation builder. Using this method, -// users can use type-assertion to append predicates that do not depend on any generated package. -func (m *OfflineSessionMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.OfflineSession, len(ps)) - for i := range ps { - p[i] = ps[i] - } - m.Where(p...) +// ResetName resets all changes to the "name" field. +func (m *OAuth2ClientMutation) ResetName() { + m.name = nil } -// Op returns the operation name. -func (m *OfflineSessionMutation) Op() Op { - return m.op +// SetLogoURL sets the "logo_url" field. +func (m *OAuth2ClientMutation) SetLogoURL(s string) { + m.logo_url = &s } -// SetOp allows setting the mutation operation. -func (m *OfflineSessionMutation) SetOp(op Op) { - m.op = op +// LogoURL returns the value of the "logo_url" field in the mutation. 
+func (m *OAuth2ClientMutation) LogoURL() (r string, exists bool) { + v := m.logo_url + if v == nil { + return + } + return *v, true } -// Type returns the node type of this mutation (OfflineSession). -func (m *OfflineSessionMutation) Type() string { +// OldLogoURL returns the old "logo_url" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OAuth2ClientMutation) OldLogoURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLogoURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLogoURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLogoURL: %w", err) + } + return oldValue.LogoURL, nil +} + +// ResetLogoURL resets all changes to the "logo_url" field. +func (m *OAuth2ClientMutation) ResetLogoURL() { + m.logo_url = nil +} + +// SetAllowedConnectors sets the "allowed_connectors" field. +func (m *OAuth2ClientMutation) SetAllowedConnectors(s []string) { + m.allowed_connectors = &s + m.appendallowed_connectors = nil +} + +// AllowedConnectors returns the value of the "allowed_connectors" field in the mutation. +func (m *OAuth2ClientMutation) AllowedConnectors() (r []string, exists bool) { + v := m.allowed_connectors + if v == nil { + return + } + return *v, true +} + +// OldAllowedConnectors returns the old "allowed_connectors" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *OAuth2ClientMutation) OldAllowedConnectors(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAllowedConnectors is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAllowedConnectors requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAllowedConnectors: %w", err) + } + return oldValue.AllowedConnectors, nil +} + +// AppendAllowedConnectors adds s to the "allowed_connectors" field. +func (m *OAuth2ClientMutation) AppendAllowedConnectors(s []string) { + m.appendallowed_connectors = append(m.appendallowed_connectors, s...) +} + +// AppendedAllowedConnectors returns the list of values that were appended to the "allowed_connectors" field in this mutation. +func (m *OAuth2ClientMutation) AppendedAllowedConnectors() ([]string, bool) { + if len(m.appendallowed_connectors) == 0 { + return nil, false + } + return m.appendallowed_connectors, true +} + +// ClearAllowedConnectors clears the value of the "allowed_connectors" field. +func (m *OAuth2ClientMutation) ClearAllowedConnectors() { + m.allowed_connectors = nil + m.appendallowed_connectors = nil + m.clearedFields[oauth2client.FieldAllowedConnectors] = struct{}{} +} + +// AllowedConnectorsCleared returns if the "allowed_connectors" field was cleared in this mutation. +func (m *OAuth2ClientMutation) AllowedConnectorsCleared() bool { + _, ok := m.clearedFields[oauth2client.FieldAllowedConnectors] + return ok +} + +// ResetAllowedConnectors resets all changes to the "allowed_connectors" field. +func (m *OAuth2ClientMutation) ResetAllowedConnectors() { + m.allowed_connectors = nil + m.appendallowed_connectors = nil + delete(m.clearedFields, oauth2client.FieldAllowedConnectors) +} + +// Where appends a list predicates to the OAuth2ClientMutation builder. 
+func (m *OAuth2ClientMutation) Where(ps ...predicate.OAuth2Client) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the OAuth2ClientMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *OAuth2ClientMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.OAuth2Client, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *OAuth2ClientMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *OAuth2ClientMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (OAuth2Client). +func (m *OAuth2ClientMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *OfflineSessionMutation) Fields() []string { - fields := make([]string, 0, 4) - if m.user_id != nil { - fields = append(fields, offlinesession.FieldUserID) +func (m *OAuth2ClientMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.secret != nil { + fields = append(fields, oauth2client.FieldSecret) } - if m.conn_id != nil { - fields = append(fields, offlinesession.FieldConnID) + if m.redirect_uris != nil { + fields = append(fields, oauth2client.FieldRedirectUris) } - if m.refresh != nil { - fields = append(fields, offlinesession.FieldRefresh) + if m.trusted_peers != nil { + fields = append(fields, oauth2client.FieldTrustedPeers) } - if m.connector_data != nil { - fields = append(fields, offlinesession.FieldConnectorData) + if m.public != nil { + fields = append(fields, oauth2client.FieldPublic) + } + if m.name != nil { + fields = append(fields, oauth2client.FieldName) + } + if m.logo_url != nil { + fields = append(fields, oauth2client.FieldLogoURL) + } + if m.allowed_connectors != nil { + fields = append(fields, oauth2client.FieldAllowedConnectors) } return fields } @@ -6125,16 +6290,22 @@ func (m *OfflineSessionMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. 
-func (m *OfflineSessionMutation) Field(name string) (ent.Value, bool) { +func (m *OAuth2ClientMutation) Field(name string) (ent.Value, bool) { switch name { - case offlinesession.FieldUserID: - return m.UserID() - case offlinesession.FieldConnID: - return m.ConnID() - case offlinesession.FieldRefresh: - return m.Refresh() - case offlinesession.FieldConnectorData: - return m.ConnectorData() + case oauth2client.FieldSecret: + return m.Secret() + case oauth2client.FieldRedirectUris: + return m.RedirectUris() + case oauth2client.FieldTrustedPeers: + return m.TrustedPeers() + case oauth2client.FieldPublic: + return m.Public() + case oauth2client.FieldName: + return m.Name() + case oauth2client.FieldLogoURL: + return m.LogoURL() + case oauth2client.FieldAllowedConnectors: + return m.AllowedConnectors() } return nil, false } @@ -6142,633 +6313,2254 @@ func (m *OfflineSessionMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *OfflineSessionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *OAuth2ClientMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case offlinesession.FieldUserID: - return m.OldUserID(ctx) - case offlinesession.FieldConnID: - return m.OldConnID(ctx) - case offlinesession.FieldRefresh: - return m.OldRefresh(ctx) - case offlinesession.FieldConnectorData: - return m.OldConnectorData(ctx) + case oauth2client.FieldSecret: + return m.OldSecret(ctx) + case oauth2client.FieldRedirectUris: + return m.OldRedirectUris(ctx) + case oauth2client.FieldTrustedPeers: + return m.OldTrustedPeers(ctx) + case oauth2client.FieldPublic: + return m.OldPublic(ctx) + case oauth2client.FieldName: + return m.OldName(ctx) + case oauth2client.FieldLogoURL: + return m.OldLogoURL(ctx) + case oauth2client.FieldAllowedConnectors: + return m.OldAllowedConnectors(ctx) } - return nil, fmt.Errorf("unknown OfflineSession field %s", name) + return nil, fmt.Errorf("unknown OAuth2Client field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *OfflineSessionMutation) SetField(name string, value ent.Value) error { +func (m *OAuth2ClientMutation) SetField(name string, value ent.Value) error { switch name { - case offlinesession.FieldUserID: + case oauth2client.FieldSecret: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetUserID(v) + m.SetSecret(v) return nil - case offlinesession.FieldConnID: + case oauth2client.FieldRedirectUris: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRedirectUris(v) + return nil + case oauth2client.FieldTrustedPeers: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTrustedPeers(v) + return nil + case oauth2client.FieldPublic: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPublic(v) + return nil + case oauth2client.FieldName: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetConnID(v) + m.SetName(v) return nil - case offlinesession.FieldRefresh: - v, ok := value.([]byte) + case oauth2client.FieldLogoURL: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetRefresh(v) + m.SetLogoURL(v) return nil - case offlinesession.FieldConnectorData: - v, ok := value.([]byte) + case oauth2client.FieldAllowedConnectors: + v, ok := value.([]string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetConnectorData(v) + m.SetAllowedConnectors(v) return nil } - return fmt.Errorf("unknown OfflineSession field %s", name) + return fmt.Errorf("unknown OAuth2Client field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. 
-func (m *OfflineSessionMutation) AddedFields() []string { +func (m *OAuth2ClientMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *OfflineSessionMutation) AddedField(name string) (ent.Value, bool) { +func (m *OAuth2ClientMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *OfflineSessionMutation) AddField(name string, value ent.Value) error { +func (m *OAuth2ClientMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown OfflineSession numeric field %s", name) + return fmt.Errorf("unknown OAuth2Client numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *OfflineSessionMutation) ClearedFields() []string { +func (m *OAuth2ClientMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(offlinesession.FieldConnectorData) { - fields = append(fields, offlinesession.FieldConnectorData) + if m.FieldCleared(oauth2client.FieldRedirectUris) { + fields = append(fields, oauth2client.FieldRedirectUris) + } + if m.FieldCleared(oauth2client.FieldTrustedPeers) { + fields = append(fields, oauth2client.FieldTrustedPeers) + } + if m.FieldCleared(oauth2client.FieldAllowedConnectors) { + fields = append(fields, oauth2client.FieldAllowedConnectors) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. 
-func (m *OfflineSessionMutation) FieldCleared(name string) bool { +func (m *OAuth2ClientMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *OfflineSessionMutation) ClearField(name string) error { +func (m *OAuth2ClientMutation) ClearField(name string) error { switch name { - case offlinesession.FieldConnectorData: - m.ClearConnectorData() + case oauth2client.FieldRedirectUris: + m.ClearRedirectUris() + return nil + case oauth2client.FieldTrustedPeers: + m.ClearTrustedPeers() + return nil + case oauth2client.FieldAllowedConnectors: + m.ClearAllowedConnectors() return nil } - return fmt.Errorf("unknown OfflineSession nullable field %s", name) + return fmt.Errorf("unknown OAuth2Client nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. 
-func (m *OfflineSessionMutation) ResetField(name string) error { +func (m *OAuth2ClientMutation) ResetField(name string) error { switch name { - case offlinesession.FieldUserID: - m.ResetUserID() + case oauth2client.FieldSecret: + m.ResetSecret() return nil - case offlinesession.FieldConnID: - m.ResetConnID() + case oauth2client.FieldRedirectUris: + m.ResetRedirectUris() return nil - case offlinesession.FieldRefresh: - m.ResetRefresh() + case oauth2client.FieldTrustedPeers: + m.ResetTrustedPeers() return nil - case offlinesession.FieldConnectorData: - m.ResetConnectorData() + case oauth2client.FieldPublic: + m.ResetPublic() + return nil + case oauth2client.FieldName: + m.ResetName() + return nil + case oauth2client.FieldLogoURL: + m.ResetLogoURL() + return nil + case oauth2client.FieldAllowedConnectors: + m.ResetAllowedConnectors() return nil } - return fmt.Errorf("unknown OfflineSession field %s", name) + return fmt.Errorf("unknown OAuth2Client field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *OfflineSessionMutation) AddedEdges() []string { +func (m *OAuth2ClientMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *OfflineSessionMutation) AddedIDs(name string) []ent.Value { +func (m *OAuth2ClientMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *OfflineSessionMutation) RemovedEdges() []string { +func (m *OAuth2ClientMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. 
-func (m *OfflineSessionMutation) RemovedIDs(name string) []ent.Value { +func (m *OAuth2ClientMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *OfflineSessionMutation) ClearedEdges() []string { +func (m *OAuth2ClientMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *OfflineSessionMutation) EdgeCleared(name string) bool { +func (m *OAuth2ClientMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *OfflineSessionMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown OfflineSession unique edge %s", name) +func (m *OAuth2ClientMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown OAuth2Client unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *OfflineSessionMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown OfflineSession edge %s", name) +func (m *OAuth2ClientMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown OAuth2Client edge %s", name) } -// PasswordMutation represents an operation that mutates the Password nodes in the graph. -type PasswordMutation struct { +// OfflineSessionMutation represents an operation that mutates the OfflineSession nodes in the graph. 
+type OfflineSessionMutation struct { config - op Op - typ string - id *int - email *string - hash *[]byte - username *string - name *string - preferred_username *string - email_verified *bool - user_id *string - groups *[]string - appendgroups []string - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*Password, error) - predicates []predicate.Password + op Op + typ string + id *string + user_id *string + conn_id *string + refresh *[]byte + connector_data *[]byte + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*OfflineSession, error) + predicates []predicate.OfflineSession +} + +var _ ent.Mutation = (*OfflineSessionMutation)(nil) + +// offlinesessionOption allows management of the mutation configuration using functional options. +type offlinesessionOption func(*OfflineSessionMutation) + +// newOfflineSessionMutation creates new mutation for the OfflineSession entity. +func newOfflineSessionMutation(c config, op Op, opts ...offlinesessionOption) *OfflineSessionMutation { + m := &OfflineSessionMutation{ + config: c, + op: op, + typ: TypeOfflineSession, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withOfflineSessionID sets the ID field of the mutation. +func withOfflineSessionID(id string) offlinesessionOption { + return func(m *OfflineSessionMutation) { + var ( + err error + once sync.Once + value *OfflineSession + ) + m.oldValue = func(ctx context.Context) (*OfflineSession, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().OfflineSession.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withOfflineSession sets the old OfflineSession of the mutation. 
+func withOfflineSession(node *OfflineSession) offlinesessionOption { + return func(m *OfflineSessionMutation) { + m.oldValue = func(context.Context) (*OfflineSession, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m OfflineSessionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m OfflineSessionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("db: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of OfflineSession entities. +func (m *OfflineSessionMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *OfflineSessionMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *OfflineSessionMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().OfflineSession.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetUserID sets the "user_id" field. +func (m *OfflineSessionMutation) SetUserID(s string) { + m.user_id = &s +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *OfflineSessionMutation) UserID() (r string, exists bool) { + v := m.user_id + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the OfflineSession entity. +// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OfflineSessionMutation) OldUserID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *OfflineSessionMutation) ResetUserID() { + m.user_id = nil +} + +// SetConnID sets the "conn_id" field. +func (m *OfflineSessionMutation) SetConnID(s string) { + m.conn_id = &s +} + +// ConnID returns the value of the "conn_id" field in the mutation. 
+func (m *OfflineSessionMutation) ConnID() (r string, exists bool) { + v := m.conn_id + if v == nil { + return + } + return *v, true +} + +// OldConnID returns the old "conn_id" field's value of the OfflineSession entity. +// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OfflineSessionMutation) OldConnID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnID: %w", err) + } + return oldValue.ConnID, nil +} + +// ResetConnID resets all changes to the "conn_id" field. +func (m *OfflineSessionMutation) ResetConnID() { + m.conn_id = nil +} + +// SetRefresh sets the "refresh" field. +func (m *OfflineSessionMutation) SetRefresh(b []byte) { + m.refresh = &b +} + +// Refresh returns the value of the "refresh" field in the mutation. +func (m *OfflineSessionMutation) Refresh() (r []byte, exists bool) { + v := m.refresh + if v == nil { + return + } + return *v, true +} + +// OldRefresh returns the old "refresh" field's value of the OfflineSession entity. +// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *OfflineSessionMutation) OldRefresh(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRefresh is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRefresh requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRefresh: %w", err) + } + return oldValue.Refresh, nil +} + +// ResetRefresh resets all changes to the "refresh" field. +func (m *OfflineSessionMutation) ResetRefresh() { + m.refresh = nil +} + +// SetConnectorData sets the "connector_data" field. +func (m *OfflineSessionMutation) SetConnectorData(b []byte) { + m.connector_data = &b +} + +// ConnectorData returns the value of the "connector_data" field in the mutation. +func (m *OfflineSessionMutation) ConnectorData() (r []byte, exists bool) { + v := m.connector_data + if v == nil { + return + } + return *v, true +} + +// OldConnectorData returns the old "connector_data" field's value of the OfflineSession entity. +// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OfflineSessionMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorData requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) + } + return oldValue.ConnectorData, nil +} + +// ClearConnectorData clears the value of the "connector_data" field. 
+func (m *OfflineSessionMutation) ClearConnectorData() { + m.connector_data = nil + m.clearedFields[offlinesession.FieldConnectorData] = struct{}{} +} + +// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. +func (m *OfflineSessionMutation) ConnectorDataCleared() bool { + _, ok := m.clearedFields[offlinesession.FieldConnectorData] + return ok +} + +// ResetConnectorData resets all changes to the "connector_data" field. +func (m *OfflineSessionMutation) ResetConnectorData() { + m.connector_data = nil + delete(m.clearedFields, offlinesession.FieldConnectorData) +} + +// Where appends a list predicates to the OfflineSessionMutation builder. +func (m *OfflineSessionMutation) Where(ps ...predicate.OfflineSession) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the OfflineSessionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *OfflineSessionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.OfflineSession, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *OfflineSessionMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *OfflineSessionMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (OfflineSession). +func (m *OfflineSessionMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *OfflineSessionMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.user_id != nil { + fields = append(fields, offlinesession.FieldUserID) + } + if m.conn_id != nil { + fields = append(fields, offlinesession.FieldConnID) + } + if m.refresh != nil { + fields = append(fields, offlinesession.FieldRefresh) + } + if m.connector_data != nil { + fields = append(fields, offlinesession.FieldConnectorData) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *OfflineSessionMutation) Field(name string) (ent.Value, bool) { + switch name { + case offlinesession.FieldUserID: + return m.UserID() + case offlinesession.FieldConnID: + return m.ConnID() + case offlinesession.FieldRefresh: + return m.Refresh() + case offlinesession.FieldConnectorData: + return m.ConnectorData() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *OfflineSessionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case offlinesession.FieldUserID: + return m.OldUserID(ctx) + case offlinesession.FieldConnID: + return m.OldConnID(ctx) + case offlinesession.FieldRefresh: + return m.OldRefresh(ctx) + case offlinesession.FieldConnectorData: + return m.OldConnectorData(ctx) + } + return nil, fmt.Errorf("unknown OfflineSession field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *OfflineSessionMutation) SetField(name string, value ent.Value) error { + switch name { + case offlinesession.FieldUserID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case offlinesession.FieldConnID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnID(v) + return nil + case offlinesession.FieldRefresh: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRefresh(v) + return nil + case offlinesession.FieldConnectorData: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorData(v) + return nil + } + return fmt.Errorf("unknown OfflineSession field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *OfflineSessionMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *OfflineSessionMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *OfflineSessionMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown OfflineSession numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *OfflineSessionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(offlinesession.FieldConnectorData) { + fields = append(fields, offlinesession.FieldConnectorData) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *OfflineSessionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *OfflineSessionMutation) ClearField(name string) error { + switch name { + case offlinesession.FieldConnectorData: + m.ClearConnectorData() + return nil + } + return fmt.Errorf("unknown OfflineSession nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *OfflineSessionMutation) ResetField(name string) error { + switch name { + case offlinesession.FieldUserID: + m.ResetUserID() + return nil + case offlinesession.FieldConnID: + m.ResetConnID() + return nil + case offlinesession.FieldRefresh: + m.ResetRefresh() + return nil + case offlinesession.FieldConnectorData: + m.ResetConnectorData() + return nil + } + return fmt.Errorf("unknown OfflineSession field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *OfflineSessionMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *OfflineSessionMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *OfflineSessionMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *OfflineSessionMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *OfflineSessionMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *OfflineSessionMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *OfflineSessionMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown OfflineSession unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *OfflineSessionMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown OfflineSession edge %s", name) +} + +// PasswordMutation represents an operation that mutates the Password nodes in the graph. +type PasswordMutation struct { + config + op Op + typ string + id *int + email *string + hash *[]byte + username *string + name *string + preferred_username *string + email_verified *bool + user_id *string + groups *[]string + appendgroups []string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Password, error) + predicates []predicate.Password +} + +var _ ent.Mutation = (*PasswordMutation)(nil) + +// passwordOption allows management of the mutation configuration using functional options. +type passwordOption func(*PasswordMutation) + +// newPasswordMutation creates new mutation for the Password entity. 
+func newPasswordMutation(c config, op Op, opts ...passwordOption) *PasswordMutation { + m := &PasswordMutation{ + config: c, + op: op, + typ: TypePassword, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPasswordID sets the ID field of the mutation. +func withPasswordID(id int) passwordOption { + return func(m *PasswordMutation) { + var ( + err error + once sync.Once + value *Password + ) + m.oldValue = func(ctx context.Context) (*Password, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Password.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPassword sets the old Password of the mutation. +func withPassword(node *Password) passwordOption { + return func(m *PasswordMutation) { + m.oldValue = func(context.Context) (*Password, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PasswordMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PasswordMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("db: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PasswordMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PasswordMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Password.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetEmail sets the "email" field. +func (m *PasswordMutation) SetEmail(s string) { + m.email = &s +} + +// Email returns the value of the "email" field in the mutation. +func (m *PasswordMutation) Email() (r string, exists bool) { + v := m.email + if v == nil { + return + } + return *v, true +} + +// OldEmail returns the old "email" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEmail: %w", err) + } + return oldValue.Email, nil +} + +// ResetEmail resets all changes to the "email" field. +func (m *PasswordMutation) ResetEmail() { + m.email = nil +} + +// SetHash sets the "hash" field. +func (m *PasswordMutation) SetHash(b []byte) { + m.hash = &b +} + +// Hash returns the value of the "hash" field in the mutation. 
+func (m *PasswordMutation) Hash() (r []byte, exists bool) { + v := m.hash + if v == nil { + return + } + return *v, true +} + +// OldHash returns the old "hash" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldHash(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHash: %w", err) + } + return oldValue.Hash, nil +} + +// ResetHash resets all changes to the "hash" field. +func (m *PasswordMutation) ResetHash() { + m.hash = nil +} + +// SetUsername sets the "username" field. +func (m *PasswordMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *PasswordMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PasswordMutation) OldUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsername: %w", err) + } + return oldValue.Username, nil +} + +// ResetUsername resets all changes to the "username" field. +func (m *PasswordMutation) ResetUsername() { + m.username = nil +} + +// SetName sets the "name" field. +func (m *PasswordMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *PasswordMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *PasswordMutation) ResetName() { + m.name = nil +} + +// SetPreferredUsername sets the "preferred_username" field. +func (m *PasswordMutation) SetPreferredUsername(s string) { + m.preferred_username = &s +} + +// PreferredUsername returns the value of the "preferred_username" field in the mutation. 
+func (m *PasswordMutation) PreferredUsername() (r string, exists bool) { + v := m.preferred_username + if v == nil { + return + } + return *v, true +} + +// OldPreferredUsername returns the old "preferred_username" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldPreferredUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPreferredUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPreferredUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPreferredUsername: %w", err) + } + return oldValue.PreferredUsername, nil +} + +// ResetPreferredUsername resets all changes to the "preferred_username" field. +func (m *PasswordMutation) ResetPreferredUsername() { + m.preferred_username = nil +} + +// SetEmailVerified sets the "email_verified" field. +func (m *PasswordMutation) SetEmailVerified(b bool) { + m.email_verified = &b +} + +// EmailVerified returns the value of the "email_verified" field in the mutation. +func (m *PasswordMutation) EmailVerified() (r bool, exists bool) { + v := m.email_verified + if v == nil { + return + } + return *v, true +} + +// OldEmailVerified returns the old "email_verified" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PasswordMutation) OldEmailVerified(ctx context.Context) (v *bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEmailVerified is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEmailVerified requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEmailVerified: %w", err) + } + return oldValue.EmailVerified, nil +} + +// ClearEmailVerified clears the value of the "email_verified" field. +func (m *PasswordMutation) ClearEmailVerified() { + m.email_verified = nil + m.clearedFields[password.FieldEmailVerified] = struct{}{} +} + +// EmailVerifiedCleared returns if the "email_verified" field was cleared in this mutation. +func (m *PasswordMutation) EmailVerifiedCleared() bool { + _, ok := m.clearedFields[password.FieldEmailVerified] + return ok +} + +// ResetEmailVerified resets all changes to the "email_verified" field. +func (m *PasswordMutation) ResetEmailVerified() { + m.email_verified = nil + delete(m.clearedFields, password.FieldEmailVerified) +} + +// SetUserID sets the "user_id" field. +func (m *PasswordMutation) SetUserID(s string) { + m.user_id = &s +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *PasswordMutation) UserID() (r string, exists bool) { + v := m.user_id + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PasswordMutation) OldUserID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *PasswordMutation) ResetUserID() { + m.user_id = nil +} + +// SetGroups sets the "groups" field. +func (m *PasswordMutation) SetGroups(s []string) { + m.groups = &s + m.appendgroups = nil +} + +// Groups returns the value of the "groups" field in the mutation. +func (m *PasswordMutation) Groups() (r []string, exists bool) { + v := m.groups + if v == nil { + return + } + return *v, true +} + +// OldGroups returns the old "groups" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldGroups(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroups is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroups requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroups: %w", err) + } + return oldValue.Groups, nil +} + +// AppendGroups adds s to the "groups" field. +func (m *PasswordMutation) AppendGroups(s []string) { + m.appendgroups = append(m.appendgroups, s...) +} + +// AppendedGroups returns the list of values that were appended to the "groups" field in this mutation. 
+func (m *PasswordMutation) AppendedGroups() ([]string, bool) { + if len(m.appendgroups) == 0 { + return nil, false + } + return m.appendgroups, true +} + +// ClearGroups clears the value of the "groups" field. +func (m *PasswordMutation) ClearGroups() { + m.groups = nil + m.appendgroups = nil + m.clearedFields[password.FieldGroups] = struct{}{} +} + +// GroupsCleared returns if the "groups" field was cleared in this mutation. +func (m *PasswordMutation) GroupsCleared() bool { + _, ok := m.clearedFields[password.FieldGroups] + return ok +} + +// ResetGroups resets all changes to the "groups" field. +func (m *PasswordMutation) ResetGroups() { + m.groups = nil + m.appendgroups = nil + delete(m.clearedFields, password.FieldGroups) +} + +// Where appends a list predicates to the PasswordMutation builder. +func (m *PasswordMutation) Where(ps ...predicate.Password) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PasswordMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PasswordMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Password, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PasswordMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PasswordMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Password). +func (m *PasswordMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *PasswordMutation) Fields() []string { + fields := make([]string, 0, 8) + if m.email != nil { + fields = append(fields, password.FieldEmail) + } + if m.hash != nil { + fields = append(fields, password.FieldHash) + } + if m.username != nil { + fields = append(fields, password.FieldUsername) + } + if m.name != nil { + fields = append(fields, password.FieldName) + } + if m.preferred_username != nil { + fields = append(fields, password.FieldPreferredUsername) + } + if m.email_verified != nil { + fields = append(fields, password.FieldEmailVerified) + } + if m.user_id != nil { + fields = append(fields, password.FieldUserID) + } + if m.groups != nil { + fields = append(fields, password.FieldGroups) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PasswordMutation) Field(name string) (ent.Value, bool) { + switch name { + case password.FieldEmail: + return m.Email() + case password.FieldHash: + return m.Hash() + case password.FieldUsername: + return m.Username() + case password.FieldName: + return m.Name() + case password.FieldPreferredUsername: + return m.PreferredUsername() + case password.FieldEmailVerified: + return m.EmailVerified() + case password.FieldUserID: + return m.UserID() + case password.FieldGroups: + return m.Groups() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *PasswordMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case password.FieldEmail: + return m.OldEmail(ctx) + case password.FieldHash: + return m.OldHash(ctx) + case password.FieldUsername: + return m.OldUsername(ctx) + case password.FieldName: + return m.OldName(ctx) + case password.FieldPreferredUsername: + return m.OldPreferredUsername(ctx) + case password.FieldEmailVerified: + return m.OldEmailVerified(ctx) + case password.FieldUserID: + return m.OldUserID(ctx) + case password.FieldGroups: + return m.OldGroups(ctx) + } + return nil, fmt.Errorf("unknown Password field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PasswordMutation) SetField(name string, value ent.Value) error { + switch name { + case password.FieldEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmail(v) + return nil + case password.FieldHash: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHash(v) + return nil + case password.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case password.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case password.FieldPreferredUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPreferredUsername(v) + return nil + case password.FieldEmailVerified: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmailVerified(v) + return nil + case password.FieldUserID: + v, ok := 
value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case password.FieldGroups: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroups(v) + return nil + } + return fmt.Errorf("unknown Password field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PasswordMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PasswordMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PasswordMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Password numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PasswordMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(password.FieldEmailVerified) { + fields = append(fields, password.FieldEmailVerified) + } + if m.FieldCleared(password.FieldGroups) { + fields = append(fields, password.FieldGroups) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PasswordMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *PasswordMutation) ClearField(name string) error { + switch name { + case password.FieldEmailVerified: + m.ClearEmailVerified() + return nil + case password.FieldGroups: + m.ClearGroups() + return nil + } + return fmt.Errorf("unknown Password nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PasswordMutation) ResetField(name string) error { + switch name { + case password.FieldEmail: + m.ResetEmail() + return nil + case password.FieldHash: + m.ResetHash() + return nil + case password.FieldUsername: + m.ResetUsername() + return nil + case password.FieldName: + m.ResetName() + return nil + case password.FieldPreferredUsername: + m.ResetPreferredUsername() + return nil + case password.FieldEmailVerified: + m.ResetEmailVerified() + return nil + case password.FieldUserID: + m.ResetUserID() + return nil + case password.FieldGroups: + m.ResetGroups() + return nil + } + return fmt.Errorf("unknown Password field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PasswordMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PasswordMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PasswordMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PasswordMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *PasswordMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PasswordMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PasswordMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Password unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PasswordMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Password edge %s", name) +} + +// RefreshTokenMutation represents an operation that mutates the RefreshToken nodes in the graph. +type RefreshTokenMutation struct { + config + op Op + typ string + id *string + client_id *string + scopes *[]string + appendscopes []string + nonce *string + claims_user_id *string + claims_username *string + claims_email *string + claims_email_verified *bool + claims_groups *[]string + appendclaims_groups []string + claims_preferred_username *string + connector_id *string + connector_data *[]byte + token *string + obsolete_token *string + created_at *time.Time + last_used *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*RefreshToken, error) + predicates []predicate.RefreshToken +} + +var _ ent.Mutation = (*RefreshTokenMutation)(nil) + +// refreshtokenOption allows management of the mutation configuration using functional options. +type refreshtokenOption func(*RefreshTokenMutation) + +// newRefreshTokenMutation creates new mutation for the RefreshToken entity. 
+func newRefreshTokenMutation(c config, op Op, opts ...refreshtokenOption) *RefreshTokenMutation { + m := &RefreshTokenMutation{ + config: c, + op: op, + typ: TypeRefreshToken, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withRefreshTokenID sets the ID field of the mutation. +func withRefreshTokenID(id string) refreshtokenOption { + return func(m *RefreshTokenMutation) { + var ( + err error + once sync.Once + value *RefreshToken + ) + m.oldValue = func(ctx context.Context) (*RefreshToken, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().RefreshToken.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withRefreshToken sets the old RefreshToken of the mutation. +func withRefreshToken(node *RefreshToken) refreshtokenOption { + return func(m *RefreshTokenMutation) { + m.oldValue = func(context.Context) (*RefreshToken, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m RefreshTokenMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m RefreshTokenMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("db: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of RefreshToken entities. +func (m *RefreshTokenMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. 
Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *RefreshTokenMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *RefreshTokenMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().RefreshToken.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetClientID sets the "client_id" field. +func (m *RefreshTokenMutation) SetClientID(s string) { + m.client_id = &s +} + +// ClientID returns the value of the "client_id" field in the mutation. +func (m *RefreshTokenMutation) ClientID() (r string, exists bool) { + v := m.client_id + if v == nil { + return + } + return *v, true +} + +// OldClientID returns the old "client_id" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RefreshTokenMutation) OldClientID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClientID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClientID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClientID: %w", err) + } + return oldValue.ClientID, nil +} + +// ResetClientID resets all changes to the "client_id" field. +func (m *RefreshTokenMutation) ResetClientID() { + m.client_id = nil +} + +// SetScopes sets the "scopes" field. +func (m *RefreshTokenMutation) SetScopes(s []string) { + m.scopes = &s + m.appendscopes = nil +} + +// Scopes returns the value of the "scopes" field in the mutation. +func (m *RefreshTokenMutation) Scopes() (r []string, exists bool) { + v := m.scopes + if v == nil { + return + } + return *v, true +} + +// OldScopes returns the old "scopes" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldScopes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScopes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScopes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScopes: %w", err) + } + return oldValue.Scopes, nil +} + +// AppendScopes adds s to the "scopes" field. +func (m *RefreshTokenMutation) AppendScopes(s []string) { + m.appendscopes = append(m.appendscopes, s...) +} + +// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. 
+func (m *RefreshTokenMutation) AppendedScopes() ([]string, bool) { + if len(m.appendscopes) == 0 { + return nil, false + } + return m.appendscopes, true +} + +// ClearScopes clears the value of the "scopes" field. +func (m *RefreshTokenMutation) ClearScopes() { + m.scopes = nil + m.appendscopes = nil + m.clearedFields[refreshtoken.FieldScopes] = struct{}{} +} + +// ScopesCleared returns if the "scopes" field was cleared in this mutation. +func (m *RefreshTokenMutation) ScopesCleared() bool { + _, ok := m.clearedFields[refreshtoken.FieldScopes] + return ok +} + +// ResetScopes resets all changes to the "scopes" field. +func (m *RefreshTokenMutation) ResetScopes() { + m.scopes = nil + m.appendscopes = nil + delete(m.clearedFields, refreshtoken.FieldScopes) +} + +// SetNonce sets the "nonce" field. +func (m *RefreshTokenMutation) SetNonce(s string) { + m.nonce = &s +} + +// Nonce returns the value of the "nonce" field in the mutation. +func (m *RefreshTokenMutation) Nonce() (r string, exists bool) { + v := m.nonce + if v == nil { + return + } + return *v, true +} + +// OldNonce returns the old "nonce" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldNonce(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNonce is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNonce requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNonce: %w", err) + } + return oldValue.Nonce, nil +} + +// ResetNonce resets all changes to the "nonce" field. 
+func (m *RefreshTokenMutation) ResetNonce() { + m.nonce = nil +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (m *RefreshTokenMutation) SetClaimsUserID(s string) { + m.claims_user_id = &s +} + +// ClaimsUserID returns the value of the "claims_user_id" field in the mutation. +func (m *RefreshTokenMutation) ClaimsUserID() (r string, exists bool) { + v := m.claims_user_id + if v == nil { + return + } + return *v, true +} + +// OldClaimsUserID returns the old "claims_user_id" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsUserID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUserID: %w", err) + } + return oldValue.ClaimsUserID, nil } -var _ ent.Mutation = (*PasswordMutation)(nil) +// ResetClaimsUserID resets all changes to the "claims_user_id" field. +func (m *RefreshTokenMutation) ResetClaimsUserID() { + m.claims_user_id = nil +} -// passwordOption allows management of the mutation configuration using functional options. -type passwordOption func(*PasswordMutation) +// SetClaimsUsername sets the "claims_username" field. +func (m *RefreshTokenMutation) SetClaimsUsername(s string) { + m.claims_username = &s +} -// newPasswordMutation creates new mutation for the Password entity. 
-func newPasswordMutation(c config, op Op, opts ...passwordOption) *PasswordMutation { - m := &PasswordMutation{ - config: c, - op: op, - typ: TypePassword, - clearedFields: make(map[string]struct{}), +// ClaimsUsername returns the value of the "claims_username" field in the mutation. +func (m *RefreshTokenMutation) ClaimsUsername() (r string, exists bool) { + v := m.claims_username + if v == nil { + return } - for _, opt := range opts { - opt(m) + return *v, true +} + +// OldClaimsUsername returns the old "claims_username" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUsername is only allowed on UpdateOne operations") } - return m + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUsername: %w", err) + } + return oldValue.ClaimsUsername, nil } -// withPasswordID sets the ID field of the mutation. -func withPasswordID(id int) passwordOption { - return func(m *PasswordMutation) { - var ( - err error - once sync.Once - value *Password - ) - m.oldValue = func(ctx context.Context) (*Password, error) { - once.Do(func() { - if m.done { - err = errors.New("querying old values post mutation is not allowed") - } else { - value, err = m.Client().Password.Get(ctx, id) - } - }) - return value, err - } - m.id = &id +// ResetClaimsUsername resets all changes to the "claims_username" field. +func (m *RefreshTokenMutation) ResetClaimsUsername() { + m.claims_username = nil +} + +// SetClaimsEmail sets the "claims_email" field. 
+func (m *RefreshTokenMutation) SetClaimsEmail(s string) { + m.claims_email = &s +} + +// ClaimsEmail returns the value of the "claims_email" field in the mutation. +func (m *RefreshTokenMutation) ClaimsEmail() (r string, exists bool) { + v := m.claims_email + if v == nil { + return } + return *v, true } -// withPassword sets the old Password of the mutation. -func withPassword(node *Password) passwordOption { - return func(m *PasswordMutation) { - m.oldValue = func(context.Context) (*Password, error) { - return node, nil - } - m.id = &node.ID +// OldClaimsEmail returns the old "claims_email" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmail: %w", err) } + return oldValue.ClaimsEmail, nil } -// Client returns a new `ent.Client` from the mutation. If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m PasswordMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client +// ResetClaimsEmail resets all changes to the "claims_email" field. +func (m *RefreshTokenMutation) ResetClaimsEmail() { + m.claims_email = nil } -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. 
-func (m PasswordMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("db: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (m *RefreshTokenMutation) SetClaimsEmailVerified(b bool) { + m.claims_email_verified = &b } -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. -func (m *PasswordMutation) ID() (id int, exists bool) { - if m.id == nil { +// ClaimsEmailVerified returns the value of the "claims_email_verified" field in the mutation. +func (m *RefreshTokenMutation) ClaimsEmailVerified() (r bool, exists bool) { + v := m.claims_email_verified + if v == nil { return } - return *m.id, true + return *v, true } -// IDs queries the database and returns the entity ids that match the mutation's predicate. -// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. -func (m *PasswordMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().Password.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) +// OldClaimsEmailVerified returns the old "claims_email_verified" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RefreshTokenMutation) OldClaimsEmailVerified(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmailVerified is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmailVerified requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmailVerified: %w", err) } + return oldValue.ClaimsEmailVerified, nil } -// SetEmail sets the "email" field. -func (m *PasswordMutation) SetEmail(s string) { - m.email = &s +// ResetClaimsEmailVerified resets all changes to the "claims_email_verified" field. +func (m *RefreshTokenMutation) ResetClaimsEmailVerified() { + m.claims_email_verified = nil } -// Email returns the value of the "email" field in the mutation. -func (m *PasswordMutation) Email() (r string, exists bool) { - v := m.email +// SetClaimsGroups sets the "claims_groups" field. +func (m *RefreshTokenMutation) SetClaimsGroups(s []string) { + m.claims_groups = &s + m.appendclaims_groups = nil +} + +// ClaimsGroups returns the value of the "claims_groups" field in the mutation. +func (m *RefreshTokenMutation) ClaimsGroups() (r []string, exists bool) { + v := m.claims_groups if v == nil { return } return *v, true } -// OldEmail returns the old "email" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsGroups returns the old "claims_groups" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PasswordMutation) OldEmail(ctx context.Context) (v string, err error) { +func (m *RefreshTokenMutation) OldClaimsGroups(ctx context.Context) (v []string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldEmail is only allowed on UpdateOne operations") + return v, errors.New("OldClaimsGroups is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldEmail requires an ID field in the mutation") + return v, errors.New("OldClaimsGroups requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldEmail: %w", err) + return v, fmt.Errorf("querying old value for OldClaimsGroups: %w", err) } - return oldValue.Email, nil + return oldValue.ClaimsGroups, nil } -// ResetEmail resets all changes to the "email" field. -func (m *PasswordMutation) ResetEmail() { - m.email = nil +// AppendClaimsGroups adds s to the "claims_groups" field. +func (m *RefreshTokenMutation) AppendClaimsGroups(s []string) { + m.appendclaims_groups = append(m.appendclaims_groups, s...) } -// SetHash sets the "hash" field. -func (m *PasswordMutation) SetHash(b []byte) { - m.hash = &b +// AppendedClaimsGroups returns the list of values that were appended to the "claims_groups" field in this mutation. +func (m *RefreshTokenMutation) AppendedClaimsGroups() ([]string, bool) { + if len(m.appendclaims_groups) == 0 { + return nil, false + } + return m.appendclaims_groups, true } -// Hash returns the value of the "hash" field in the mutation. -func (m *PasswordMutation) Hash() (r []byte, exists bool) { - v := m.hash +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (m *RefreshTokenMutation) ClearClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + m.clearedFields[refreshtoken.FieldClaimsGroups] = struct{}{} +} + +// ClaimsGroupsCleared returns if the "claims_groups" field was cleared in this mutation. 
+func (m *RefreshTokenMutation) ClaimsGroupsCleared() bool { + _, ok := m.clearedFields[refreshtoken.FieldClaimsGroups] + return ok +} + +// ResetClaimsGroups resets all changes to the "claims_groups" field. +func (m *RefreshTokenMutation) ResetClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + delete(m.clearedFields, refreshtoken.FieldClaimsGroups) +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (m *RefreshTokenMutation) SetClaimsPreferredUsername(s string) { + m.claims_preferred_username = &s +} + +// ClaimsPreferredUsername returns the value of the "claims_preferred_username" field in the mutation. +func (m *RefreshTokenMutation) ClaimsPreferredUsername() (r string, exists bool) { + v := m.claims_preferred_username if v == nil { return } return *v, true } -// OldHash returns the old "hash" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsPreferredUsername returns the old "claims_preferred_username" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PasswordMutation) OldHash(ctx context.Context) (v []byte, err error) { +func (m *RefreshTokenMutation) OldClaimsPreferredUsername(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHash is only allowed on UpdateOne operations") + return v, errors.New("OldClaimsPreferredUsername is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHash requires an ID field in the mutation") + return v, errors.New("OldClaimsPreferredUsername requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHash: %w", err) + return v, fmt.Errorf("querying old value for OldClaimsPreferredUsername: %w", err) } - return oldValue.Hash, nil + return oldValue.ClaimsPreferredUsername, nil } -// ResetHash resets all changes to the "hash" field. -func (m *PasswordMutation) ResetHash() { - m.hash = nil +// ResetClaimsPreferredUsername resets all changes to the "claims_preferred_username" field. +func (m *RefreshTokenMutation) ResetClaimsPreferredUsername() { + m.claims_preferred_username = nil } -// SetUsername sets the "username" field. -func (m *PasswordMutation) SetUsername(s string) { - m.username = &s +// SetConnectorID sets the "connector_id" field. +func (m *RefreshTokenMutation) SetConnectorID(s string) { + m.connector_id = &s } -// Username returns the value of the "username" field in the mutation. -func (m *PasswordMutation) Username() (r string, exists bool) { - v := m.username +// ConnectorID returns the value of the "connector_id" field in the mutation. +func (m *RefreshTokenMutation) ConnectorID() (r string, exists bool) { + v := m.connector_id if v == nil { return } return *v, true } -// OldUsername returns the old "username" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. 
+// OldConnectorID returns the old "connector_id" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PasswordMutation) OldUsername(ctx context.Context) (v string, err error) { +func (m *RefreshTokenMutation) OldConnectorID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUsername is only allowed on UpdateOne operations") + return v, errors.New("OldConnectorID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUsername requires an ID field in the mutation") + return v, errors.New("OldConnectorID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldUsername: %w", err) + return v, fmt.Errorf("querying old value for OldConnectorID: %w", err) } - return oldValue.Username, nil + return oldValue.ConnectorID, nil } -// ResetUsername resets all changes to the "username" field. -func (m *PasswordMutation) ResetUsername() { - m.username = nil +// ResetConnectorID resets all changes to the "connector_id" field. +func (m *RefreshTokenMutation) ResetConnectorID() { + m.connector_id = nil } -// SetName sets the "name" field. -func (m *PasswordMutation) SetName(s string) { - m.name = &s +// SetConnectorData sets the "connector_data" field. +func (m *RefreshTokenMutation) SetConnectorData(b []byte) { + m.connector_data = &b } -// Name returns the value of the "name" field in the mutation. -func (m *PasswordMutation) Name() (r string, exists bool) { - v := m.name +// ConnectorData returns the value of the "connector_data" field in the mutation. 
+func (m *RefreshTokenMutation) ConnectorData() (r []byte, exists bool) { + v := m.connector_data if v == nil { return } return *v, true } -// OldName returns the old "name" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. +// OldConnectorData returns the old "connector_data" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PasswordMutation) OldName(ctx context.Context) (v string, err error) { +func (m *RefreshTokenMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") + return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") + return v, errors.New("OldConnectorData requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) + return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) } - return oldValue.Name, nil + return oldValue.ConnectorData, nil } -// ResetName resets all changes to the "name" field. -func (m *PasswordMutation) ResetName() { - m.name = nil +// ClearConnectorData clears the value of the "connector_data" field. +func (m *RefreshTokenMutation) ClearConnectorData() { + m.connector_data = nil + m.clearedFields[refreshtoken.FieldConnectorData] = struct{}{} } -// SetPreferredUsername sets the "preferred_username" field. -func (m *PasswordMutation) SetPreferredUsername(s string) { - m.preferred_username = &s +// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. 
+func (m *RefreshTokenMutation) ConnectorDataCleared() bool { + _, ok := m.clearedFields[refreshtoken.FieldConnectorData] + return ok } -// PreferredUsername returns the value of the "preferred_username" field in the mutation. -func (m *PasswordMutation) PreferredUsername() (r string, exists bool) { - v := m.preferred_username +// ResetConnectorData resets all changes to the "connector_data" field. +func (m *RefreshTokenMutation) ResetConnectorData() { + m.connector_data = nil + delete(m.clearedFields, refreshtoken.FieldConnectorData) +} + +// SetToken sets the "token" field. +func (m *RefreshTokenMutation) SetToken(s string) { + m.token = &s +} + +// Token returns the value of the "token" field in the mutation. +func (m *RefreshTokenMutation) Token() (r string, exists bool) { + v := m.token if v == nil { return } return *v, true } -// OldPreferredUsername returns the old "preferred_username" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. +// OldToken returns the old "token" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PasswordMutation) OldPreferredUsername(ctx context.Context) (v string, err error) { +func (m *RefreshTokenMutation) OldToken(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPreferredUsername is only allowed on UpdateOne operations") + return v, errors.New("OldToken is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPreferredUsername requires an ID field in the mutation") + return v, errors.New("OldToken requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldPreferredUsername: %w", err) + return v, fmt.Errorf("querying old value for OldToken: %w", err) } - return oldValue.PreferredUsername, nil + return oldValue.Token, nil } -// ResetPreferredUsername resets all changes to the "preferred_username" field. -func (m *PasswordMutation) ResetPreferredUsername() { - m.preferred_username = nil +// ResetToken resets all changes to the "token" field. +func (m *RefreshTokenMutation) ResetToken() { + m.token = nil } -// SetEmailVerified sets the "email_verified" field. -func (m *PasswordMutation) SetEmailVerified(b bool) { - m.email_verified = &b +// SetObsoleteToken sets the "obsolete_token" field. +func (m *RefreshTokenMutation) SetObsoleteToken(s string) { + m.obsolete_token = &s } -// EmailVerified returns the value of the "email_verified" field in the mutation. -func (m *PasswordMutation) EmailVerified() (r bool, exists bool) { - v := m.email_verified +// ObsoleteToken returns the value of the "obsolete_token" field in the mutation. +func (m *RefreshTokenMutation) ObsoleteToken() (r string, exists bool) { + v := m.obsolete_token if v == nil { return } return *v, true } -// OldEmailVerified returns the old "email_verified" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. 
+// OldObsoleteToken returns the old "obsolete_token" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PasswordMutation) OldEmailVerified(ctx context.Context) (v *bool, err error) { +func (m *RefreshTokenMutation) OldObsoleteToken(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldEmailVerified is only allowed on UpdateOne operations") + return v, errors.New("OldObsoleteToken is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldEmailVerified requires an ID field in the mutation") + return v, errors.New("OldObsoleteToken requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldEmailVerified: %w", err) + return v, fmt.Errorf("querying old value for OldObsoleteToken: %w", err) } - return oldValue.EmailVerified, nil -} - -// ClearEmailVerified clears the value of the "email_verified" field. -func (m *PasswordMutation) ClearEmailVerified() { - m.email_verified = nil - m.clearedFields[password.FieldEmailVerified] = struct{}{} -} - -// EmailVerifiedCleared returns if the "email_verified" field was cleared in this mutation. -func (m *PasswordMutation) EmailVerifiedCleared() bool { - _, ok := m.clearedFields[password.FieldEmailVerified] - return ok + return oldValue.ObsoleteToken, nil } -// ResetEmailVerified resets all changes to the "email_verified" field. -func (m *PasswordMutation) ResetEmailVerified() { - m.email_verified = nil - delete(m.clearedFields, password.FieldEmailVerified) +// ResetObsoleteToken resets all changes to the "obsolete_token" field. +func (m *RefreshTokenMutation) ResetObsoleteToken() { + m.obsolete_token = nil } -// SetUserID sets the "user_id" field. 
-func (m *PasswordMutation) SetUserID(s string) { - m.user_id = &s +// SetCreatedAt sets the "created_at" field. +func (m *RefreshTokenMutation) SetCreatedAt(t time.Time) { + m.created_at = &t } -// UserID returns the value of the "user_id" field in the mutation. -func (m *PasswordMutation) UserID() (r string, exists bool) { - v := m.user_id +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *RefreshTokenMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at if v == nil { return } return *v, true } -// OldUserID returns the old "user_id" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. +// OldCreatedAt returns the old "created_at" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PasswordMutation) OldUserID(ctx context.Context) (v string, err error) { +func (m *RefreshTokenMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUserID is only allowed on UpdateOne operations") + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUserID requires an ID field in the mutation") + return v, errors.New("OldCreatedAt requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldUserID: %w", err) + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) } - return oldValue.UserID, nil + return oldValue.CreatedAt, nil } -// ResetUserID resets all changes to the "user_id" field. 
-func (m *PasswordMutation) ResetUserID() { - m.user_id = nil +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *RefreshTokenMutation) ResetCreatedAt() { + m.created_at = nil } -// SetGroups sets the "groups" field. -func (m *PasswordMutation) SetGroups(s []string) { - m.groups = &s - m.appendgroups = nil +// SetLastUsed sets the "last_used" field. +func (m *RefreshTokenMutation) SetLastUsed(t time.Time) { + m.last_used = &t } -// Groups returns the value of the "groups" field in the mutation. -func (m *PasswordMutation) Groups() (r []string, exists bool) { - v := m.groups +// LastUsed returns the value of the "last_used" field in the mutation. +func (m *RefreshTokenMutation) LastUsed() (r time.Time, exists bool) { + v := m.last_used if v == nil { return } return *v, true } -// OldGroups returns the old "groups" field's value of the Password entity. -// If the Password object wasn't provided to the builder, the object is fetched from the database. +// OldLastUsed returns the old "last_used" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PasswordMutation) OldGroups(ctx context.Context) (v []string, err error) { +func (m *RefreshTokenMutation) OldLastUsed(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldGroups is only allowed on UpdateOne operations") + return v, errors.New("OldLastUsed is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldGroups requires an ID field in the mutation") + return v, errors.New("OldLastUsed requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldGroups: %w", err) - } - return oldValue.Groups, nil -} - -// AppendGroups adds s to the "groups" field. -func (m *PasswordMutation) AppendGroups(s []string) { - m.appendgroups = append(m.appendgroups, s...) -} - -// AppendedGroups returns the list of values that were appended to the "groups" field in this mutation. -func (m *PasswordMutation) AppendedGroups() ([]string, bool) { - if len(m.appendgroups) == 0 { - return nil, false + return v, fmt.Errorf("querying old value for OldLastUsed: %w", err) } - return m.appendgroups, true -} - -// ClearGroups clears the value of the "groups" field. -func (m *PasswordMutation) ClearGroups() { - m.groups = nil - m.appendgroups = nil - m.clearedFields[password.FieldGroups] = struct{}{} -} - -// GroupsCleared returns if the "groups" field was cleared in this mutation. -func (m *PasswordMutation) GroupsCleared() bool { - _, ok := m.clearedFields[password.FieldGroups] - return ok + return oldValue.LastUsed, nil } -// ResetGroups resets all changes to the "groups" field. -func (m *PasswordMutation) ResetGroups() { - m.groups = nil - m.appendgroups = nil - delete(m.clearedFields, password.FieldGroups) +// ResetLastUsed resets all changes to the "last_used" field. 
+func (m *RefreshTokenMutation) ResetLastUsed() { + m.last_used = nil } - -// Where appends a list predicates to the PasswordMutation builder. -func (m *PasswordMutation) Where(ps ...predicate.Password) { + +// Where appends a list predicates to the RefreshTokenMutation builder. +func (m *RefreshTokenMutation) Where(ps ...predicate.RefreshToken) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the PasswordMutation builder. Using this method, +// WhereP appends storage-level predicates to the RefreshTokenMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *PasswordMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.Password, len(ps)) +func (m *RefreshTokenMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.RefreshToken, len(ps)) for i := range ps { p[i] = ps[i] } @@ -6776,48 +8568,69 @@ func (m *PasswordMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *PasswordMutation) Op() Op { +func (m *RefreshTokenMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *PasswordMutation) SetOp(op Op) { +func (m *RefreshTokenMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (Password). -func (m *PasswordMutation) Type() string { +// Type returns the node type of this mutation (RefreshToken). +func (m *RefreshTokenMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *PasswordMutation) Fields() []string { - fields := make([]string, 0, 8) - if m.email != nil { - fields = append(fields, password.FieldEmail) +func (m *RefreshTokenMutation) Fields() []string { + fields := make([]string, 0, 15) + if m.client_id != nil { + fields = append(fields, refreshtoken.FieldClientID) } - if m.hash != nil { - fields = append(fields, password.FieldHash) + if m.scopes != nil { + fields = append(fields, refreshtoken.FieldScopes) } - if m.username != nil { - fields = append(fields, password.FieldUsername) + if m.nonce != nil { + fields = append(fields, refreshtoken.FieldNonce) } - if m.name != nil { - fields = append(fields, password.FieldName) + if m.claims_user_id != nil { + fields = append(fields, refreshtoken.FieldClaimsUserID) } - if m.preferred_username != nil { - fields = append(fields, password.FieldPreferredUsername) + if m.claims_username != nil { + fields = append(fields, refreshtoken.FieldClaimsUsername) } - if m.email_verified != nil { - fields = append(fields, password.FieldEmailVerified) + if m.claims_email != nil { + fields = append(fields, refreshtoken.FieldClaimsEmail) } - if m.user_id != nil { - fields = append(fields, password.FieldUserID) + if m.claims_email_verified != nil { + fields = append(fields, refreshtoken.FieldClaimsEmailVerified) } - if m.groups != nil { - fields = append(fields, password.FieldGroups) + if m.claims_groups != nil { + fields = append(fields, refreshtoken.FieldClaimsGroups) + } + if m.claims_preferred_username != nil { + fields = append(fields, refreshtoken.FieldClaimsPreferredUsername) + } + if m.connector_id != nil { + fields = append(fields, refreshtoken.FieldConnectorID) + } + if m.connector_data != nil { + fields = append(fields, refreshtoken.FieldConnectorData) + } + if m.token != nil { + fields = append(fields, refreshtoken.FieldToken) + } + if m.obsolete_token != nil { + fields = append(fields, refreshtoken.FieldObsoleteToken) + } + if m.created_at != nil { + fields = append(fields, 
refreshtoken.FieldCreatedAt) + } + if m.last_used != nil { + fields = append(fields, refreshtoken.FieldLastUsed) } return fields } @@ -6825,24 +8638,38 @@ func (m *PasswordMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *PasswordMutation) Field(name string) (ent.Value, bool) { +func (m *RefreshTokenMutation) Field(name string) (ent.Value, bool) { switch name { - case password.FieldEmail: - return m.Email() - case password.FieldHash: - return m.Hash() - case password.FieldUsername: - return m.Username() - case password.FieldName: - return m.Name() - case password.FieldPreferredUsername: - return m.PreferredUsername() - case password.FieldEmailVerified: - return m.EmailVerified() - case password.FieldUserID: - return m.UserID() - case password.FieldGroups: - return m.Groups() + case refreshtoken.FieldClientID: + return m.ClientID() + case refreshtoken.FieldScopes: + return m.Scopes() + case refreshtoken.FieldNonce: + return m.Nonce() + case refreshtoken.FieldClaimsUserID: + return m.ClaimsUserID() + case refreshtoken.FieldClaimsUsername: + return m.ClaimsUsername() + case refreshtoken.FieldClaimsEmail: + return m.ClaimsEmail() + case refreshtoken.FieldClaimsEmailVerified: + return m.ClaimsEmailVerified() + case refreshtoken.FieldClaimsGroups: + return m.ClaimsGroups() + case refreshtoken.FieldClaimsPreferredUsername: + return m.ClaimsPreferredUsername() + case refreshtoken.FieldConnectorID: + return m.ConnectorID() + case refreshtoken.FieldConnectorData: + return m.ConnectorData() + case refreshtoken.FieldToken: + return m.Token() + case refreshtoken.FieldObsoleteToken: + return m.ObsoleteToken() + case refreshtoken.FieldCreatedAt: + return m.CreatedAt() + case refreshtoken.FieldLastUsed: + return m.LastUsed() } return nil, false } @@ -6850,269 +8677,355 @@ func (m *PasswordMutation) Field(name string) 
(ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. -func (m *PasswordMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *RefreshTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case password.FieldEmail: - return m.OldEmail(ctx) - case password.FieldHash: - return m.OldHash(ctx) - case password.FieldUsername: - return m.OldUsername(ctx) - case password.FieldName: - return m.OldName(ctx) - case password.FieldPreferredUsername: - return m.OldPreferredUsername(ctx) - case password.FieldEmailVerified: - return m.OldEmailVerified(ctx) - case password.FieldUserID: - return m.OldUserID(ctx) - case password.FieldGroups: - return m.OldGroups(ctx) + case refreshtoken.FieldClientID: + return m.OldClientID(ctx) + case refreshtoken.FieldScopes: + return m.OldScopes(ctx) + case refreshtoken.FieldNonce: + return m.OldNonce(ctx) + case refreshtoken.FieldClaimsUserID: + return m.OldClaimsUserID(ctx) + case refreshtoken.FieldClaimsUsername: + return m.OldClaimsUsername(ctx) + case refreshtoken.FieldClaimsEmail: + return m.OldClaimsEmail(ctx) + case refreshtoken.FieldClaimsEmailVerified: + return m.OldClaimsEmailVerified(ctx) + case refreshtoken.FieldClaimsGroups: + return m.OldClaimsGroups(ctx) + case refreshtoken.FieldClaimsPreferredUsername: + return m.OldClaimsPreferredUsername(ctx) + case refreshtoken.FieldConnectorID: + return m.OldConnectorID(ctx) + case refreshtoken.FieldConnectorData: + return m.OldConnectorData(ctx) + case refreshtoken.FieldToken: + return m.OldToken(ctx) + case refreshtoken.FieldObsoleteToken: + return m.OldObsoleteToken(ctx) + case refreshtoken.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case refreshtoken.FieldLastUsed: + return m.OldLastUsed(ctx) } - return nil, fmt.Errorf("unknown Password field %s", name) + return nil, 
fmt.Errorf("unknown RefreshToken field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PasswordMutation) SetField(name string, value ent.Value) error { +func (m *RefreshTokenMutation) SetField(name string, value ent.Value) error { switch name { - case password.FieldEmail: + case refreshtoken.FieldClientID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetEmail(v) + m.SetClientID(v) return nil - case password.FieldHash: - v, ok := value.([]byte) + case refreshtoken.FieldScopes: + v, ok := value.([]string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHash(v) + m.SetScopes(v) return nil - case password.FieldUsername: + case refreshtoken.FieldNonce: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetUsername(v) + m.SetNonce(v) return nil - case password.FieldName: + case refreshtoken.FieldClaimsUserID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetName(v) + m.SetClaimsUserID(v) return nil - case password.FieldPreferredUsername: + case refreshtoken.FieldClaimsUsername: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetPreferredUsername(v) + m.SetClaimsUsername(v) return nil - case password.FieldEmailVerified: + case refreshtoken.FieldClaimsEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmail(v) + return nil + case refreshtoken.FieldClaimsEmailVerified: v, ok := value.(bool) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetEmailVerified(v) + m.SetClaimsEmailVerified(v) return nil - case password.FieldUserID: + case 
refreshtoken.FieldClaimsGroups: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsGroups(v) + return nil + case refreshtoken.FieldClaimsPreferredUsername: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetUserID(v) + m.SetClaimsPreferredUsername(v) return nil - case password.FieldGroups: - v, ok := value.([]string) + case refreshtoken.FieldConnectorID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorID(v) + return nil + case refreshtoken.FieldConnectorData: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorData(v) + return nil + case refreshtoken.FieldToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + return nil + case refreshtoken.FieldObsoleteToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetObsoleteToken(v) + return nil + case refreshtoken.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case refreshtoken.FieldLastUsed: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetGroups(v) + m.SetLastUsed(v) return nil } - return fmt.Errorf("unknown Password field %s", name) + return fmt.Errorf("unknown RefreshToken field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *PasswordMutation) AddedFields() []string { +func (m *RefreshTokenMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. 
The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *PasswordMutation) AddedField(name string) (ent.Value, bool) { +func (m *RefreshTokenMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PasswordMutation) AddField(name string, value ent.Value) error { +func (m *RefreshTokenMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown Password numeric field %s", name) + return fmt.Errorf("unknown RefreshToken numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *PasswordMutation) ClearedFields() []string { +func (m *RefreshTokenMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(password.FieldEmailVerified) { - fields = append(fields, password.FieldEmailVerified) + if m.FieldCleared(refreshtoken.FieldScopes) { + fields = append(fields, refreshtoken.FieldScopes) } - if m.FieldCleared(password.FieldGroups) { - fields = append(fields, password.FieldGroups) + if m.FieldCleared(refreshtoken.FieldClaimsGroups) { + fields = append(fields, refreshtoken.FieldClaimsGroups) + } + if m.FieldCleared(refreshtoken.FieldConnectorData) { + fields = append(fields, refreshtoken.FieldConnectorData) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *PasswordMutation) FieldCleared(name string) bool { +func (m *RefreshTokenMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *PasswordMutation) ClearField(name string) error { +func (m *RefreshTokenMutation) ClearField(name string) error { switch name { - case password.FieldEmailVerified: - m.ClearEmailVerified() + case refreshtoken.FieldScopes: + m.ClearScopes() return nil - case password.FieldGroups: - m.ClearGroups() + case refreshtoken.FieldClaimsGroups: + m.ClearClaimsGroups() + return nil + case refreshtoken.FieldConnectorData: + m.ClearConnectorData() return nil } - return fmt.Errorf("unknown Password nullable field %s", name) + return fmt.Errorf("unknown RefreshToken nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *PasswordMutation) ResetField(name string) error { +func (m *RefreshTokenMutation) ResetField(name string) error { switch name { - case password.FieldEmail: - m.ResetEmail() + case refreshtoken.FieldClientID: + m.ResetClientID() return nil - case password.FieldHash: - m.ResetHash() + case refreshtoken.FieldScopes: + m.ResetScopes() return nil - case password.FieldUsername: - m.ResetUsername() + case refreshtoken.FieldNonce: + m.ResetNonce() return nil - case password.FieldName: - m.ResetName() + case refreshtoken.FieldClaimsUserID: + m.ResetClaimsUserID() return nil - case password.FieldPreferredUsername: - m.ResetPreferredUsername() + case refreshtoken.FieldClaimsUsername: + m.ResetClaimsUsername() return nil - case password.FieldEmailVerified: - m.ResetEmailVerified() + case refreshtoken.FieldClaimsEmail: + m.ResetClaimsEmail() return nil - case password.FieldUserID: - m.ResetUserID() + case refreshtoken.FieldClaimsEmailVerified: + m.ResetClaimsEmailVerified() return nil - case password.FieldGroups: - m.ResetGroups() + case refreshtoken.FieldClaimsGroups: + m.ResetClaimsGroups() + return nil + case refreshtoken.FieldClaimsPreferredUsername: + m.ResetClaimsPreferredUsername() + return nil + case 
refreshtoken.FieldConnectorID: + m.ResetConnectorID() + return nil + case refreshtoken.FieldConnectorData: + m.ResetConnectorData() + return nil + case refreshtoken.FieldToken: + m.ResetToken() + return nil + case refreshtoken.FieldObsoleteToken: + m.ResetObsoleteToken() + return nil + case refreshtoken.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case refreshtoken.FieldLastUsed: + m.ResetLastUsed() return nil } - return fmt.Errorf("unknown Password field %s", name) + return fmt.Errorf("unknown RefreshToken field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *PasswordMutation) AddedEdges() []string { +func (m *RefreshTokenMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *PasswordMutation) AddedIDs(name string) []ent.Value { +func (m *RefreshTokenMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *PasswordMutation) RemovedEdges() []string { +func (m *RefreshTokenMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *PasswordMutation) RemovedIDs(name string) []ent.Value { +func (m *RefreshTokenMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *PasswordMutation) ClearedEdges() []string { +func (m *RefreshTokenMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. 
-func (m *PasswordMutation) EdgeCleared(name string) bool { +func (m *RefreshTokenMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *PasswordMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown Password unique edge %s", name) +func (m *RefreshTokenMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown RefreshToken unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *PasswordMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown Password edge %s", name) +func (m *RefreshTokenMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown RefreshToken edge %s", name) } -// RefreshTokenMutation represents an operation that mutates the RefreshToken nodes in the graph. -type RefreshTokenMutation struct { +// UserIdentityMutation represents an operation that mutates the UserIdentity nodes in the graph. 
+type UserIdentityMutation struct { config op Op typ string id *string - client_id *string - scopes *[]string - appendscopes []string - nonce *string + user_id *string + connector_id *string claims_user_id *string claims_username *string + claims_preferred_username *string claims_email *string claims_email_verified *bool claims_groups *[]string appendclaims_groups []string - claims_preferred_username *string - connector_id *string - connector_data *[]byte - token *string - obsolete_token *string + consents *[]byte created_at *time.Time - last_used *time.Time + last_login *time.Time + blocked_until *time.Time clearedFields map[string]struct{} done bool - oldValue func(context.Context) (*RefreshToken, error) - predicates []predicate.RefreshToken + oldValue func(context.Context) (*UserIdentity, error) + predicates []predicate.UserIdentity } -var _ ent.Mutation = (*RefreshTokenMutation)(nil) +var _ ent.Mutation = (*UserIdentityMutation)(nil) -// refreshtokenOption allows management of the mutation configuration using functional options. -type refreshtokenOption func(*RefreshTokenMutation) +// useridentityOption allows management of the mutation configuration using functional options. +type useridentityOption func(*UserIdentityMutation) -// newRefreshTokenMutation creates new mutation for the RefreshToken entity. -func newRefreshTokenMutation(c config, op Op, opts ...refreshtokenOption) *RefreshTokenMutation { - m := &RefreshTokenMutation{ +// newUserIdentityMutation creates new mutation for the UserIdentity entity. +func newUserIdentityMutation(c config, op Op, opts ...useridentityOption) *UserIdentityMutation { + m := &UserIdentityMutation{ config: c, op: op, - typ: TypeRefreshToken, + typ: TypeUserIdentity, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -7121,20 +9034,20 @@ func newRefreshTokenMutation(c config, op Op, opts ...refreshtokenOption) *Refre return m } -// withRefreshTokenID sets the ID field of the mutation. 
-func withRefreshTokenID(id string) refreshtokenOption { - return func(m *RefreshTokenMutation) { +// withUserIdentityID sets the ID field of the mutation. +func withUserIdentityID(id string) useridentityOption { + return func(m *UserIdentityMutation) { var ( err error once sync.Once - value *RefreshToken + value *UserIdentity ) - m.oldValue = func(ctx context.Context) (*RefreshToken, error) { + m.oldValue = func(ctx context.Context) (*UserIdentity, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().RefreshToken.Get(ctx, id) + value, err = m.Client().UserIdentity.Get(ctx, id) } }) return value, err @@ -7143,10 +9056,10 @@ func withRefreshTokenID(id string) refreshtokenOption { } } -// withRefreshToken sets the old RefreshToken of the mutation. -func withRefreshToken(node *RefreshToken) refreshtokenOption { - return func(m *RefreshTokenMutation) { - m.oldValue = func(context.Context) (*RefreshToken, error) { +// withUserIdentity sets the old UserIdentity of the mutation. +func withUserIdentity(node *UserIdentity) useridentityOption { + return func(m *UserIdentityMutation) { + m.oldValue = func(context.Context) (*UserIdentity, error) { return node, nil } m.id = &node.ID @@ -7155,7 +9068,7 @@ func withRefreshToken(node *RefreshToken) refreshtokenOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m RefreshTokenMutation) Client() *Client { +func (m UserIdentityMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -7163,7 +9076,7 @@ func (m RefreshTokenMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. 
-func (m RefreshTokenMutation) Tx() (*Tx, error) { +func (m UserIdentityMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -7173,14 +9086,14 @@ func (m RefreshTokenMutation) Tx() (*Tx, error) { } // SetID sets the value of the id field. Note that this -// operation is only accepted on creation of RefreshToken entities. -func (m *RefreshTokenMutation) SetID(id string) { +// operation is only accepted on creation of UserIdentity entities. +func (m *UserIdentityMutation) SetID(id string) { m.id = &id } // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *RefreshTokenMutation) ID() (id string, exists bool) { +func (m *UserIdentityMutation) ID() (id string, exists bool) { if m.id == nil { return } @@ -7191,7 +9104,7 @@ func (m *RefreshTokenMutation) ID() (id string, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *RefreshTokenMutation) IDs(ctx context.Context) ([]string, error) { +func (m *UserIdentityMutation) IDs(ctx context.Context) ([]string, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -7200,156 +9113,91 @@ func (m *RefreshTokenMutation) IDs(ctx context.Context) ([]string, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().RefreshToken.Query().Where(m.predicates...).IDs(ctx) + return m.Client().UserIdentity.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetClientID sets the "client_id" field. 
-func (m *RefreshTokenMutation) SetClientID(s string) { - m.client_id = &s -} - -// ClientID returns the value of the "client_id" field in the mutation. -func (m *RefreshTokenMutation) ClientID() (r string, exists bool) { - v := m.client_id - if v == nil { - return - } - return *v, true -} - -// OldClientID returns the old "client_id" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldClientID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldClientID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldClientID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldClientID: %w", err) - } - return oldValue.ClientID, nil -} - -// ResetClientID resets all changes to the "client_id" field. -func (m *RefreshTokenMutation) ResetClientID() { - m.client_id = nil -} - -// SetScopes sets the "scopes" field. -func (m *RefreshTokenMutation) SetScopes(s []string) { - m.scopes = &s - m.appendscopes = nil +// SetUserID sets the "user_id" field. +func (m *UserIdentityMutation) SetUserID(s string) { + m.user_id = &s } -// Scopes returns the value of the "scopes" field in the mutation. -func (m *RefreshTokenMutation) Scopes() (r []string, exists bool) { - v := m.scopes +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserIdentityMutation) UserID() (r string, exists bool) { + v := m.user_id if v == nil { return } return *v, true } -// OldScopes returns the old "scopes" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. 
+// OldUserID returns the old "user_id" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldScopes(ctx context.Context) (v []string, err error) { +func (m *UserIdentityMutation) OldUserID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldScopes is only allowed on UpdateOne operations") + return v, errors.New("OldUserID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldScopes requires an ID field in the mutation") + return v, errors.New("OldUserID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldScopes: %w", err) - } - return oldValue.Scopes, nil -} - -// AppendScopes adds s to the "scopes" field. -func (m *RefreshTokenMutation) AppendScopes(s []string) { - m.appendscopes = append(m.appendscopes, s...) -} - -// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. -func (m *RefreshTokenMutation) AppendedScopes() ([]string, bool) { - if len(m.appendscopes) == 0 { - return nil, false + return v, fmt.Errorf("querying old value for OldUserID: %w", err) } - return m.appendscopes, true -} - -// ClearScopes clears the value of the "scopes" field. -func (m *RefreshTokenMutation) ClearScopes() { - m.scopes = nil - m.appendscopes = nil - m.clearedFields[refreshtoken.FieldScopes] = struct{}{} -} - -// ScopesCleared returns if the "scopes" field was cleared in this mutation. -func (m *RefreshTokenMutation) ScopesCleared() bool { - _, ok := m.clearedFields[refreshtoken.FieldScopes] - return ok + return oldValue.UserID, nil } -// ResetScopes resets all changes to the "scopes" field. 
-func (m *RefreshTokenMutation) ResetScopes() { - m.scopes = nil - m.appendscopes = nil - delete(m.clearedFields, refreshtoken.FieldScopes) +// ResetUserID resets all changes to the "user_id" field. +func (m *UserIdentityMutation) ResetUserID() { + m.user_id = nil } -// SetNonce sets the "nonce" field. -func (m *RefreshTokenMutation) SetNonce(s string) { - m.nonce = &s +// SetConnectorID sets the "connector_id" field. +func (m *UserIdentityMutation) SetConnectorID(s string) { + m.connector_id = &s } -// Nonce returns the value of the "nonce" field in the mutation. -func (m *RefreshTokenMutation) Nonce() (r string, exists bool) { - v := m.nonce +// ConnectorID returns the value of the "connector_id" field in the mutation. +func (m *UserIdentityMutation) ConnectorID() (r string, exists bool) { + v := m.connector_id if v == nil { return } return *v, true } -// OldNonce returns the old "nonce" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldConnectorID returns the old "connector_id" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldNonce(ctx context.Context) (v string, err error) { +func (m *UserIdentityMutation) OldConnectorID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldNonce is only allowed on UpdateOne operations") + return v, errors.New("OldConnectorID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldNonce requires an ID field in the mutation") + return v, errors.New("OldConnectorID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldNonce: %w", err) + return v, fmt.Errorf("querying old value for OldConnectorID: %w", err) } - return oldValue.Nonce, nil + return oldValue.ConnectorID, nil } -// ResetNonce resets all changes to the "nonce" field. -func (m *RefreshTokenMutation) ResetNonce() { - m.nonce = nil +// ResetConnectorID resets all changes to the "connector_id" field. +func (m *UserIdentityMutation) ResetConnectorID() { + m.connector_id = nil } // SetClaimsUserID sets the "claims_user_id" field. -func (m *RefreshTokenMutation) SetClaimsUserID(s string) { +func (m *UserIdentityMutation) SetClaimsUserID(s string) { m.claims_user_id = &s } // ClaimsUserID returns the value of the "claims_user_id" field in the mutation. -func (m *RefreshTokenMutation) ClaimsUserID() (r string, exists bool) { +func (m *UserIdentityMutation) ClaimsUserID() (r string, exists bool) { v := m.claims_user_id if v == nil { return @@ -7357,10 +9205,10 @@ func (m *RefreshTokenMutation) ClaimsUserID() (r string, exists bool) { return *v, true } -// OldClaimsUserID returns the old "claims_user_id" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsUserID returns the old "claims_user_id" field's value of the UserIdentity entity. 
+// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldClaimsUserID(ctx context.Context) (v string, err error) { +func (m *UserIdentityMutation) OldClaimsUserID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldClaimsUserID is only allowed on UpdateOne operations") } @@ -7375,17 +9223,17 @@ func (m *RefreshTokenMutation) OldClaimsUserID(ctx context.Context) (v string, e } // ResetClaimsUserID resets all changes to the "claims_user_id" field. -func (m *RefreshTokenMutation) ResetClaimsUserID() { +func (m *UserIdentityMutation) ResetClaimsUserID() { m.claims_user_id = nil } // SetClaimsUsername sets the "claims_username" field. -func (m *RefreshTokenMutation) SetClaimsUsername(s string) { +func (m *UserIdentityMutation) SetClaimsUsername(s string) { m.claims_username = &s } // ClaimsUsername returns the value of the "claims_username" field in the mutation. -func (m *RefreshTokenMutation) ClaimsUsername() (r string, exists bool) { +func (m *UserIdentityMutation) ClaimsUsername() (r string, exists bool) { v := m.claims_username if v == nil { return @@ -7393,10 +9241,10 @@ func (m *RefreshTokenMutation) ClaimsUsername() (r string, exists bool) { return *v, true } -// OldClaimsUsername returns the old "claims_username" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsUsername returns the old "claims_username" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldClaimsUsername(ctx context.Context) (v string, err error) { +func (m *UserIdentityMutation) OldClaimsUsername(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldClaimsUsername is only allowed on UpdateOne operations") } @@ -7411,17 +9259,53 @@ func (m *RefreshTokenMutation) OldClaimsUsername(ctx context.Context) (v string, } // ResetClaimsUsername resets all changes to the "claims_username" field. -func (m *RefreshTokenMutation) ResetClaimsUsername() { +func (m *UserIdentityMutation) ResetClaimsUsername() { m.claims_username = nil } +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (m *UserIdentityMutation) SetClaimsPreferredUsername(s string) { + m.claims_preferred_username = &s +} + +// ClaimsPreferredUsername returns the value of the "claims_preferred_username" field in the mutation. +func (m *UserIdentityMutation) ClaimsPreferredUsername() (r string, exists bool) { + v := m.claims_preferred_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsPreferredUsername returns the old "claims_preferred_username" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserIdentityMutation) OldClaimsPreferredUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsPreferredUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsPreferredUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsPreferredUsername: %w", err) + } + return oldValue.ClaimsPreferredUsername, nil +} + +// ResetClaimsPreferredUsername resets all changes to the "claims_preferred_username" field. +func (m *UserIdentityMutation) ResetClaimsPreferredUsername() { + m.claims_preferred_username = nil +} + // SetClaimsEmail sets the "claims_email" field. -func (m *RefreshTokenMutation) SetClaimsEmail(s string) { +func (m *UserIdentityMutation) SetClaimsEmail(s string) { m.claims_email = &s } // ClaimsEmail returns the value of the "claims_email" field in the mutation. -func (m *RefreshTokenMutation) ClaimsEmail() (r string, exists bool) { +func (m *UserIdentityMutation) ClaimsEmail() (r string, exists bool) { v := m.claims_email if v == nil { return @@ -7429,10 +9313,10 @@ func (m *RefreshTokenMutation) ClaimsEmail() (r string, exists bool) { return *v, true } -// OldClaimsEmail returns the old "claims_email" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsEmail returns the old "claims_email" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldClaimsEmail(ctx context.Context) (v string, err error) { +func (m *UserIdentityMutation) OldClaimsEmail(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldClaimsEmail is only allowed on UpdateOne operations") } @@ -7447,17 +9331,17 @@ func (m *RefreshTokenMutation) OldClaimsEmail(ctx context.Context) (v string, er } // ResetClaimsEmail resets all changes to the "claims_email" field. -func (m *RefreshTokenMutation) ResetClaimsEmail() { +func (m *UserIdentityMutation) ResetClaimsEmail() { m.claims_email = nil } // SetClaimsEmailVerified sets the "claims_email_verified" field. -func (m *RefreshTokenMutation) SetClaimsEmailVerified(b bool) { +func (m *UserIdentityMutation) SetClaimsEmailVerified(b bool) { m.claims_email_verified = &b } // ClaimsEmailVerified returns the value of the "claims_email_verified" field in the mutation. -func (m *RefreshTokenMutation) ClaimsEmailVerified() (r bool, exists bool) { +func (m *UserIdentityMutation) ClaimsEmailVerified() (r bool, exists bool) { v := m.claims_email_verified if v == nil { return @@ -7465,10 +9349,10 @@ func (m *RefreshTokenMutation) ClaimsEmailVerified() (r bool, exists bool) { return *v, true } -// OldClaimsEmailVerified returns the old "claims_email_verified" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsEmailVerified returns the old "claims_email_verified" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldClaimsEmailVerified(ctx context.Context) (v bool, err error) { +func (m *UserIdentityMutation) OldClaimsEmailVerified(ctx context.Context) (v bool, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldClaimsEmailVerified is only allowed on UpdateOne operations") } @@ -7483,18 +9367,18 @@ func (m *RefreshTokenMutation) OldClaimsEmailVerified(ctx context.Context) (v bo } // ResetClaimsEmailVerified resets all changes to the "claims_email_verified" field. -func (m *RefreshTokenMutation) ResetClaimsEmailVerified() { +func (m *UserIdentityMutation) ResetClaimsEmailVerified() { m.claims_email_verified = nil } // SetClaimsGroups sets the "claims_groups" field. -func (m *RefreshTokenMutation) SetClaimsGroups(s []string) { +func (m *UserIdentityMutation) SetClaimsGroups(s []string) { m.claims_groups = &s m.appendclaims_groups = nil } // ClaimsGroups returns the value of the "claims_groups" field in the mutation. -func (m *RefreshTokenMutation) ClaimsGroups() (r []string, exists bool) { +func (m *UserIdentityMutation) ClaimsGroups() (r []string, exists bool) { v := m.claims_groups if v == nil { return @@ -7502,10 +9386,10 @@ func (m *RefreshTokenMutation) ClaimsGroups() (r []string, exists bool) { return *v, true } -// OldClaimsGroups returns the old "claims_groups" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldClaimsGroups returns the old "claims_groups" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldClaimsGroups(ctx context.Context) (v []string, err error) { +func (m *UserIdentityMutation) OldClaimsGroups(ctx context.Context) (v []string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldClaimsGroups is only allowed on UpdateOne operations") } @@ -7520,12 +9404,12 @@ func (m *RefreshTokenMutation) OldClaimsGroups(ctx context.Context) (v []string, } // AppendClaimsGroups adds s to the "claims_groups" field. -func (m *RefreshTokenMutation) AppendClaimsGroups(s []string) { +func (m *UserIdentityMutation) AppendClaimsGroups(s []string) { m.appendclaims_groups = append(m.appendclaims_groups, s...) } // AppendedClaimsGroups returns the list of values that were appended to the "claims_groups" field in this mutation. -func (m *RefreshTokenMutation) AppendedClaimsGroups() ([]string, bool) { +func (m *UserIdentityMutation) AppendedClaimsGroups() ([]string, bool) { if len(m.appendclaims_groups) == 0 { return nil, false } @@ -7533,299 +9417,178 @@ func (m *RefreshTokenMutation) AppendedClaimsGroups() ([]string, bool) { } // ClearClaimsGroups clears the value of the "claims_groups" field. -func (m *RefreshTokenMutation) ClearClaimsGroups() { +func (m *UserIdentityMutation) ClearClaimsGroups() { m.claims_groups = nil m.appendclaims_groups = nil - m.clearedFields[refreshtoken.FieldClaimsGroups] = struct{}{} + m.clearedFields[useridentity.FieldClaimsGroups] = struct{}{} } // ClaimsGroupsCleared returns if the "claims_groups" field was cleared in this mutation. -func (m *RefreshTokenMutation) ClaimsGroupsCleared() bool { - _, ok := m.clearedFields[refreshtoken.FieldClaimsGroups] +func (m *UserIdentityMutation) ClaimsGroupsCleared() bool { + _, ok := m.clearedFields[useridentity.FieldClaimsGroups] return ok } // ResetClaimsGroups resets all changes to the "claims_groups" field. 
-func (m *RefreshTokenMutation) ResetClaimsGroups() { +func (m *UserIdentityMutation) ResetClaimsGroups() { m.claims_groups = nil m.appendclaims_groups = nil - delete(m.clearedFields, refreshtoken.FieldClaimsGroups) -} - -// SetClaimsPreferredUsername sets the "claims_preferred_username" field. -func (m *RefreshTokenMutation) SetClaimsPreferredUsername(s string) { - m.claims_preferred_username = &s -} - -// ClaimsPreferredUsername returns the value of the "claims_preferred_username" field in the mutation. -func (m *RefreshTokenMutation) ClaimsPreferredUsername() (r string, exists bool) { - v := m.claims_preferred_username - if v == nil { - return - } - return *v, true -} - -// OldClaimsPreferredUsername returns the old "claims_preferred_username" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldClaimsPreferredUsername(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldClaimsPreferredUsername is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldClaimsPreferredUsername requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldClaimsPreferredUsername: %w", err) - } - return oldValue.ClaimsPreferredUsername, nil -} - -// ResetClaimsPreferredUsername resets all changes to the "claims_preferred_username" field. -func (m *RefreshTokenMutation) ResetClaimsPreferredUsername() { - m.claims_preferred_username = nil -} - -// SetConnectorID sets the "connector_id" field. -func (m *RefreshTokenMutation) SetConnectorID(s string) { - m.connector_id = &s -} - -// ConnectorID returns the value of the "connector_id" field in the mutation. 
-func (m *RefreshTokenMutation) ConnectorID() (r string, exists bool) { - v := m.connector_id - if v == nil { - return - } - return *v, true -} - -// OldConnectorID returns the old "connector_id" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldConnectorID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldConnectorID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldConnectorID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldConnectorID: %w", err) - } - return oldValue.ConnectorID, nil -} - -// ResetConnectorID resets all changes to the "connector_id" field. -func (m *RefreshTokenMutation) ResetConnectorID() { - m.connector_id = nil -} - -// SetConnectorData sets the "connector_data" field. -func (m *RefreshTokenMutation) SetConnectorData(b []byte) { - m.connector_data = &b -} - -// ConnectorData returns the value of the "connector_data" field in the mutation. -func (m *RefreshTokenMutation) ConnectorData() (r []byte, exists bool) { - v := m.connector_data - if v == nil { - return - } - return *v, true -} - -// OldConnectorData returns the old "connector_data" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldConnectorData requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) - } - return oldValue.ConnectorData, nil -} - -// ClearConnectorData clears the value of the "connector_data" field. -func (m *RefreshTokenMutation) ClearConnectorData() { - m.connector_data = nil - m.clearedFields[refreshtoken.FieldConnectorData] = struct{}{} -} - -// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. -func (m *RefreshTokenMutation) ConnectorDataCleared() bool { - _, ok := m.clearedFields[refreshtoken.FieldConnectorData] - return ok -} - -// ResetConnectorData resets all changes to the "connector_data" field. -func (m *RefreshTokenMutation) ResetConnectorData() { - m.connector_data = nil - delete(m.clearedFields, refreshtoken.FieldConnectorData) + delete(m.clearedFields, useridentity.FieldClaimsGroups) } -// SetToken sets the "token" field. -func (m *RefreshTokenMutation) SetToken(s string) { - m.token = &s +// SetConsents sets the "consents" field. +func (m *UserIdentityMutation) SetConsents(b []byte) { + m.consents = &b } -// Token returns the value of the "token" field in the mutation. -func (m *RefreshTokenMutation) Token() (r string, exists bool) { - v := m.token +// Consents returns the value of the "consents" field in the mutation. +func (m *UserIdentityMutation) Consents() (r []byte, exists bool) { + v := m.consents if v == nil { return } return *v, true } -// OldToken returns the old "token" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. 
+// OldConsents returns the old "consents" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldToken(ctx context.Context) (v string, err error) { +func (m *UserIdentityMutation) OldConsents(ctx context.Context) (v []byte, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldToken is only allowed on UpdateOne operations") + return v, errors.New("OldConsents is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldToken requires an ID field in the mutation") + return v, errors.New("OldConsents requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldToken: %w", err) + return v, fmt.Errorf("querying old value for OldConsents: %w", err) } - return oldValue.Token, nil + return oldValue.Consents, nil } -// ResetToken resets all changes to the "token" field. -func (m *RefreshTokenMutation) ResetToken() { - m.token = nil +// ResetConsents resets all changes to the "consents" field. +func (m *UserIdentityMutation) ResetConsents() { + m.consents = nil } -// SetObsoleteToken sets the "obsolete_token" field. -func (m *RefreshTokenMutation) SetObsoleteToken(s string) { - m.obsolete_token = &s +// SetCreatedAt sets the "created_at" field. +func (m *UserIdentityMutation) SetCreatedAt(t time.Time) { + m.created_at = &t } -// ObsoleteToken returns the value of the "obsolete_token" field in the mutation. -func (m *RefreshTokenMutation) ObsoleteToken() (r string, exists bool) { - v := m.obsolete_token +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *UserIdentityMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at if v == nil { return } return *v, true } -// OldObsoleteToken returns the old "obsolete_token" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldCreatedAt returns the old "created_at" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldObsoleteToken(ctx context.Context) (v string, err error) { +func (m *UserIdentityMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldObsoleteToken is only allowed on UpdateOne operations") + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldObsoleteToken requires an ID field in the mutation") + return v, errors.New("OldCreatedAt requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldObsoleteToken: %w", err) + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) } - return oldValue.ObsoleteToken, nil -} - -// ResetObsoleteToken resets all changes to the "obsolete_token" field. -func (m *RefreshTokenMutation) ResetObsoleteToken() { - m.obsolete_token = nil + return oldValue.CreatedAt, nil } -// SetCreatedAt sets the "created_at" field. -func (m *RefreshTokenMutation) SetCreatedAt(t time.Time) { - m.created_at = &t +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserIdentityMutation) ResetCreatedAt() { + m.created_at = nil } -// CreatedAt returns the value of the "created_at" field in the mutation. 
-func (m *RefreshTokenMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at +// SetLastLogin sets the "last_login" field. +func (m *UserIdentityMutation) SetLastLogin(t time.Time) { + m.last_login = &t +} + +// LastLogin returns the value of the "last_login" field in the mutation. +func (m *UserIdentityMutation) LastLogin() (r time.Time, exists bool) { + v := m.last_login if v == nil { return } return *v, true } -// OldCreatedAt returns the old "created_at" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldLastLogin returns the old "last_login" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *RefreshTokenMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { +func (m *UserIdentityMutation) OldLastLogin(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + return v, errors.New("OldLastLogin is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") + return v, errors.New("OldLastLogin requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + return v, fmt.Errorf("querying old value for OldLastLogin: %w", err) } - return oldValue.CreatedAt, nil + return oldValue.LastLogin, nil } -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *RefreshTokenMutation) ResetCreatedAt() { - m.created_at = nil +// ResetLastLogin resets all changes to the "last_login" field. 
+func (m *UserIdentityMutation) ResetLastLogin() { + m.last_login = nil } -// SetLastUsed sets the "last_used" field. -func (m *RefreshTokenMutation) SetLastUsed(t time.Time) { - m.last_used = &t +// SetBlockedUntil sets the "blocked_until" field. +func (m *UserIdentityMutation) SetBlockedUntil(t time.Time) { + m.blocked_until = &t } -// LastUsed returns the value of the "last_used" field in the mutation. -func (m *RefreshTokenMutation) LastUsed() (r time.Time, exists bool) { - v := m.last_used +// BlockedUntil returns the value of the "blocked_until" field in the mutation. +func (m *UserIdentityMutation) BlockedUntil() (r time.Time, exists bool) { + v := m.blocked_until if v == nil { return } return *v, true } -// OldLastUsed returns the old "last_used" field's value of the RefreshToken entity. -// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// OldBlockedUntil returns the old "blocked_until" field's value of the UserIdentity entity. +// If the UserIdentity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *RefreshTokenMutation) OldLastUsed(ctx context.Context) (v time.Time, err error) { +func (m *UserIdentityMutation) OldBlockedUntil(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldLastUsed is only allowed on UpdateOne operations") + return v, errors.New("OldBlockedUntil is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldLastUsed requires an ID field in the mutation") + return v, errors.New("OldBlockedUntil requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldLastUsed: %w", err) + return v, fmt.Errorf("querying old value for OldBlockedUntil: %w", err) } - return oldValue.LastUsed, nil + return oldValue.BlockedUntil, nil } -// ResetLastUsed resets all changes to the "last_used" field. -func (m *RefreshTokenMutation) ResetLastUsed() { - m.last_used = nil +// ResetBlockedUntil resets all changes to the "blocked_until" field. +func (m *UserIdentityMutation) ResetBlockedUntil() { + m.blocked_until = nil } -// Where appends a list predicates to the RefreshTokenMutation builder. -func (m *RefreshTokenMutation) Where(ps ...predicate.RefreshToken) { +// Where appends a list predicates to the UserIdentityMutation builder. +func (m *UserIdentityMutation) Where(ps ...predicate.UserIdentity) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the RefreshTokenMutation builder. Using this method, +// WhereP appends storage-level predicates to the UserIdentityMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. 
-func (m *RefreshTokenMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.RefreshToken, len(ps)) +func (m *UserIdentityMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UserIdentity, len(ps)) for i := range ps { p[i] = ps[i] } @@ -7833,69 +9596,60 @@ func (m *RefreshTokenMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *RefreshTokenMutation) Op() Op { +func (m *UserIdentityMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *RefreshTokenMutation) SetOp(op Op) { +func (m *UserIdentityMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (RefreshToken). -func (m *RefreshTokenMutation) Type() string { +// Type returns the node type of this mutation (UserIdentity). +func (m *UserIdentityMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *RefreshTokenMutation) Fields() []string { - fields := make([]string, 0, 15) - if m.client_id != nil { - fields = append(fields, refreshtoken.FieldClientID) - } - if m.scopes != nil { - fields = append(fields, refreshtoken.FieldScopes) +func (m *UserIdentityMutation) Fields() []string { + fields := make([]string, 0, 12) + if m.user_id != nil { + fields = append(fields, useridentity.FieldUserID) } - if m.nonce != nil { - fields = append(fields, refreshtoken.FieldNonce) + if m.connector_id != nil { + fields = append(fields, useridentity.FieldConnectorID) } if m.claims_user_id != nil { - fields = append(fields, refreshtoken.FieldClaimsUserID) + fields = append(fields, useridentity.FieldClaimsUserID) } if m.claims_username != nil { - fields = append(fields, refreshtoken.FieldClaimsUsername) + fields = append(fields, useridentity.FieldClaimsUsername) + } + if m.claims_preferred_username != nil { + fields = append(fields, useridentity.FieldClaimsPreferredUsername) } if m.claims_email != nil { - fields = append(fields, refreshtoken.FieldClaimsEmail) + fields = append(fields, useridentity.FieldClaimsEmail) } if m.claims_email_verified != nil { - fields = append(fields, refreshtoken.FieldClaimsEmailVerified) + fields = append(fields, useridentity.FieldClaimsEmailVerified) } if m.claims_groups != nil { - fields = append(fields, refreshtoken.FieldClaimsGroups) - } - if m.claims_preferred_username != nil { - fields = append(fields, refreshtoken.FieldClaimsPreferredUsername) - } - if m.connector_id != nil { - fields = append(fields, refreshtoken.FieldConnectorID) + fields = append(fields, useridentity.FieldClaimsGroups) } - if m.connector_data != nil { - fields = append(fields, refreshtoken.FieldConnectorData) - } - if m.token != nil { - fields = append(fields, refreshtoken.FieldToken) - } - if m.obsolete_token != nil { - fields = append(fields, refreshtoken.FieldObsoleteToken) + if m.consents != nil { + fields = append(fields, useridentity.FieldConsents) } if 
m.created_at != nil { - fields = append(fields, refreshtoken.FieldCreatedAt) + fields = append(fields, useridentity.FieldCreatedAt) } - if m.last_used != nil { - fields = append(fields, refreshtoken.FieldLastUsed) + if m.last_login != nil { + fields = append(fields, useridentity.FieldLastLogin) + } + if m.blocked_until != nil { + fields = append(fields, useridentity.FieldBlockedUntil) } return fields } @@ -7903,38 +9657,32 @@ func (m *RefreshTokenMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *RefreshTokenMutation) Field(name string) (ent.Value, bool) { +func (m *UserIdentityMutation) Field(name string) (ent.Value, bool) { switch name { - case refreshtoken.FieldClientID: - return m.ClientID() - case refreshtoken.FieldScopes: - return m.Scopes() - case refreshtoken.FieldNonce: - return m.Nonce() - case refreshtoken.FieldClaimsUserID: + case useridentity.FieldUserID: + return m.UserID() + case useridentity.FieldConnectorID: + return m.ConnectorID() + case useridentity.FieldClaimsUserID: return m.ClaimsUserID() - case refreshtoken.FieldClaimsUsername: + case useridentity.FieldClaimsUsername: return m.ClaimsUsername() - case refreshtoken.FieldClaimsEmail: + case useridentity.FieldClaimsPreferredUsername: + return m.ClaimsPreferredUsername() + case useridentity.FieldClaimsEmail: return m.ClaimsEmail() - case refreshtoken.FieldClaimsEmailVerified: + case useridentity.FieldClaimsEmailVerified: return m.ClaimsEmailVerified() - case refreshtoken.FieldClaimsGroups: + case useridentity.FieldClaimsGroups: return m.ClaimsGroups() - case refreshtoken.FieldClaimsPreferredUsername: - return m.ClaimsPreferredUsername() - case refreshtoken.FieldConnectorID: - return m.ConnectorID() - case refreshtoken.FieldConnectorData: - return m.ConnectorData() - case refreshtoken.FieldToken: - return m.Token() - case 
refreshtoken.FieldObsoleteToken: - return m.ObsoleteToken() - case refreshtoken.FieldCreatedAt: + case useridentity.FieldConsents: + return m.Consents() + case useridentity.FieldCreatedAt: return m.CreatedAt() - case refreshtoken.FieldLastUsed: - return m.LastUsed() + case useridentity.FieldLastLogin: + return m.LastLogin() + case useridentity.FieldBlockedUntil: + return m.BlockedUntil() } return nil, false } @@ -7942,315 +9690,267 @@ func (m *RefreshTokenMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. -func (m *RefreshTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *UserIdentityMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case refreshtoken.FieldClientID: - return m.OldClientID(ctx) - case refreshtoken.FieldScopes: - return m.OldScopes(ctx) - case refreshtoken.FieldNonce: - return m.OldNonce(ctx) - case refreshtoken.FieldClaimsUserID: + case useridentity.FieldUserID: + return m.OldUserID(ctx) + case useridentity.FieldConnectorID: + return m.OldConnectorID(ctx) + case useridentity.FieldClaimsUserID: return m.OldClaimsUserID(ctx) - case refreshtoken.FieldClaimsUsername: + case useridentity.FieldClaimsUsername: return m.OldClaimsUsername(ctx) - case refreshtoken.FieldClaimsEmail: + case useridentity.FieldClaimsPreferredUsername: + return m.OldClaimsPreferredUsername(ctx) + case useridentity.FieldClaimsEmail: return m.OldClaimsEmail(ctx) - case refreshtoken.FieldClaimsEmailVerified: + case useridentity.FieldClaimsEmailVerified: return m.OldClaimsEmailVerified(ctx) - case refreshtoken.FieldClaimsGroups: + case useridentity.FieldClaimsGroups: return m.OldClaimsGroups(ctx) - case refreshtoken.FieldClaimsPreferredUsername: - return m.OldClaimsPreferredUsername(ctx) - case refreshtoken.FieldConnectorID: - return 
m.OldConnectorID(ctx) - case refreshtoken.FieldConnectorData: - return m.OldConnectorData(ctx) - case refreshtoken.FieldToken: - return m.OldToken(ctx) - case refreshtoken.FieldObsoleteToken: - return m.OldObsoleteToken(ctx) - case refreshtoken.FieldCreatedAt: + case useridentity.FieldConsents: + return m.OldConsents(ctx) + case useridentity.FieldCreatedAt: return m.OldCreatedAt(ctx) - case refreshtoken.FieldLastUsed: - return m.OldLastUsed(ctx) + case useridentity.FieldLastLogin: + return m.OldLastLogin(ctx) + case useridentity.FieldBlockedUntil: + return m.OldBlockedUntil(ctx) } - return nil, fmt.Errorf("unknown RefreshToken field %s", name) + return nil, fmt.Errorf("unknown UserIdentity field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *RefreshTokenMutation) SetField(name string, value ent.Value) error { +func (m *UserIdentityMutation) SetField(name string, value ent.Value) error { switch name { - case refreshtoken.FieldClientID: + case useridentity.FieldUserID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetClientID(v) + m.SetUserID(v) return nil - case refreshtoken.FieldScopes: - v, ok := value.([]string) + case useridentity.FieldConnectorID: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetScopes(v) + m.SetConnectorID(v) return nil - case refreshtoken.FieldNonce: + case useridentity.FieldClaimsUserID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetNonce(v) + m.SetClaimsUserID(v) return nil - case refreshtoken.FieldClaimsUserID: + case useridentity.FieldClaimsUsername: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetClaimsUserID(v) + m.SetClaimsUsername(v) return nil - case 
refreshtoken.FieldClaimsUsername: + case useridentity.FieldClaimsPreferredUsername: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetClaimsUsername(v) + m.SetClaimsPreferredUsername(v) return nil - case refreshtoken.FieldClaimsEmail: + case useridentity.FieldClaimsEmail: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetClaimsEmail(v) return nil - case refreshtoken.FieldClaimsEmailVerified: + case useridentity.FieldClaimsEmailVerified: v, ok := value.(bool) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetClaimsEmailVerified(v) return nil - case refreshtoken.FieldClaimsGroups: + case useridentity.FieldClaimsGroups: v, ok := value.([]string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetClaimsGroups(v) return nil - case refreshtoken.FieldClaimsPreferredUsername: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetClaimsPreferredUsername(v) - return nil - case refreshtoken.FieldConnectorID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetConnectorID(v) - return nil - case refreshtoken.FieldConnectorData: + case useridentity.FieldConsents: v, ok := value.([]byte) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetConnectorData(v) - return nil - case refreshtoken.FieldToken: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetToken(v) + m.SetConsents(v) return nil - case refreshtoken.FieldObsoleteToken: - v, ok := value.(string) + case useridentity.FieldCreatedAt: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetObsoleteToken(v) + m.SetCreatedAt(v) return nil - case refreshtoken.FieldCreatedAt: + case 
useridentity.FieldLastLogin: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCreatedAt(v) + m.SetLastLogin(v) return nil - case refreshtoken.FieldLastUsed: + case useridentity.FieldBlockedUntil: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetLastUsed(v) + m.SetBlockedUntil(v) return nil } - return fmt.Errorf("unknown RefreshToken field %s", name) + return fmt.Errorf("unknown UserIdentity field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *RefreshTokenMutation) AddedFields() []string { +func (m *UserIdentityMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *RefreshTokenMutation) AddedField(name string) (ent.Value, bool) { +func (m *UserIdentityMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *RefreshTokenMutation) AddField(name string, value ent.Value) error { +func (m *UserIdentityMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown RefreshToken numeric field %s", name) + return fmt.Errorf("unknown UserIdentity numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. 
-func (m *RefreshTokenMutation) ClearedFields() []string { +func (m *UserIdentityMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(refreshtoken.FieldScopes) { - fields = append(fields, refreshtoken.FieldScopes) - } - if m.FieldCleared(refreshtoken.FieldClaimsGroups) { - fields = append(fields, refreshtoken.FieldClaimsGroups) - } - if m.FieldCleared(refreshtoken.FieldConnectorData) { - fields = append(fields, refreshtoken.FieldConnectorData) + if m.FieldCleared(useridentity.FieldClaimsGroups) { + fields = append(fields, useridentity.FieldClaimsGroups) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *RefreshTokenMutation) FieldCleared(name string) bool { +func (m *UserIdentityMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *RefreshTokenMutation) ClearField(name string) error { +func (m *UserIdentityMutation) ClearField(name string) error { switch name { - case refreshtoken.FieldScopes: - m.ClearScopes() - return nil - case refreshtoken.FieldClaimsGroups: + case useridentity.FieldClaimsGroups: m.ClearClaimsGroups() return nil - case refreshtoken.FieldConnectorData: - m.ClearConnectorData() - return nil } - return fmt.Errorf("unknown RefreshToken nullable field %s", name) + return fmt.Errorf("unknown UserIdentity nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. 
-func (m *RefreshTokenMutation) ResetField(name string) error { +func (m *UserIdentityMutation) ResetField(name string) error { switch name { - case refreshtoken.FieldClientID: - m.ResetClientID() - return nil - case refreshtoken.FieldScopes: - m.ResetScopes() + case useridentity.FieldUserID: + m.ResetUserID() return nil - case refreshtoken.FieldNonce: - m.ResetNonce() + case useridentity.FieldConnectorID: + m.ResetConnectorID() return nil - case refreshtoken.FieldClaimsUserID: + case useridentity.FieldClaimsUserID: m.ResetClaimsUserID() return nil - case refreshtoken.FieldClaimsUsername: + case useridentity.FieldClaimsUsername: m.ResetClaimsUsername() return nil - case refreshtoken.FieldClaimsEmail: + case useridentity.FieldClaimsPreferredUsername: + m.ResetClaimsPreferredUsername() + return nil + case useridentity.FieldClaimsEmail: m.ResetClaimsEmail() return nil - case refreshtoken.FieldClaimsEmailVerified: + case useridentity.FieldClaimsEmailVerified: m.ResetClaimsEmailVerified() return nil - case refreshtoken.FieldClaimsGroups: + case useridentity.FieldClaimsGroups: m.ResetClaimsGroups() return nil - case refreshtoken.FieldClaimsPreferredUsername: - m.ResetClaimsPreferredUsername() - return nil - case refreshtoken.FieldConnectorID: - m.ResetConnectorID() - return nil - case refreshtoken.FieldConnectorData: - m.ResetConnectorData() - return nil - case refreshtoken.FieldToken: - m.ResetToken() - return nil - case refreshtoken.FieldObsoleteToken: - m.ResetObsoleteToken() + case useridentity.FieldConsents: + m.ResetConsents() return nil - case refreshtoken.FieldCreatedAt: + case useridentity.FieldCreatedAt: m.ResetCreatedAt() return nil - case refreshtoken.FieldLastUsed: - m.ResetLastUsed() + case useridentity.FieldLastLogin: + m.ResetLastLogin() + return nil + case useridentity.FieldBlockedUntil: + m.ResetBlockedUntil() return nil } - return fmt.Errorf("unknown RefreshToken field %s", name) + return fmt.Errorf("unknown UserIdentity field %s", name) } // 
AddedEdges returns all edge names that were set/added in this mutation. -func (m *RefreshTokenMutation) AddedEdges() []string { +func (m *UserIdentityMutation) AddedEdges() []string { edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *RefreshTokenMutation) AddedIDs(name string) []ent.Value { +func (m *UserIdentityMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *RefreshTokenMutation) RemovedEdges() []string { +func (m *UserIdentityMutation) RemovedEdges() []string { edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *RefreshTokenMutation) RemovedIDs(name string) []ent.Value { +func (m *UserIdentityMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *RefreshTokenMutation) ClearedEdges() []string { +func (m *UserIdentityMutation) ClearedEdges() []string { edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *RefreshTokenMutation) EdgeCleared(name string) bool { +func (m *UserIdentityMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *RefreshTokenMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown RefreshToken unique edge %s", name) +func (m *UserIdentityMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown UserIdentity unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. 
// It returns an error if the edge is not defined in the schema. -func (m *RefreshTokenMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown RefreshToken edge %s", name) +func (m *UserIdentityMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown UserIdentity edge %s", name) } diff --git a/storage/ent/db/oauth2client.go b/storage/ent/db/oauth2client.go index f8491be2..c6671aba 100644 --- a/storage/ent/db/oauth2client.go +++ b/storage/ent/db/oauth2client.go @@ -28,8 +28,10 @@ type OAuth2Client struct { // Name holds the value of the "name" field. Name string `json:"name,omitempty"` // LogoURL holds the value of the "logo_url" field. - LogoURL string `json:"logo_url,omitempty"` - selectValues sql.SelectValues + LogoURL string `json:"logo_url,omitempty"` + // AllowedConnectors holds the value of the "allowed_connectors" field. + AllowedConnectors []string `json:"allowed_connectors,omitempty"` + selectValues sql.SelectValues } // scanValues returns the types for scanning values from sql.Rows. 
@@ -37,7 +39,7 @@ func (*OAuth2Client) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case oauth2client.FieldRedirectUris, oauth2client.FieldTrustedPeers: + case oauth2client.FieldRedirectUris, oauth2client.FieldTrustedPeers, oauth2client.FieldAllowedConnectors: values[i] = new([]byte) case oauth2client.FieldPublic: values[i] = new(sql.NullBool) @@ -104,6 +106,14 @@ func (_m *OAuth2Client) assignValues(columns []string, values []any) error { } else if value.Valid { _m.LogoURL = value.String } + case oauth2client.FieldAllowedConnectors: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field allowed_connectors", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.AllowedConnectors); err != nil { + return fmt.Errorf("unmarshal field allowed_connectors: %w", err) + } + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -157,6 +167,9 @@ func (_m *OAuth2Client) String() string { builder.WriteString(", ") builder.WriteString("logo_url=") builder.WriteString(_m.LogoURL) + builder.WriteString(", ") + builder.WriteString("allowed_connectors=") + builder.WriteString(fmt.Sprintf("%v", _m.AllowedConnectors)) builder.WriteByte(')') return builder.String() } diff --git a/storage/ent/db/oauth2client/oauth2client.go b/storage/ent/db/oauth2client/oauth2client.go index 08df76be..529f4c8d 100644 --- a/storage/ent/db/oauth2client/oauth2client.go +++ b/storage/ent/db/oauth2client/oauth2client.go @@ -23,6 +23,8 @@ const ( FieldName = "name" // FieldLogoURL holds the string denoting the logo_url field in the database. FieldLogoURL = "logo_url" + // FieldAllowedConnectors holds the string denoting the allowed_connectors field in the database. + FieldAllowedConnectors = "allowed_connectors" // Table holds the table name of the oauth2client in the database. 
Table = "oauth2clients" ) @@ -36,6 +38,7 @@ var Columns = []string{ FieldPublic, FieldName, FieldLogoURL, + FieldAllowedConnectors, } // ValidColumn reports if the column name is valid (part of the table columns). diff --git a/storage/ent/db/oauth2client/where.go b/storage/ent/db/oauth2client/where.go index 55aee79b..1425bf7e 100644 --- a/storage/ent/db/oauth2client/where.go +++ b/storage/ent/db/oauth2client/where.go @@ -307,6 +307,16 @@ func LogoURLContainsFold(v string) predicate.OAuth2Client { return predicate.OAuth2Client(sql.FieldContainsFold(FieldLogoURL, v)) } +// AllowedConnectorsIsNil applies the IsNil predicate on the "allowed_connectors" field. +func AllowedConnectorsIsNil() predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIsNull(FieldAllowedConnectors)) +} + +// AllowedConnectorsNotNil applies the NotNil predicate on the "allowed_connectors" field. +func AllowedConnectorsNotNil() predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotNull(FieldAllowedConnectors)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.OAuth2Client) predicate.OAuth2Client { return predicate.OAuth2Client(sql.AndPredicates(predicates...)) diff --git a/storage/ent/db/oauth2client_create.go b/storage/ent/db/oauth2client_create.go index 2bbf1f50..fe29ab2a 100644 --- a/storage/ent/db/oauth2client_create.go +++ b/storage/ent/db/oauth2client_create.go @@ -55,6 +55,12 @@ func (_c *OAuth2ClientCreate) SetLogoURL(v string) *OAuth2ClientCreate { return _c } +// SetAllowedConnectors sets the "allowed_connectors" field. +func (_c *OAuth2ClientCreate) SetAllowedConnectors(v []string) *OAuth2ClientCreate { + _c.mutation.SetAllowedConnectors(v) + return _c +} + // SetID sets the "id" field. 
func (_c *OAuth2ClientCreate) SetID(v string) *OAuth2ClientCreate { _c.mutation.SetID(v) @@ -186,6 +192,10 @@ func (_c *OAuth2ClientCreate) createSpec() (*OAuth2Client, *sqlgraph.CreateSpec) _spec.SetField(oauth2client.FieldLogoURL, field.TypeString, value) _node.LogoURL = value } + if value, ok := _c.mutation.AllowedConnectors(); ok { + _spec.SetField(oauth2client.FieldAllowedConnectors, field.TypeJSON, value) + _node.AllowedConnectors = value + } return _node, _spec } diff --git a/storage/ent/db/oauth2client_update.go b/storage/ent/db/oauth2client_update.go index 69f2c044..3fdbdbdc 100644 --- a/storage/ent/db/oauth2client_update.go +++ b/storage/ent/db/oauth2client_update.go @@ -120,6 +120,24 @@ func (_u *OAuth2ClientUpdate) SetNillableLogoURL(v *string) *OAuth2ClientUpdate return _u } +// SetAllowedConnectors sets the "allowed_connectors" field. +func (_u *OAuth2ClientUpdate) SetAllowedConnectors(v []string) *OAuth2ClientUpdate { + _u.mutation.SetAllowedConnectors(v) + return _u +} + +// AppendAllowedConnectors appends value to the "allowed_connectors" field. +func (_u *OAuth2ClientUpdate) AppendAllowedConnectors(v []string) *OAuth2ClientUpdate { + _u.mutation.AppendAllowedConnectors(v) + return _u +} + +// ClearAllowedConnectors clears the value of the "allowed_connectors" field. +func (_u *OAuth2ClientUpdate) ClearAllowedConnectors() *OAuth2ClientUpdate { + _u.mutation.ClearAllowedConnectors() + return _u +} + // Mutation returns the OAuth2ClientMutation object of the builder. 
func (_u *OAuth2ClientUpdate) Mutation() *OAuth2ClientMutation { return _u.mutation @@ -218,6 +236,17 @@ func (_u *OAuth2ClientUpdate) sqlSave(ctx context.Context) (_node int, err error if value, ok := _u.mutation.LogoURL(); ok { _spec.SetField(oauth2client.FieldLogoURL, field.TypeString, value) } + if value, ok := _u.mutation.AllowedConnectors(); ok { + _spec.SetField(oauth2client.FieldAllowedConnectors, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedAllowedConnectors(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, oauth2client.FieldAllowedConnectors, value) + }) + } + if _u.mutation.AllowedConnectorsCleared() { + _spec.ClearField(oauth2client.FieldAllowedConnectors, field.TypeJSON) + } if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{oauth2client.Label} @@ -330,6 +359,24 @@ func (_u *OAuth2ClientUpdateOne) SetNillableLogoURL(v *string) *OAuth2ClientUpda return _u } +// SetAllowedConnectors sets the "allowed_connectors" field. +func (_u *OAuth2ClientUpdateOne) SetAllowedConnectors(v []string) *OAuth2ClientUpdateOne { + _u.mutation.SetAllowedConnectors(v) + return _u +} + +// AppendAllowedConnectors appends value to the "allowed_connectors" field. +func (_u *OAuth2ClientUpdateOne) AppendAllowedConnectors(v []string) *OAuth2ClientUpdateOne { + _u.mutation.AppendAllowedConnectors(v) + return _u +} + +// ClearAllowedConnectors clears the value of the "allowed_connectors" field. +func (_u *OAuth2ClientUpdateOne) ClearAllowedConnectors() *OAuth2ClientUpdateOne { + _u.mutation.ClearAllowedConnectors() + return _u +} + // Mutation returns the OAuth2ClientMutation object of the builder. 
func (_u *OAuth2ClientUpdateOne) Mutation() *OAuth2ClientMutation { return _u.mutation @@ -458,6 +505,17 @@ func (_u *OAuth2ClientUpdateOne) sqlSave(ctx context.Context) (_node *OAuth2Clie if value, ok := _u.mutation.LogoURL(); ok { _spec.SetField(oauth2client.FieldLogoURL, field.TypeString, value) } + if value, ok := _u.mutation.AllowedConnectors(); ok { + _spec.SetField(oauth2client.FieldAllowedConnectors, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedAllowedConnectors(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, oauth2client.FieldAllowedConnectors, value) + }) + } + if _u.mutation.AllowedConnectorsCleared() { + _spec.ClearField(oauth2client.FieldAllowedConnectors, field.TypeJSON) + } _node = &OAuth2Client{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/storage/ent/db/predicate/predicate.go b/storage/ent/db/predicate/predicate.go index ed07a071..9e977a2a 100644 --- a/storage/ent/db/predicate/predicate.go +++ b/storage/ent/db/predicate/predicate.go @@ -12,6 +12,9 @@ type AuthCode func(*sql.Selector) // AuthRequest is the predicate function for authrequest builders. type AuthRequest func(*sql.Selector) +// AuthSession is the predicate function for authsession builders. +type AuthSession func(*sql.Selector) + // Connector is the predicate function for connector builders. type Connector func(*sql.Selector) @@ -35,3 +38,6 @@ type Password func(*sql.Selector) // RefreshToken is the predicate function for refreshtoken builders. type RefreshToken func(*sql.Selector) + +// UserIdentity is the predicate function for useridentity builders. 
+type UserIdentity func(*sql.Selector) diff --git a/storage/ent/db/runtime.go b/storage/ent/db/runtime.go index fdb47bd7..98c12ecc 100644 --- a/storage/ent/db/runtime.go +++ b/storage/ent/db/runtime.go @@ -7,6 +7,7 @@ import ( "github.com/dexidp/dex/storage/ent/db/authcode" "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/authsession" "github.com/dexidp/dex/storage/ent/db/connector" "github.com/dexidp/dex/storage/ent/db/devicerequest" "github.com/dexidp/dex/storage/ent/db/devicetoken" @@ -15,6 +16,7 @@ import ( "github.com/dexidp/dex/storage/ent/db/offlinesession" "github.com/dexidp/dex/storage/ent/db/password" "github.com/dexidp/dex/storage/ent/db/refreshtoken" + "github.com/dexidp/dex/storage/ent/db/useridentity" "github.com/dexidp/dex/storage/ent/schema" ) @@ -86,6 +88,20 @@ func init() { authrequestDescID := authrequestFields[0].Descriptor() // authrequest.IDValidator is a validator for the "id" field. It is called by the builders before save. authrequest.IDValidator = authrequestDescID.Validators[0].(func(string) error) + authsessionFields := schema.AuthSession{}.Fields() + _ = authsessionFields + // authsessionDescIPAddress is the schema descriptor for ip_address field. + authsessionDescIPAddress := authsessionFields[4].Descriptor() + // authsession.DefaultIPAddress holds the default value on creation for the ip_address field. + authsession.DefaultIPAddress = authsessionDescIPAddress.Default.(string) + // authsessionDescUserAgent is the schema descriptor for user_agent field. + authsessionDescUserAgent := authsessionFields[5].Descriptor() + // authsession.DefaultUserAgent holds the default value on creation for the user_agent field. + authsession.DefaultUserAgent = authsessionDescUserAgent.Default.(string) + // authsessionDescID is the schema descriptor for id field. + authsessionDescID := authsessionFields[0].Descriptor() + // authsession.IDValidator is a validator for the "id" field. 
It is called by the builders before save. + authsession.IDValidator = authsessionDescID.Validators[0].(func(string) error) connectorFields := schema.Connector{}.Fields() _ = connectorFields // connectorDescType is the schema descriptor for type field. @@ -274,4 +290,38 @@ func init() { refreshtokenDescID := refreshtokenFields[0].Descriptor() // refreshtoken.IDValidator is a validator for the "id" field. It is called by the builders before save. refreshtoken.IDValidator = refreshtokenDescID.Validators[0].(func(string) error) + useridentityFields := schema.UserIdentity{}.Fields() + _ = useridentityFields + // useridentityDescUserID is the schema descriptor for user_id field. + useridentityDescUserID := useridentityFields[1].Descriptor() + // useridentity.UserIDValidator is a validator for the "user_id" field. It is called by the builders before save. + useridentity.UserIDValidator = useridentityDescUserID.Validators[0].(func(string) error) + // useridentityDescConnectorID is the schema descriptor for connector_id field. + useridentityDescConnectorID := useridentityFields[2].Descriptor() + // useridentity.ConnectorIDValidator is a validator for the "connector_id" field. It is called by the builders before save. + useridentity.ConnectorIDValidator = useridentityDescConnectorID.Validators[0].(func(string) error) + // useridentityDescClaimsUserID is the schema descriptor for claims_user_id field. + useridentityDescClaimsUserID := useridentityFields[3].Descriptor() + // useridentity.DefaultClaimsUserID holds the default value on creation for the claims_user_id field. + useridentity.DefaultClaimsUserID = useridentityDescClaimsUserID.Default.(string) + // useridentityDescClaimsUsername is the schema descriptor for claims_username field. + useridentityDescClaimsUsername := useridentityFields[4].Descriptor() + // useridentity.DefaultClaimsUsername holds the default value on creation for the claims_username field. 
+ useridentity.DefaultClaimsUsername = useridentityDescClaimsUsername.Default.(string) + // useridentityDescClaimsPreferredUsername is the schema descriptor for claims_preferred_username field. + useridentityDescClaimsPreferredUsername := useridentityFields[5].Descriptor() + // useridentity.DefaultClaimsPreferredUsername holds the default value on creation for the claims_preferred_username field. + useridentity.DefaultClaimsPreferredUsername = useridentityDescClaimsPreferredUsername.Default.(string) + // useridentityDescClaimsEmail is the schema descriptor for claims_email field. + useridentityDescClaimsEmail := useridentityFields[6].Descriptor() + // useridentity.DefaultClaimsEmail holds the default value on creation for the claims_email field. + useridentity.DefaultClaimsEmail = useridentityDescClaimsEmail.Default.(string) + // useridentityDescClaimsEmailVerified is the schema descriptor for claims_email_verified field. + useridentityDescClaimsEmailVerified := useridentityFields[7].Descriptor() + // useridentity.DefaultClaimsEmailVerified holds the default value on creation for the claims_email_verified field. + useridentity.DefaultClaimsEmailVerified = useridentityDescClaimsEmailVerified.Default.(bool) + // useridentityDescID is the schema descriptor for id field. + useridentityDescID := useridentityFields[0].Descriptor() + // useridentity.IDValidator is a validator for the "id" field. It is called by the builders before save. + useridentity.IDValidator = useridentityDescID.Validators[0].(func(string) error) } diff --git a/storage/ent/db/tx.go b/storage/ent/db/tx.go index 42ba241a..94f27935 100644 --- a/storage/ent/db/tx.go +++ b/storage/ent/db/tx.go @@ -16,6 +16,8 @@ type Tx struct { AuthCode *AuthCodeClient // AuthRequest is the client for interacting with the AuthRequest builders. AuthRequest *AuthRequestClient + // AuthSession is the client for interacting with the AuthSession builders. 
+ AuthSession *AuthSessionClient // Connector is the client for interacting with the Connector builders. Connector *ConnectorClient // DeviceRequest is the client for interacting with the DeviceRequest builders. @@ -32,6 +34,8 @@ type Tx struct { Password *PasswordClient // RefreshToken is the client for interacting with the RefreshToken builders. RefreshToken *RefreshTokenClient + // UserIdentity is the client for interacting with the UserIdentity builders. + UserIdentity *UserIdentityClient // lazily loaded. client *Client @@ -165,6 +169,7 @@ func (tx *Tx) Client() *Client { func (tx *Tx) init() { tx.AuthCode = NewAuthCodeClient(tx.config) tx.AuthRequest = NewAuthRequestClient(tx.config) + tx.AuthSession = NewAuthSessionClient(tx.config) tx.Connector = NewConnectorClient(tx.config) tx.DeviceRequest = NewDeviceRequestClient(tx.config) tx.DeviceToken = NewDeviceTokenClient(tx.config) @@ -173,6 +178,7 @@ func (tx *Tx) init() { tx.OfflineSession = NewOfflineSessionClient(tx.config) tx.Password = NewPasswordClient(tx.config) tx.RefreshToken = NewRefreshTokenClient(tx.config) + tx.UserIdentity = NewUserIdentityClient(tx.config) } // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. diff --git a/storage/ent/db/useridentity.go b/storage/ent/db/useridentity.go new file mode 100644 index 00000000..7127299b --- /dev/null +++ b/storage/ent/db/useridentity.go @@ -0,0 +1,232 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/useridentity" +) + +// UserIdentity is the model entity for the UserIdentity schema. +type UserIdentity struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // UserID holds the value of the "user_id" field. + UserID string `json:"user_id,omitempty"` + // ConnectorID holds the value of the "connector_id" field. 
+ ConnectorID string `json:"connector_id,omitempty"` + // ClaimsUserID holds the value of the "claims_user_id" field. + ClaimsUserID string `json:"claims_user_id,omitempty"` + // ClaimsUsername holds the value of the "claims_username" field. + ClaimsUsername string `json:"claims_username,omitempty"` + // ClaimsPreferredUsername holds the value of the "claims_preferred_username" field. + ClaimsPreferredUsername string `json:"claims_preferred_username,omitempty"` + // ClaimsEmail holds the value of the "claims_email" field. + ClaimsEmail string `json:"claims_email,omitempty"` + // ClaimsEmailVerified holds the value of the "claims_email_verified" field. + ClaimsEmailVerified bool `json:"claims_email_verified,omitempty"` + // ClaimsGroups holds the value of the "claims_groups" field. + ClaimsGroups []string `json:"claims_groups,omitempty"` + // Consents holds the value of the "consents" field. + Consents []byte `json:"consents,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // LastLogin holds the value of the "last_login" field. + LastLogin time.Time `json:"last_login,omitempty"` + // BlockedUntil holds the value of the "blocked_until" field. + BlockedUntil time.Time `json:"blocked_until,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*UserIdentity) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case useridentity.FieldClaimsGroups, useridentity.FieldConsents: + values[i] = new([]byte) + case useridentity.FieldClaimsEmailVerified: + values[i] = new(sql.NullBool) + case useridentity.FieldID, useridentity.FieldUserID, useridentity.FieldConnectorID, useridentity.FieldClaimsUserID, useridentity.FieldClaimsUsername, useridentity.FieldClaimsPreferredUsername, useridentity.FieldClaimsEmail: + values[i] = new(sql.NullString) + case useridentity.FieldCreatedAt, useridentity.FieldLastLogin, useridentity.FieldBlockedUntil: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserIdentity fields. +func (_m *UserIdentity) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case useridentity.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + _m.ID = value.String + } + case useridentity.FieldUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.String + } + case useridentity.FieldConnectorID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field connector_id", values[i]) + } else if value.Valid { + _m.ConnectorID = value.String + } + case useridentity.FieldClaimsUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_user_id", values[i]) + } else if value.Valid 
{ + _m.ClaimsUserID = value.String + } + case useridentity.FieldClaimsUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_username", values[i]) + } else if value.Valid { + _m.ClaimsUsername = value.String + } + case useridentity.FieldClaimsPreferredUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_preferred_username", values[i]) + } else if value.Valid { + _m.ClaimsPreferredUsername = value.String + } + case useridentity.FieldClaimsEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_email", values[i]) + } else if value.Valid { + _m.ClaimsEmail = value.String + } + case useridentity.FieldClaimsEmailVerified: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field claims_email_verified", values[i]) + } else if value.Valid { + _m.ClaimsEmailVerified = value.Bool + } + case useridentity.FieldClaimsGroups: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field claims_groups", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.ClaimsGroups); err != nil { + return fmt.Errorf("unmarshal field claims_groups: %w", err) + } + } + case useridentity.FieldConsents: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field consents", values[i]) + } else if value != nil { + _m.Consents = *value + } + case useridentity.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case useridentity.FieldLastLogin: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_login", values[i]) + } else if value.Valid { + _m.LastLogin = 
value.Time + } + case useridentity.FieldBlockedUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field blocked_until", values[i]) + } else if value.Valid { + _m.BlockedUntil = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserIdentity. +// This includes values selected through modifiers, order, etc. +func (_m *UserIdentity) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this UserIdentity. +// Note that you need to call UserIdentity.Unwrap() before calling this method if this UserIdentity +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserIdentity) Update() *UserIdentityUpdateOne { + return NewUserIdentityClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserIdentity entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserIdentity) Unwrap() *UserIdentity { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("db: UserIdentity is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *UserIdentity) String() string { + var builder strings.Builder + builder.WriteString("UserIdentity(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("user_id=") + builder.WriteString(_m.UserID) + builder.WriteString(", ") + builder.WriteString("connector_id=") + builder.WriteString(_m.ConnectorID) + builder.WriteString(", ") + builder.WriteString("claims_user_id=") + builder.WriteString(_m.ClaimsUserID) + builder.WriteString(", ") + builder.WriteString("claims_username=") + builder.WriteString(_m.ClaimsUsername) + builder.WriteString(", ") + builder.WriteString("claims_preferred_username=") + builder.WriteString(_m.ClaimsPreferredUsername) + builder.WriteString(", ") + builder.WriteString("claims_email=") + builder.WriteString(_m.ClaimsEmail) + builder.WriteString(", ") + builder.WriteString("claims_email_verified=") + builder.WriteString(fmt.Sprintf("%v", _m.ClaimsEmailVerified)) + builder.WriteString(", ") + builder.WriteString("claims_groups=") + builder.WriteString(fmt.Sprintf("%v", _m.ClaimsGroups)) + builder.WriteString(", ") + builder.WriteString("consents=") + builder.WriteString(fmt.Sprintf("%v", _m.Consents)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("last_login=") + builder.WriteString(_m.LastLogin.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("blocked_until=") + builder.WriteString(_m.BlockedUntil.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// UserIdentities is a parsable slice of UserIdentity. +type UserIdentities []*UserIdentity diff --git a/storage/ent/db/useridentity/useridentity.go b/storage/ent/db/useridentity/useridentity.go new file mode 100644 index 00000000..f08d74ec --- /dev/null +++ b/storage/ent/db/useridentity/useridentity.go @@ -0,0 +1,144 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package useridentity + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the useridentity type in the database. + Label = "user_identity" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldConnectorID holds the string denoting the connector_id field in the database. + FieldConnectorID = "connector_id" + // FieldClaimsUserID holds the string denoting the claims_user_id field in the database. + FieldClaimsUserID = "claims_user_id" + // FieldClaimsUsername holds the string denoting the claims_username field in the database. + FieldClaimsUsername = "claims_username" + // FieldClaimsPreferredUsername holds the string denoting the claims_preferred_username field in the database. + FieldClaimsPreferredUsername = "claims_preferred_username" + // FieldClaimsEmail holds the string denoting the claims_email field in the database. + FieldClaimsEmail = "claims_email" + // FieldClaimsEmailVerified holds the string denoting the claims_email_verified field in the database. + FieldClaimsEmailVerified = "claims_email_verified" + // FieldClaimsGroups holds the string denoting the claims_groups field in the database. + FieldClaimsGroups = "claims_groups" + // FieldConsents holds the string denoting the consents field in the database. + FieldConsents = "consents" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldLastLogin holds the string denoting the last_login field in the database. + FieldLastLogin = "last_login" + // FieldBlockedUntil holds the string denoting the blocked_until field in the database. + FieldBlockedUntil = "blocked_until" + // Table holds the table name of the useridentity in the database. + Table = "user_identities" +) + +// Columns holds all SQL columns for useridentity fields. 
+var Columns = []string{ + FieldID, + FieldUserID, + FieldConnectorID, + FieldClaimsUserID, + FieldClaimsUsername, + FieldClaimsPreferredUsername, + FieldClaimsEmail, + FieldClaimsEmailVerified, + FieldClaimsGroups, + FieldConsents, + FieldCreatedAt, + FieldLastLogin, + FieldBlockedUntil, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // UserIDValidator is a validator for the "user_id" field. It is called by the builders before save. + UserIDValidator func(string) error + // ConnectorIDValidator is a validator for the "connector_id" field. It is called by the builders before save. + ConnectorIDValidator func(string) error + // DefaultClaimsUserID holds the default value on creation for the "claims_user_id" field. + DefaultClaimsUserID string + // DefaultClaimsUsername holds the default value on creation for the "claims_username" field. + DefaultClaimsUsername string + // DefaultClaimsPreferredUsername holds the default value on creation for the "claims_preferred_username" field. + DefaultClaimsPreferredUsername string + // DefaultClaimsEmail holds the default value on creation for the "claims_email" field. + DefaultClaimsEmail string + // DefaultClaimsEmailVerified holds the default value on creation for the "claims_email_verified" field. + DefaultClaimsEmailVerified bool + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the UserIdentity queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. 
+func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByConnectorID orders the results by the connector_id field. +func ByConnectorID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConnectorID, opts...).ToFunc() +} + +// ByClaimsUserID orders the results by the claims_user_id field. +func ByClaimsUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUserID, opts...).ToFunc() +} + +// ByClaimsUsername orders the results by the claims_username field. +func ByClaimsUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUsername, opts...).ToFunc() +} + +// ByClaimsPreferredUsername orders the results by the claims_preferred_username field. +func ByClaimsPreferredUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsPreferredUsername, opts...).ToFunc() +} + +// ByClaimsEmail orders the results by the claims_email field. +func ByClaimsEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmail, opts...).ToFunc() +} + +// ByClaimsEmailVerified orders the results by the claims_email_verified field. +func ByClaimsEmailVerified(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmailVerified, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByLastLogin orders the results by the last_login field. +func ByLastLogin(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastLogin, opts...).ToFunc() +} + +// ByBlockedUntil orders the results by the blocked_until field. 
+func ByBlockedUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBlockedUntil, opts...).ToFunc() +} diff --git a/storage/ent/db/useridentity/where.go b/storage/ent/db/useridentity/where.go new file mode 100644 index 00000000..201d340f --- /dev/null +++ b/storage/ent/db/useridentity/where.go @@ -0,0 +1,705 @@ +// Code generated by ent, DO NOT EDIT. + +package useridentity + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldID, id)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldUserID, v)) +} + +// ConnectorID applies equality check predicate on the "connector_id" field. It's identical to ConnectorIDEQ. +func ConnectorID(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldConnectorID, v)) +} + +// ClaimsUserID applies equality check predicate on the "claims_user_id" field. It's identical to ClaimsUserIDEQ. +func ClaimsUserID(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUsername applies equality check predicate on the "claims_username" field. It's identical to ClaimsUsernameEQ. +func ClaimsUsername(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsPreferredUsername applies equality check predicate on the "claims_preferred_username" field. It's identical to ClaimsPreferredUsernameEQ. +func ClaimsPreferredUsername(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsEmail applies equality check predicate on the "claims_email" field. It's identical to ClaimsEmailEQ. 
+func ClaimsEmail(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerified applies equality check predicate on the "claims_email_verified" field. It's identical to ClaimsEmailVerifiedEQ. +func ClaimsEmailVerified(v bool) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// Consents applies equality check predicate on the "consents" field. It's identical to ConsentsEQ. +func Consents(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldConsents, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldCreatedAt, v)) +} + +// LastLogin applies equality check predicate on the "last_login" field. It's identical to LastLoginEQ. +func LastLogin(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldLastLogin, v)) +} + +// BlockedUntil applies equality check predicate on the "blocked_until" field. It's identical to BlockedUntilEQ. +func BlockedUntil(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldBlockedUntil, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. 
+func UserIDNotIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldUserID, vs...)) +} + +// UserIDGT applies the GT predicate on the "user_id" field. +func UserIDGT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldUserID, v)) +} + +// UserIDGTE applies the GTE predicate on the "user_id" field. +func UserIDGTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldUserID, v)) +} + +// UserIDLT applies the LT predicate on the "user_id" field. +func UserIDLT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldUserID, v)) +} + +// UserIDLTE applies the LTE predicate on the "user_id" field. +func UserIDLTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldUserID, v)) +} + +// UserIDContains applies the Contains predicate on the "user_id" field. +func UserIDContains(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContains(FieldUserID, v)) +} + +// UserIDHasPrefix applies the HasPrefix predicate on the "user_id" field. +func UserIDHasPrefix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasPrefix(FieldUserID, v)) +} + +// UserIDHasSuffix applies the HasSuffix predicate on the "user_id" field. +func UserIDHasSuffix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasSuffix(FieldUserID, v)) +} + +// UserIDEqualFold applies the EqualFold predicate on the "user_id" field. +func UserIDEqualFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldUserID, v)) +} + +// UserIDContainsFold applies the ContainsFold predicate on the "user_id" field. +func UserIDContainsFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldUserID, v)) +} + +// ConnectorIDEQ applies the EQ predicate on the "connector_id" field. 
+func ConnectorIDEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorIDNEQ applies the NEQ predicate on the "connector_id" field. +func ConnectorIDNEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldConnectorID, v)) +} + +// ConnectorIDIn applies the In predicate on the "connector_id" field. +func ConnectorIDIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldConnectorID, vs...)) +} + +// ConnectorIDNotIn applies the NotIn predicate on the "connector_id" field. +func ConnectorIDNotIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldConnectorID, vs...)) +} + +// ConnectorIDGT applies the GT predicate on the "connector_id" field. +func ConnectorIDGT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldConnectorID, v)) +} + +// ConnectorIDGTE applies the GTE predicate on the "connector_id" field. +func ConnectorIDGTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldConnectorID, v)) +} + +// ConnectorIDLT applies the LT predicate on the "connector_id" field. +func ConnectorIDLT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldConnectorID, v)) +} + +// ConnectorIDLTE applies the LTE predicate on the "connector_id" field. +func ConnectorIDLTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldConnectorID, v)) +} + +// ConnectorIDContains applies the Contains predicate on the "connector_id" field. +func ConnectorIDContains(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContains(FieldConnectorID, v)) +} + +// ConnectorIDHasPrefix applies the HasPrefix predicate on the "connector_id" field. 
+func ConnectorIDHasPrefix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasPrefix(FieldConnectorID, v)) +} + +// ConnectorIDHasSuffix applies the HasSuffix predicate on the "connector_id" field. +func ConnectorIDHasSuffix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasSuffix(FieldConnectorID, v)) +} + +// ConnectorIDEqualFold applies the EqualFold predicate on the "connector_id" field. +func ConnectorIDEqualFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldConnectorID, v)) +} + +// ConnectorIDContainsFold applies the ContainsFold predicate on the "connector_id" field. +func ConnectorIDContainsFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldConnectorID, v)) +} + +// ClaimsUserIDEQ applies the EQ predicate on the "claims_user_id" field. +func ClaimsUserIDEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDNEQ applies the NEQ predicate on the "claims_user_id" field. +func ClaimsUserIDNEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDIn applies the In predicate on the "claims_user_id" field. +func ClaimsUserIDIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDNotIn applies the NotIn predicate on the "claims_user_id" field. +func ClaimsUserIDNotIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDGT applies the GT predicate on the "claims_user_id" field. +func ClaimsUserIDGT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDGTE applies the GTE predicate on the "claims_user_id" field. 
+func ClaimsUserIDGTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLT applies the LT predicate on the "claims_user_id" field. +func ClaimsUserIDLT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLTE applies the LTE predicate on the "claims_user_id" field. +func ClaimsUserIDLTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContains applies the Contains predicate on the "claims_user_id" field. +func ClaimsUserIDContains(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContains(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasPrefix applies the HasPrefix predicate on the "claims_user_id" field. +func ClaimsUserIDHasPrefix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasPrefix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasSuffix applies the HasSuffix predicate on the "claims_user_id" field. +func ClaimsUserIDHasSuffix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasSuffix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDEqualFold applies the EqualFold predicate on the "claims_user_id" field. +func ClaimsUserIDEqualFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContainsFold applies the ContainsFold predicate on the "claims_user_id" field. +func ClaimsUserIDContainsFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldClaimsUserID, v)) +} + +// ClaimsUsernameEQ applies the EQ predicate on the "claims_username" field. +func ClaimsUsernameEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameNEQ applies the NEQ predicate on the "claims_username" field. 
+func ClaimsUsernameNEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameIn applies the In predicate on the "claims_username" field. +func ClaimsUsernameIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameNotIn applies the NotIn predicate on the "claims_username" field. +func ClaimsUsernameNotIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameGT applies the GT predicate on the "claims_username" field. +func ClaimsUsernameGT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameGTE applies the GTE predicate on the "claims_username" field. +func ClaimsUsernameGTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLT applies the LT predicate on the "claims_username" field. +func ClaimsUsernameLT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLTE applies the LTE predicate on the "claims_username" field. +func ClaimsUsernameLTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContains applies the Contains predicate on the "claims_username" field. +func ClaimsUsernameContains(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContains(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasPrefix applies the HasPrefix predicate on the "claims_username" field. +func ClaimsUsernameHasPrefix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasPrefix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasSuffix applies the HasSuffix predicate on the "claims_username" field. 
+func ClaimsUsernameHasSuffix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasSuffix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameEqualFold applies the EqualFold predicate on the "claims_username" field. +func ClaimsUsernameEqualFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContainsFold applies the ContainsFold predicate on the "claims_username" field. +func ClaimsUsernameContainsFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldClaimsUsername, v)) +} + +// ClaimsPreferredUsernameEQ applies the EQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameNEQ applies the NEQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameIn applies the In predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameNotIn applies the NotIn predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNotIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameGT applies the GT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameGTE applies the GTE predicate on the "claims_preferred_username" field. 
+func ClaimsPreferredUsernameGTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLT applies the LT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLTE applies the LTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContains applies the Contains predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameContains(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContains(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasPrefix applies the HasPrefix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasPrefix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasPrefix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasSuffix applies the HasSuffix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasSuffix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasSuffix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameEqualFold applies the EqualFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEqualFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContainsFold applies the ContainsFold predicate on the "claims_preferred_username" field. 
+func ClaimsPreferredUsernameContainsFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldClaimsPreferredUsername, v)) +} + +// ClaimsEmailEQ applies the EQ predicate on the "claims_email" field. +func ClaimsEmailEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailNEQ applies the NEQ predicate on the "claims_email" field. +func ClaimsEmailNEQ(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailIn applies the In predicate on the "claims_email" field. +func ClaimsEmailIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailNotIn applies the NotIn predicate on the "claims_email" field. +func ClaimsEmailNotIn(vs ...string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailGT applies the GT predicate on the "claims_email" field. +func ClaimsEmailGT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldClaimsEmail, v)) +} + +// ClaimsEmailGTE applies the GTE predicate on the "claims_email" field. +func ClaimsEmailGTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailLT applies the LT predicate on the "claims_email" field. +func ClaimsEmailLT(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldClaimsEmail, v)) +} + +// ClaimsEmailLTE applies the LTE predicate on the "claims_email" field. +func ClaimsEmailLTE(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailContains applies the Contains predicate on the "claims_email" field. 
+func ClaimsEmailContains(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContains(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasPrefix applies the HasPrefix predicate on the "claims_email" field. +func ClaimsEmailHasPrefix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasPrefix(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasSuffix applies the HasSuffix predicate on the "claims_email" field. +func ClaimsEmailHasSuffix(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldHasSuffix(FieldClaimsEmail, v)) +} + +// ClaimsEmailEqualFold applies the EqualFold predicate on the "claims_email" field. +func ClaimsEmailEqualFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEqualFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailContainsFold applies the ContainsFold predicate on the "claims_email" field. +func ClaimsEmailContainsFold(v string) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldContainsFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerifiedEQ applies the EQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedEQ(v bool) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsEmailVerifiedNEQ applies the NEQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedNEQ(v bool) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsGroupsIsNil applies the IsNil predicate on the "claims_groups" field. +func ClaimsGroupsIsNil() predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIsNull(FieldClaimsGroups)) +} + +// ClaimsGroupsNotNil applies the NotNil predicate on the "claims_groups" field. +func ClaimsGroupsNotNil() predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotNull(FieldClaimsGroups)) +} + +// ConsentsEQ applies the EQ predicate on the "consents" field. 
+func ConsentsEQ(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldConsents, v)) +} + +// ConsentsNEQ applies the NEQ predicate on the "consents" field. +func ConsentsNEQ(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldConsents, v)) +} + +// ConsentsIn applies the In predicate on the "consents" field. +func ConsentsIn(vs ...[]byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldConsents, vs...)) +} + +// ConsentsNotIn applies the NotIn predicate on the "consents" field. +func ConsentsNotIn(vs ...[]byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldConsents, vs...)) +} + +// ConsentsGT applies the GT predicate on the "consents" field. +func ConsentsGT(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldConsents, v)) +} + +// ConsentsGTE applies the GTE predicate on the "consents" field. +func ConsentsGTE(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldConsents, v)) +} + +// ConsentsLT applies the LT predicate on the "consents" field. +func ConsentsLT(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldConsents, v)) +} + +// ConsentsLTE applies the LTE predicate on the "consents" field. +func ConsentsLTE(v []byte) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldConsents, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. 
+func CreatedAtIn(vs ...time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldCreatedAt, v)) +} + +// LastLoginEQ applies the EQ predicate on the "last_login" field. +func LastLoginEQ(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldLastLogin, v)) +} + +// LastLoginNEQ applies the NEQ predicate on the "last_login" field. +func LastLoginNEQ(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldLastLogin, v)) +} + +// LastLoginIn applies the In predicate on the "last_login" field. +func LastLoginIn(vs ...time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldLastLogin, vs...)) +} + +// LastLoginNotIn applies the NotIn predicate on the "last_login" field. +func LastLoginNotIn(vs ...time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldLastLogin, vs...)) +} + +// LastLoginGT applies the GT predicate on the "last_login" field. 
+func LastLoginGT(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldLastLogin, v)) +} + +// LastLoginGTE applies the GTE predicate on the "last_login" field. +func LastLoginGTE(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldLastLogin, v)) +} + +// LastLoginLT applies the LT predicate on the "last_login" field. +func LastLoginLT(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldLastLogin, v)) +} + +// LastLoginLTE applies the LTE predicate on the "last_login" field. +func LastLoginLTE(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldLastLogin, v)) +} + +// BlockedUntilEQ applies the EQ predicate on the "blocked_until" field. +func BlockedUntilEQ(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldEQ(FieldBlockedUntil, v)) +} + +// BlockedUntilNEQ applies the NEQ predicate on the "blocked_until" field. +func BlockedUntilNEQ(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNEQ(FieldBlockedUntil, v)) +} + +// BlockedUntilIn applies the In predicate on the "blocked_until" field. +func BlockedUntilIn(vs ...time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldIn(FieldBlockedUntil, vs...)) +} + +// BlockedUntilNotIn applies the NotIn predicate on the "blocked_until" field. +func BlockedUntilNotIn(vs ...time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldNotIn(FieldBlockedUntil, vs...)) +} + +// BlockedUntilGT applies the GT predicate on the "blocked_until" field. +func BlockedUntilGT(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGT(FieldBlockedUntil, v)) +} + +// BlockedUntilGTE applies the GTE predicate on the "blocked_until" field. 
+func BlockedUntilGTE(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldGTE(FieldBlockedUntil, v)) +} + +// BlockedUntilLT applies the LT predicate on the "blocked_until" field. +func BlockedUntilLT(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLT(FieldBlockedUntil, v)) +} + +// BlockedUntilLTE applies the LTE predicate on the "blocked_until" field. +func BlockedUntilLTE(v time.Time) predicate.UserIdentity { + return predicate.UserIdentity(sql.FieldLTE(FieldBlockedUntil, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserIdentity) predicate.UserIdentity { + return predicate.UserIdentity(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserIdentity) predicate.UserIdentity { + return predicate.UserIdentity(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserIdentity) predicate.UserIdentity { + return predicate.UserIdentity(sql.NotPredicates(p)) +} diff --git a/storage/ent/db/useridentity_create.go b/storage/ent/db/useridentity_create.go new file mode 100644 index 00000000..336d5c30 --- /dev/null +++ b/storage/ent/db/useridentity_create.go @@ -0,0 +1,416 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/useridentity" +) + +// UserIdentityCreate is the builder for creating a UserIdentity entity. +type UserIdentityCreate struct { + config + mutation *UserIdentityMutation + hooks []Hook +} + +// SetUserID sets the "user_id" field. +func (_c *UserIdentityCreate) SetUserID(v string) *UserIdentityCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetConnectorID sets the "connector_id" field. 
+func (_c *UserIdentityCreate) SetConnectorID(v string) *UserIdentityCreate { + _c.mutation.SetConnectorID(v) + return _c +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (_c *UserIdentityCreate) SetClaimsUserID(v string) *UserIdentityCreate { + _c.mutation.SetClaimsUserID(v) + return _c +} + +// SetNillableClaimsUserID sets the "claims_user_id" field if the given value is not nil. +func (_c *UserIdentityCreate) SetNillableClaimsUserID(v *string) *UserIdentityCreate { + if v != nil { + _c.SetClaimsUserID(*v) + } + return _c +} + +// SetClaimsUsername sets the "claims_username" field. +func (_c *UserIdentityCreate) SetClaimsUsername(v string) *UserIdentityCreate { + _c.mutation.SetClaimsUsername(v) + return _c +} + +// SetNillableClaimsUsername sets the "claims_username" field if the given value is not nil. +func (_c *UserIdentityCreate) SetNillableClaimsUsername(v *string) *UserIdentityCreate { + if v != nil { + _c.SetClaimsUsername(*v) + } + return _c +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (_c *UserIdentityCreate) SetClaimsPreferredUsername(v string) *UserIdentityCreate { + _c.mutation.SetClaimsPreferredUsername(v) + return _c +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (_c *UserIdentityCreate) SetNillableClaimsPreferredUsername(v *string) *UserIdentityCreate { + if v != nil { + _c.SetClaimsPreferredUsername(*v) + } + return _c +} + +// SetClaimsEmail sets the "claims_email" field. +func (_c *UserIdentityCreate) SetClaimsEmail(v string) *UserIdentityCreate { + _c.mutation.SetClaimsEmail(v) + return _c +} + +// SetNillableClaimsEmail sets the "claims_email" field if the given value is not nil. +func (_c *UserIdentityCreate) SetNillableClaimsEmail(v *string) *UserIdentityCreate { + if v != nil { + _c.SetClaimsEmail(*v) + } + return _c +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. 
+func (_c *UserIdentityCreate) SetClaimsEmailVerified(v bool) *UserIdentityCreate { + _c.mutation.SetClaimsEmailVerified(v) + return _c +} + +// SetNillableClaimsEmailVerified sets the "claims_email_verified" field if the given value is not nil. +func (_c *UserIdentityCreate) SetNillableClaimsEmailVerified(v *bool) *UserIdentityCreate { + if v != nil { + _c.SetClaimsEmailVerified(*v) + } + return _c +} + +// SetClaimsGroups sets the "claims_groups" field. +func (_c *UserIdentityCreate) SetClaimsGroups(v []string) *UserIdentityCreate { + _c.mutation.SetClaimsGroups(v) + return _c +} + +// SetConsents sets the "consents" field. +func (_c *UserIdentityCreate) SetConsents(v []byte) *UserIdentityCreate { + _c.mutation.SetConsents(v) + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserIdentityCreate) SetCreatedAt(v time.Time) *UserIdentityCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetLastLogin sets the "last_login" field. +func (_c *UserIdentityCreate) SetLastLogin(v time.Time) *UserIdentityCreate { + _c.mutation.SetLastLogin(v) + return _c +} + +// SetBlockedUntil sets the "blocked_until" field. +func (_c *UserIdentityCreate) SetBlockedUntil(v time.Time) *UserIdentityCreate { + _c.mutation.SetBlockedUntil(v) + return _c +} + +// SetID sets the "id" field. +func (_c *UserIdentityCreate) SetID(v string) *UserIdentityCreate { + _c.mutation.SetID(v) + return _c +} + +// Mutation returns the UserIdentityMutation object of the builder. +func (_c *UserIdentityCreate) Mutation() *UserIdentityMutation { + return _c.mutation +} + +// Save creates the UserIdentity in the database. +func (_c *UserIdentityCreate) Save(ctx context.Context) (*UserIdentity, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
+func (_c *UserIdentityCreate) SaveX(ctx context.Context) *UserIdentity { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserIdentityCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserIdentityCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserIdentityCreate) defaults() { + if _, ok := _c.mutation.ClaimsUserID(); !ok { + v := useridentity.DefaultClaimsUserID + _c.mutation.SetClaimsUserID(v) + } + if _, ok := _c.mutation.ClaimsUsername(); !ok { + v := useridentity.DefaultClaimsUsername + _c.mutation.SetClaimsUsername(v) + } + if _, ok := _c.mutation.ClaimsPreferredUsername(); !ok { + v := useridentity.DefaultClaimsPreferredUsername + _c.mutation.SetClaimsPreferredUsername(v) + } + if _, ok := _c.mutation.ClaimsEmail(); !ok { + v := useridentity.DefaultClaimsEmail + _c.mutation.SetClaimsEmail(v) + } + if _, ok := _c.mutation.ClaimsEmailVerified(); !ok { + v := useridentity.DefaultClaimsEmailVerified + _c.mutation.SetClaimsEmailVerified(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UserIdentityCreate) check() error { + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`db: missing required field "UserIdentity.user_id"`)} + } + if v, ok := _c.mutation.UserID(); ok { + if err := useridentity.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.user_id": %w`, err)} + } + } + if _, ok := _c.mutation.ConnectorID(); !ok { + return &ValidationError{Name: "connector_id", err: errors.New(`db: missing required field "UserIdentity.connector_id"`)} + } + if v, ok := _c.mutation.ConnectorID(); ok { + if err := useridentity.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.connector_id": %w`, err)} + } + } + if _, ok := _c.mutation.ClaimsUserID(); !ok { + return &ValidationError{Name: "claims_user_id", err: errors.New(`db: missing required field "UserIdentity.claims_user_id"`)} + } + if _, ok := _c.mutation.ClaimsUsername(); !ok { + return &ValidationError{Name: "claims_username", err: errors.New(`db: missing required field "UserIdentity.claims_username"`)} + } + if _, ok := _c.mutation.ClaimsPreferredUsername(); !ok { + return &ValidationError{Name: "claims_preferred_username", err: errors.New(`db: missing required field "UserIdentity.claims_preferred_username"`)} + } + if _, ok := _c.mutation.ClaimsEmail(); !ok { + return &ValidationError{Name: "claims_email", err: errors.New(`db: missing required field "UserIdentity.claims_email"`)} + } + if _, ok := _c.mutation.ClaimsEmailVerified(); !ok { + return &ValidationError{Name: "claims_email_verified", err: errors.New(`db: missing required field "UserIdentity.claims_email_verified"`)} + } + if _, ok := _c.mutation.Consents(); !ok { + return &ValidationError{Name: "consents", err: errors.New(`db: missing required field "UserIdentity.consents"`)} + } + if _, ok := 
_c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "UserIdentity.created_at"`)} + } + if _, ok := _c.mutation.LastLogin(); !ok { + return &ValidationError{Name: "last_login", err: errors.New(`db: missing required field "UserIdentity.last_login"`)} + } + if _, ok := _c.mutation.BlockedUntil(); !ok { + return &ValidationError{Name: "blocked_until", err: errors.New(`db: missing required field "UserIdentity.blocked_until"`)} + } + if v, ok := _c.mutation.ID(); ok { + if err := useridentity.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.id": %w`, err)} + } + } + return nil +} + +func (_c *UserIdentityCreate) sqlSave(ctx context.Context) (*UserIdentity, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected UserIdentity.ID type: %T", _spec.ID.Value) + } + } + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserIdentityCreate) createSpec() (*UserIdentity, *sqlgraph.CreateSpec) { + var ( + _node = &UserIdentity{config: _c.config} + _spec = sqlgraph.NewCreateSpec(useridentity.Table, sqlgraph.NewFieldSpec(useridentity.FieldID, field.TypeString)) + ) + if id, ok := _c.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := _c.mutation.UserID(); ok { + _spec.SetField(useridentity.FieldUserID, field.TypeString, value) + _node.UserID = value + } + if value, ok := _c.mutation.ConnectorID(); ok { + _spec.SetField(useridentity.FieldConnectorID, field.TypeString, value) + _node.ConnectorID = value + } + 
if value, ok := _c.mutation.ClaimsUserID(); ok { + _spec.SetField(useridentity.FieldClaimsUserID, field.TypeString, value) + _node.ClaimsUserID = value + } + if value, ok := _c.mutation.ClaimsUsername(); ok { + _spec.SetField(useridentity.FieldClaimsUsername, field.TypeString, value) + _node.ClaimsUsername = value + } + if value, ok := _c.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(useridentity.FieldClaimsPreferredUsername, field.TypeString, value) + _node.ClaimsPreferredUsername = value + } + if value, ok := _c.mutation.ClaimsEmail(); ok { + _spec.SetField(useridentity.FieldClaimsEmail, field.TypeString, value) + _node.ClaimsEmail = value + } + if value, ok := _c.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(useridentity.FieldClaimsEmailVerified, field.TypeBool, value) + _node.ClaimsEmailVerified = value + } + if value, ok := _c.mutation.ClaimsGroups(); ok { + _spec.SetField(useridentity.FieldClaimsGroups, field.TypeJSON, value) + _node.ClaimsGroups = value + } + if value, ok := _c.mutation.Consents(); ok { + _spec.SetField(useridentity.FieldConsents, field.TypeBytes, value) + _node.Consents = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(useridentity.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.LastLogin(); ok { + _spec.SetField(useridentity.FieldLastLogin, field.TypeTime, value) + _node.LastLogin = value + } + if value, ok := _c.mutation.BlockedUntil(); ok { + _spec.SetField(useridentity.FieldBlockedUntil, field.TypeTime, value) + _node.BlockedUntil = value + } + return _node, _spec +} + +// UserIdentityCreateBulk is the builder for creating many UserIdentity entities in bulk. +type UserIdentityCreateBulk struct { + config + err error + builders []*UserIdentityCreate +} + +// Save creates the UserIdentity entities in the database. 
+func (_c *UserIdentityCreateBulk) Save(ctx context.Context) ([]*UserIdentity, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UserIdentity, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserIdentityMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UserIdentityCreateBulk) SaveX(ctx context.Context) []*UserIdentity { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserIdentityCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_c *UserIdentityCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/storage/ent/db/useridentity_delete.go b/storage/ent/db/useridentity_delete.go new file mode 100644 index 00000000..0bc51e24 --- /dev/null +++ b/storage/ent/db/useridentity_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/predicate" + "github.com/dexidp/dex/storage/ent/db/useridentity" +) + +// UserIdentityDelete is the builder for deleting a UserIdentity entity. +type UserIdentityDelete struct { + config + hooks []Hook + mutation *UserIdentityMutation +} + +// Where appends a list predicates to the UserIdentityDelete builder. +func (_d *UserIdentityDelete) Where(ps ...predicate.UserIdentity) *UserIdentityDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UserIdentityDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *UserIdentityDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UserIdentityDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(useridentity.Table, sqlgraph.NewFieldSpec(useridentity.FieldID, field.TypeString)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UserIdentityDeleteOne is the builder for deleting a single UserIdentity entity. +type UserIdentityDeleteOne struct { + _d *UserIdentityDelete +} + +// Where appends a list predicates to the UserIdentityDelete builder. +func (_d *UserIdentityDeleteOne) Where(ps ...predicate.UserIdentity) *UserIdentityDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UserIdentityDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{useridentity.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserIdentityDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/storage/ent/db/useridentity_query.go b/storage/ent/db/useridentity_query.go new file mode 100644 index 00000000..3e509038 --- /dev/null +++ b/storage/ent/db/useridentity_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/predicate" + "github.com/dexidp/dex/storage/ent/db/useridentity" +) + +// UserIdentityQuery is the builder for querying UserIdentity entities. +type UserIdentityQuery struct { + config + ctx *QueryContext + order []useridentity.OrderOption + inters []Interceptor + predicates []predicate.UserIdentity + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserIdentityQuery builder. +func (_q *UserIdentityQuery) Where(ps ...predicate.UserIdentity) *UserIdentityQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserIdentityQuery) Limit(limit int) *UserIdentityQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserIdentityQuery) Offset(offset int) *UserIdentityQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserIdentityQuery) Unique(unique bool) *UserIdentityQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserIdentityQuery) Order(o ...useridentity.OrderOption) *UserIdentityQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first UserIdentity entity from the query. +// Returns a *NotFoundError when no UserIdentity was found. 
+func (_q *UserIdentityQuery) First(ctx context.Context) (*UserIdentity, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{useridentity.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserIdentityQuery) FirstX(ctx context.Context) *UserIdentity { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserIdentity ID from the query. +// Returns a *NotFoundError when no UserIdentity ID was found. +func (_q *UserIdentityQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{useridentity.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserIdentityQuery) FirstIDX(ctx context.Context) string { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserIdentity entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserIdentity entity is found. +// Returns a *NotFoundError when no UserIdentity entities are found. +func (_q *UserIdentityQuery) Only(ctx context.Context) (*UserIdentity, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{useridentity.Label} + default: + return nil, &NotSingularError{useridentity.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (_q *UserIdentityQuery) OnlyX(ctx context.Context) *UserIdentity { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserIdentity ID in the query. +// Returns a *NotSingularError when more than one UserIdentity ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserIdentityQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{useridentity.Label} + default: + err = &NotSingularError{useridentity.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserIdentityQuery) OnlyIDX(ctx context.Context) string { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserIdentities. +func (_q *UserIdentityQuery) All(ctx context.Context) ([]*UserIdentity, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserIdentity, *UserIdentityQuery]() + return withInterceptors[[]*UserIdentity](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserIdentityQuery) AllX(ctx context.Context) []*UserIdentity { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserIdentity IDs. +func (_q *UserIdentityQuery) IDs(ctx context.Context) (ids []string, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(useridentity.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. 
+func (_q *UserIdentityQuery) IDsX(ctx context.Context) []string { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserIdentityQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserIdentityQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserIdentityQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserIdentityQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserIdentityQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserIdentityQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserIdentityQuery) Clone() *UserIdentityQuery { + if _q == nil { + return nil + } + return &UserIdentityQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]useridentity.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserIdentity{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. 
+// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// UserID string `json:"user_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserIdentity.Query(). +// GroupBy(useridentity.FieldUserID). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (_q *UserIdentityQuery) GroupBy(field string, fields ...string) *UserIdentityGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserIdentityGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = useridentity.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// UserID string `json:"user_id,omitempty"` +// } +// +// client.UserIdentity.Query(). +// Select(useridentity.FieldUserID). +// Scan(ctx, &v) +func (_q *UserIdentityQuery) Select(fields ...string) *UserIdentitySelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UserIdentitySelect{UserIdentityQuery: _q} + sbuild.label = useridentity.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserIdentitySelect configured with the given aggregations. +func (_q *UserIdentityQuery) Aggregate(fns ...AggregateFunc) *UserIdentitySelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *UserIdentityQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !useridentity.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserIdentityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserIdentity, error) { + var ( + nodes = []*UserIdentity{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserIdentity).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserIdentity{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *UserIdentityQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserIdentityQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(useridentity.Table, useridentity.Columns, sqlgraph.NewFieldSpec(useridentity.FieldID, field.TypeString)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + 
_spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, useridentity.FieldID) + for i := range fields { + if fields[i] != useridentity.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserIdentityQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(useridentity.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = useridentity.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserIdentityGroupBy is the group-by builder for UserIdentity entities. +type UserIdentityGroupBy struct { + selector + build *UserIdentityQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserIdentityGroupBy) Aggregate(fns ...AggregateFunc) *UserIdentityGroupBy { + _g.fns = append(_g.fns, fns...) 
+ return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserIdentityGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserIdentityQuery, *UserIdentityGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserIdentityGroupBy) sqlScan(ctx context.Context, root *UserIdentityQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserIdentitySelect is the builder for selecting fields of UserIdentity entities. +type UserIdentitySelect struct { + *UserIdentityQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserIdentitySelect) Aggregate(fns ...AggregateFunc) *UserIdentitySelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *UserIdentitySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserIdentityQuery, *UserIdentitySelect](ctx, _s.UserIdentityQuery, _s, _s.inters, v) +} + +func (_s *UserIdentitySelect) sqlScan(ctx context.Context, root *UserIdentityQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/storage/ent/db/useridentity_update.go b/storage/ent/db/useridentity_update.go new file mode 100644 index 00000000..27ee0d3a --- /dev/null +++ b/storage/ent/db/useridentity_update.go @@ -0,0 +1,629 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/predicate" + "github.com/dexidp/dex/storage/ent/db/useridentity" +) + +// UserIdentityUpdate is the builder for updating UserIdentity entities. +type UserIdentityUpdate struct { + config + hooks []Hook + mutation *UserIdentityMutation +} + +// Where appends a list predicates to the UserIdentityUpdate builder. +func (_u *UserIdentityUpdate) Where(ps ...predicate.UserIdentity) *UserIdentityUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUserID sets the "user_id" field. 
+func (_u *UserIdentityUpdate) SetUserID(v string) *UserIdentityUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableUserID(v *string) *UserIdentityUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetConnectorID sets the "connector_id" field. +func (_u *UserIdentityUpdate) SetConnectorID(v string) *UserIdentityUpdate { + _u.mutation.SetConnectorID(v) + return _u +} + +// SetNillableConnectorID sets the "connector_id" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableConnectorID(v *string) *UserIdentityUpdate { + if v != nil { + _u.SetConnectorID(*v) + } + return _u +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (_u *UserIdentityUpdate) SetClaimsUserID(v string) *UserIdentityUpdate { + _u.mutation.SetClaimsUserID(v) + return _u +} + +// SetNillableClaimsUserID sets the "claims_user_id" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableClaimsUserID(v *string) *UserIdentityUpdate { + if v != nil { + _u.SetClaimsUserID(*v) + } + return _u +} + +// SetClaimsUsername sets the "claims_username" field. +func (_u *UserIdentityUpdate) SetClaimsUsername(v string) *UserIdentityUpdate { + _u.mutation.SetClaimsUsername(v) + return _u +} + +// SetNillableClaimsUsername sets the "claims_username" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableClaimsUsername(v *string) *UserIdentityUpdate { + if v != nil { + _u.SetClaimsUsername(*v) + } + return _u +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (_u *UserIdentityUpdate) SetClaimsPreferredUsername(v string) *UserIdentityUpdate { + _u.mutation.SetClaimsPreferredUsername(v) + return _u +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. 
+func (_u *UserIdentityUpdate) SetNillableClaimsPreferredUsername(v *string) *UserIdentityUpdate { + if v != nil { + _u.SetClaimsPreferredUsername(*v) + } + return _u +} + +// SetClaimsEmail sets the "claims_email" field. +func (_u *UserIdentityUpdate) SetClaimsEmail(v string) *UserIdentityUpdate { + _u.mutation.SetClaimsEmail(v) + return _u +} + +// SetNillableClaimsEmail sets the "claims_email" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableClaimsEmail(v *string) *UserIdentityUpdate { + if v != nil { + _u.SetClaimsEmail(*v) + } + return _u +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (_u *UserIdentityUpdate) SetClaimsEmailVerified(v bool) *UserIdentityUpdate { + _u.mutation.SetClaimsEmailVerified(v) + return _u +} + +// SetNillableClaimsEmailVerified sets the "claims_email_verified" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableClaimsEmailVerified(v *bool) *UserIdentityUpdate { + if v != nil { + _u.SetClaimsEmailVerified(*v) + } + return _u +} + +// SetClaimsGroups sets the "claims_groups" field. +func (_u *UserIdentityUpdate) SetClaimsGroups(v []string) *UserIdentityUpdate { + _u.mutation.SetClaimsGroups(v) + return _u +} + +// AppendClaimsGroups appends value to the "claims_groups" field. +func (_u *UserIdentityUpdate) AppendClaimsGroups(v []string) *UserIdentityUpdate { + _u.mutation.AppendClaimsGroups(v) + return _u +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (_u *UserIdentityUpdate) ClearClaimsGroups() *UserIdentityUpdate { + _u.mutation.ClearClaimsGroups() + return _u +} + +// SetConsents sets the "consents" field. +func (_u *UserIdentityUpdate) SetConsents(v []byte) *UserIdentityUpdate { + _u.mutation.SetConsents(v) + return _u +} + +// SetCreatedAt sets the "created_at" field. 
+func (_u *UserIdentityUpdate) SetCreatedAt(v time.Time) *UserIdentityUpdate { + _u.mutation.SetCreatedAt(v) + return _u +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableCreatedAt(v *time.Time) *UserIdentityUpdate { + if v != nil { + _u.SetCreatedAt(*v) + } + return _u +} + +// SetLastLogin sets the "last_login" field. +func (_u *UserIdentityUpdate) SetLastLogin(v time.Time) *UserIdentityUpdate { + _u.mutation.SetLastLogin(v) + return _u +} + +// SetNillableLastLogin sets the "last_login" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableLastLogin(v *time.Time) *UserIdentityUpdate { + if v != nil { + _u.SetLastLogin(*v) + } + return _u +} + +// SetBlockedUntil sets the "blocked_until" field. +func (_u *UserIdentityUpdate) SetBlockedUntil(v time.Time) *UserIdentityUpdate { + _u.mutation.SetBlockedUntil(v) + return _u +} + +// SetNillableBlockedUntil sets the "blocked_until" field if the given value is not nil. +func (_u *UserIdentityUpdate) SetNillableBlockedUntil(v *time.Time) *UserIdentityUpdate { + if v != nil { + _u.SetBlockedUntil(*v) + } + return _u +} + +// Mutation returns the UserIdentityMutation object of the builder. +func (_u *UserIdentityUpdate) Mutation() *UserIdentityMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserIdentityUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserIdentityUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserIdentityUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *UserIdentityUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserIdentityUpdate) check() error { + if v, ok := _u.mutation.UserID(); ok { + if err := useridentity.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.user_id": %w`, err)} + } + } + if v, ok := _u.mutation.ConnectorID(); ok { + if err := useridentity.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.connector_id": %w`, err)} + } + } + return nil +} + +func (_u *UserIdentityUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(useridentity.Table, useridentity.Columns, sqlgraph.NewFieldSpec(useridentity.FieldID, field.TypeString)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UserID(); ok { + _spec.SetField(useridentity.FieldUserID, field.TypeString, value) + } + if value, ok := _u.mutation.ConnectorID(); ok { + _spec.SetField(useridentity.FieldConnectorID, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsUserID(); ok { + _spec.SetField(useridentity.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsUsername(); ok { + _spec.SetField(useridentity.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(useridentity.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsEmail(); ok { + _spec.SetField(useridentity.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := 
_u.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(useridentity.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := _u.mutation.ClaimsGroups(); ok { + _spec.SetField(useridentity.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, useridentity.FieldClaimsGroups, value) + }) + } + if _u.mutation.ClaimsGroupsCleared() { + _spec.ClearField(useridentity.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := _u.mutation.Consents(); ok { + _spec.SetField(useridentity.FieldConsents, field.TypeBytes, value) + } + if value, ok := _u.mutation.CreatedAt(); ok { + _spec.SetField(useridentity.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.LastLogin(); ok { + _spec.SetField(useridentity.FieldLastLogin, field.TypeTime, value) + } + if value, ok := _u.mutation.BlockedUntil(); ok { + _spec.SetField(useridentity.FieldBlockedUntil, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{useridentity.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserIdentityUpdateOne is the builder for updating a single UserIdentity entity. +type UserIdentityUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserIdentityMutation +} + +// SetUserID sets the "user_id" field. +func (_u *UserIdentityUpdateOne) SetUserID(v string) *UserIdentityUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. 
+func (_u *UserIdentityUpdateOne) SetNillableUserID(v *string) *UserIdentityUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetConnectorID sets the "connector_id" field. +func (_u *UserIdentityUpdateOne) SetConnectorID(v string) *UserIdentityUpdateOne { + _u.mutation.SetConnectorID(v) + return _u +} + +// SetNillableConnectorID sets the "connector_id" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableConnectorID(v *string) *UserIdentityUpdateOne { + if v != nil { + _u.SetConnectorID(*v) + } + return _u +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (_u *UserIdentityUpdateOne) SetClaimsUserID(v string) *UserIdentityUpdateOne { + _u.mutation.SetClaimsUserID(v) + return _u +} + +// SetNillableClaimsUserID sets the "claims_user_id" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableClaimsUserID(v *string) *UserIdentityUpdateOne { + if v != nil { + _u.SetClaimsUserID(*v) + } + return _u +} + +// SetClaimsUsername sets the "claims_username" field. +func (_u *UserIdentityUpdateOne) SetClaimsUsername(v string) *UserIdentityUpdateOne { + _u.mutation.SetClaimsUsername(v) + return _u +} + +// SetNillableClaimsUsername sets the "claims_username" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableClaimsUsername(v *string) *UserIdentityUpdateOne { + if v != nil { + _u.SetClaimsUsername(*v) + } + return _u +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (_u *UserIdentityUpdateOne) SetClaimsPreferredUsername(v string) *UserIdentityUpdateOne { + _u.mutation.SetClaimsPreferredUsername(v) + return _u +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. 
+func (_u *UserIdentityUpdateOne) SetNillableClaimsPreferredUsername(v *string) *UserIdentityUpdateOne { + if v != nil { + _u.SetClaimsPreferredUsername(*v) + } + return _u +} + +// SetClaimsEmail sets the "claims_email" field. +func (_u *UserIdentityUpdateOne) SetClaimsEmail(v string) *UserIdentityUpdateOne { + _u.mutation.SetClaimsEmail(v) + return _u +} + +// SetNillableClaimsEmail sets the "claims_email" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableClaimsEmail(v *string) *UserIdentityUpdateOne { + if v != nil { + _u.SetClaimsEmail(*v) + } + return _u +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (_u *UserIdentityUpdateOne) SetClaimsEmailVerified(v bool) *UserIdentityUpdateOne { + _u.mutation.SetClaimsEmailVerified(v) + return _u +} + +// SetNillableClaimsEmailVerified sets the "claims_email_verified" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableClaimsEmailVerified(v *bool) *UserIdentityUpdateOne { + if v != nil { + _u.SetClaimsEmailVerified(*v) + } + return _u +} + +// SetClaimsGroups sets the "claims_groups" field. +func (_u *UserIdentityUpdateOne) SetClaimsGroups(v []string) *UserIdentityUpdateOne { + _u.mutation.SetClaimsGroups(v) + return _u +} + +// AppendClaimsGroups appends value to the "claims_groups" field. +func (_u *UserIdentityUpdateOne) AppendClaimsGroups(v []string) *UserIdentityUpdateOne { + _u.mutation.AppendClaimsGroups(v) + return _u +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (_u *UserIdentityUpdateOne) ClearClaimsGroups() *UserIdentityUpdateOne { + _u.mutation.ClearClaimsGroups() + return _u +} + +// SetConsents sets the "consents" field. +func (_u *UserIdentityUpdateOne) SetConsents(v []byte) *UserIdentityUpdateOne { + _u.mutation.SetConsents(v) + return _u +} + +// SetCreatedAt sets the "created_at" field. 
+func (_u *UserIdentityUpdateOne) SetCreatedAt(v time.Time) *UserIdentityUpdateOne { + _u.mutation.SetCreatedAt(v) + return _u +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableCreatedAt(v *time.Time) *UserIdentityUpdateOne { + if v != nil { + _u.SetCreatedAt(*v) + } + return _u +} + +// SetLastLogin sets the "last_login" field. +func (_u *UserIdentityUpdateOne) SetLastLogin(v time.Time) *UserIdentityUpdateOne { + _u.mutation.SetLastLogin(v) + return _u +} + +// SetNillableLastLogin sets the "last_login" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableLastLogin(v *time.Time) *UserIdentityUpdateOne { + if v != nil { + _u.SetLastLogin(*v) + } + return _u +} + +// SetBlockedUntil sets the "blocked_until" field. +func (_u *UserIdentityUpdateOne) SetBlockedUntil(v time.Time) *UserIdentityUpdateOne { + _u.mutation.SetBlockedUntil(v) + return _u +} + +// SetNillableBlockedUntil sets the "blocked_until" field if the given value is not nil. +func (_u *UserIdentityUpdateOne) SetNillableBlockedUntil(v *time.Time) *UserIdentityUpdateOne { + if v != nil { + _u.SetBlockedUntil(*v) + } + return _u +} + +// Mutation returns the UserIdentityMutation object of the builder. +func (_u *UserIdentityUpdateOne) Mutation() *UserIdentityMutation { + return _u.mutation +} + +// Where appends a list predicates to the UserIdentityUpdate builder. +func (_u *UserIdentityUpdateOne) Where(ps ...predicate.UserIdentity) *UserIdentityUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserIdentityUpdateOne) Select(field string, fields ...string) *UserIdentityUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserIdentity entity. 
+func (_u *UserIdentityUpdateOne) Save(ctx context.Context) (*UserIdentity, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserIdentityUpdateOne) SaveX(ctx context.Context) *UserIdentity { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserIdentityUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserIdentityUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserIdentityUpdateOne) check() error { + if v, ok := _u.mutation.UserID(); ok { + if err := useridentity.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.user_id": %w`, err)} + } + } + if v, ok := _u.mutation.ConnectorID(); ok { + if err := useridentity.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "UserIdentity.connector_id": %w`, err)} + } + } + return nil +} + +func (_u *UserIdentityUpdateOne) sqlSave(ctx context.Context) (_node *UserIdentity, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(useridentity.Table, useridentity.Columns, sqlgraph.NewFieldSpec(useridentity.FieldID, field.TypeString)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "UserIdentity.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, useridentity.FieldID) + for _, f := range fields { + if 
!useridentity.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != useridentity.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UserID(); ok { + _spec.SetField(useridentity.FieldUserID, field.TypeString, value) + } + if value, ok := _u.mutation.ConnectorID(); ok { + _spec.SetField(useridentity.FieldConnectorID, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsUserID(); ok { + _spec.SetField(useridentity.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsUsername(); ok { + _spec.SetField(useridentity.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(useridentity.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsEmail(); ok { + _spec.SetField(useridentity.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := _u.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(useridentity.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := _u.mutation.ClaimsGroups(); ok { + _spec.SetField(useridentity.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, useridentity.FieldClaimsGroups, value) + }) + } + if _u.mutation.ClaimsGroupsCleared() { + _spec.ClearField(useridentity.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := _u.mutation.Consents(); ok { + _spec.SetField(useridentity.FieldConsents, field.TypeBytes, value) + } + if value, ok := _u.mutation.CreatedAt(); ok { + _spec.SetField(useridentity.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := 
_u.mutation.LastLogin(); ok { + _spec.SetField(useridentity.FieldLastLogin, field.TypeTime, value) + } + if value, ok := _u.mutation.BlockedUntil(); ok { + _spec.SetField(useridentity.FieldBlockedUntil, field.TypeTime, value) + } + _node = &UserIdentity{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{useridentity.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/storage/ent/mysql_test.go b/storage/ent/mysql_test.go index a1baffb3..d7a06ffa 100644 --- a/storage/ent/mysql_test.go +++ b/storage/ent/mysql_test.go @@ -18,6 +18,12 @@ const ( MySQLEntDatabaseEnv = "DEX_MYSQL_ENT_DATABASE" MySQLEntUserEnv = "DEX_MYSQL_ENT_USER" MySQLEntPasswordEnv = "DEX_MYSQL_ENT_PASSWORD" + + MySQL8EntHostEnv = "DEX_MYSQL8_ENT_HOST" + MySQL8EntPortEnv = "DEX_MYSQL8_ENT_PORT" + MySQL8EntDatabaseEnv = "DEX_MYSQL8_ENT_DATABASE" + MySQL8EntUserEnv = "DEX_MYSQL8_ENT_USER" + MySQL8EntPasswordEnv = "DEX_MYSQL8_ENT_PASSWORD" ) func mysqlTestConfig(host string, port uint64) *MySQL { @@ -40,6 +46,35 @@ func mysqlTestConfig(host string, port uint64) *MySQL { } } +func mysql8TestConfig(host string, port uint64) *MySQL { + return &MySQL{ + NetworkDB: NetworkDB{ + Database: getenv(MySQL8EntDatabaseEnv, "mysql"), + User: getenv(MySQL8EntUserEnv, "mysql"), + Password: getenv(MySQL8EntPasswordEnv, "mysql"), + Host: host, + Port: uint16(port), + }, + SSL: SSL{ + Mode: mysqlSSLFalse, + }, + params: map[string]string{ + "innodb_lock_wait_timeout": "1", + }, + } +} + +func newMySQL8Storage(t *testing.T, host string, port uint64) storage.Storage { + logger := slog.New(slog.NewTextHandler(t.Output(), &slog.HandlerOptions{Level: slog.LevelDebug})) + + cfg := mysql8TestConfig(host, port) 
+ s, err := cfg.Open(logger) + if err != nil { + panic(err) + } + return s +} + func newMySQLStorage(t *testing.T, host string, port uint64) storage.Storage { logger := slog.New(slog.NewTextHandler(t.Output(), &slog.HandlerOptions{Level: slog.LevelDebug})) @@ -70,6 +105,37 @@ func TestMySQL(t *testing.T) { } conformance.RunTests(t, newStorage) conformance.RunTransactionTests(t, newStorage) + + // TODO(nabokihms): ent MySQL does not retry on deadlocks (Error 1213, SQLSTATE 40001: + // Deadlock found when trying to get lock; try restarting transaction). + // Under high contention most updates fail. + // conformance.RunConcurrencyTests(t, newStorage) +} + +func TestMySQL8(t *testing.T) { + host := os.Getenv(MySQL8EntHostEnv) + if host == "" { + t.Skipf("test environment variable %s not set, skipping", MySQL8EntHostEnv) + } + + port := uint64(3306) + if rawPort := os.Getenv(MySQL8EntPortEnv); rawPort != "" { + var err error + + port, err = strconv.ParseUint(rawPort, 10, 32) + require.NoError(t, err, "invalid mysql port %q: %s", rawPort, err) + } + + newStorage := func(t *testing.T) storage.Storage { + return newMySQL8Storage(t, host, port) + } + conformance.RunTests(t, newStorage) + conformance.RunTransactionTests(t, newStorage) + + // TODO(nabokihms): ent MySQL 8 does not retry on deadlocks (Error 1213, SQLSTATE 40001: + // Deadlock found when trying to get lock; try restarting transaction). + // Under high contention most updates fail. 
+ // conformance.RunConcurrencyTests(t, newStorage) } func TestMySQLDSN(t *testing.T) { diff --git a/storage/ent/postgres_test.go b/storage/ent/postgres_test.go index bbbde38e..b53ed382 100644 --- a/storage/ent/postgres_test.go +++ b/storage/ent/postgres_test.go @@ -65,6 +65,11 @@ func TestPostgres(t *testing.T) { } conformance.RunTests(t, newStorage) conformance.RunTransactionTests(t, newStorage) + + // TODO(nabokihms): ent Postgres uses SERIALIZABLE transaction isolation for UpdateRefreshToken, + // but does not retry on serialization failures (pq: could not serialize access due to + // concurrent update, SQLSTATE 40001). Under high contention most updates fail immediately. + // conformance.RunConcurrencyTests(t, newStorage) } func TestPostgresDSN(t *testing.T) { diff --git a/storage/ent/schema/authsession.go b/storage/ent/schema/authsession.go new file mode 100644 index 00000000..f0e57110 --- /dev/null +++ b/storage/ent/schema/authsession.go @@ -0,0 +1,37 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +// AuthSession holds the schema definition for the AuthSession entity. +type AuthSession struct { + ent.Schema +} + +// Fields of the AuthSession. +func (AuthSession) Fields() []ent.Field { + return []ent.Field{ + field.Text("id"). + SchemaType(textSchema). + NotEmpty(). + Unique(), + field.Bytes("client_states"), + field.Time("created_at"). + SchemaType(timeSchema), + field.Time("last_activity"). + SchemaType(timeSchema), + field.Text("ip_address"). + SchemaType(textSchema). + Default(""), + field.Text("user_agent"). + SchemaType(textSchema). + Default(""), + } +} + +// Edges of the AuthSession. +func (AuthSession) Edges() []ent.Edge { + return []ent.Edge{} +} diff --git a/storage/ent/schema/client.go b/storage/ent/schema/client.go index b897c52a..f0e10606 100644 --- a/storage/ent/schema/client.go +++ b/storage/ent/schema/client.go @@ -45,6 +45,8 @@ func (OAuth2Client) Fields() []ent.Field { field.Text("logo_url"). 
SchemaType(textSchema). NotEmpty(), + field.JSON("allowed_connectors", []string{}). + Optional(), } } diff --git a/storage/ent/schema/connector.go b/storage/ent/schema/connector.go index 41b65eb4..191092c5 100644 --- a/storage/ent/schema/connector.go +++ b/storage/ent/schema/connector.go @@ -38,6 +38,8 @@ func (Connector) Fields() []ent.Field { field.Text("resource_version"). SchemaType(textSchema), field.Bytes("config"), + field.JSON("grant_types", []string{}). + Optional(), } } diff --git a/storage/ent/schema/useridentity.go b/storage/ent/schema/useridentity.go new file mode 100644 index 00000000..a4928240 --- /dev/null +++ b/storage/ent/schema/useridentity.go @@ -0,0 +1,56 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +// UserIdentity holds the schema definition for the UserIdentity entity. +type UserIdentity struct { + ent.Schema +} + +// Fields of the UserIdentity. +func (UserIdentity) Fields() []ent.Field { + return []ent.Field{ + // Using id field here because it's impossible to create multi-key primary yet + field.Text("id"). + SchemaType(textSchema). + NotEmpty(). + Unique(), + field.Text("user_id"). + SchemaType(textSchema). + NotEmpty(), + field.Text("connector_id"). + SchemaType(textSchema). + NotEmpty(), + field.Text("claims_user_id"). + SchemaType(textSchema). + Default(""), + field.Text("claims_username"). + SchemaType(textSchema). + Default(""), + field.Text("claims_preferred_username"). + SchemaType(textSchema). + Default(""), + field.Text("claims_email"). + SchemaType(textSchema). + Default(""), + field.Bool("claims_email_verified"). + Default(false), + field.JSON("claims_groups", []string{}). + Optional(), + field.Bytes("consents"), + field.Time("created_at"). + SchemaType(timeSchema), + field.Time("last_login"). + SchemaType(timeSchema), + field.Time("blocked_until"). + SchemaType(timeSchema), + } +} + +// Edges of the UserIdentity. 
+func (UserIdentity) Edges() []ent.Edge { + return []ent.Edge{} +} diff --git a/storage/ent/sqlite_test.go b/storage/ent/sqlite_test.go index 55c1b5c5..54638c19 100644 --- a/storage/ent/sqlite_test.go +++ b/storage/ent/sqlite_test.go @@ -21,4 +21,5 @@ func newSQLiteStorage(t *testing.T) storage.Storage { func TestSQLite3(t *testing.T) { conformance.RunTests(t, newSQLiteStorage) + conformance.RunConcurrencyTests(t, newSQLiteStorage) } diff --git a/storage/etcd/config.go b/storage/etcd/config.go index a8aee39a..b4850f3a 100644 --- a/storage/etcd/config.go +++ b/storage/etcd/config.go @@ -15,10 +15,10 @@ var defaultDialTimeout = 2 * time.Second // SSL represents SSL options for etcd databases. type SSL struct { - ServerName string `json:"serverName" yaml:"serverName"` - CAFile string `json:"caFile" yaml:"caFile"` - KeyFile string `json:"keyFile" yaml:"keyFile"` - CertFile string `json:"certFile" yaml:"certFile"` + ServerName string `json:"serverName"` + CAFile string `json:"caFile"` + KeyFile string `json:"keyFile"` + CertFile string `json:"certFile"` } // Etcd options for connecting to etcd databases. 
@@ -26,11 +26,11 @@ type SSL struct { // configure an etcd namespace either via Namespace field or using `etcd grpc-proxy // --namespace=` type Etcd struct { - Endpoints []string `json:"endpoints" yaml:"endpoints"` - Namespace string `json:"namespace" yaml:"namespace"` - Username string `json:"username" yaml:"username"` - Password string `json:"password" yaml:"password"` - SSL SSL `json:"ssl" yaml:"ssl"` + Endpoints []string `json:"endpoints"` + Namespace string `json:"namespace"` + Username string `json:"username"` + Password string `json:"password"` + SSL SSL `json:"ssl"` } // Open creates a new storage implementation backed by Etcd diff --git a/storage/etcd/etcd.go b/storage/etcd/etcd.go index 8ccf502f..c05f5631 100644 --- a/storage/etcd/etcd.go +++ b/storage/etcd/etcd.go @@ -24,6 +24,8 @@ const ( keysName = "openid-connect-keys" deviceRequestPrefix = "device_req/" deviceTokenPrefix = "device_token/" + userIdentityPrefix = "user_identity/" + authSessionPrefix = "auth_session/" // defaultStorageTimeout will be applied to all storage's operations. 
defaultStorageTimeout = 5 * time.Second @@ -366,6 +368,116 @@ func (c *conn) DeleteOfflineSessions(ctx context.Context, userID string, connID return c.deleteKey(ctx, keySession(userID, connID)) } +func (c *conn) CreateUserIdentity(ctx context.Context, u storage.UserIdentity) error { + return c.txnCreate(ctx, keyUserIdentity(u.UserID, u.ConnectorID), fromStorageUserIdentity(u)) +} + +func (c *conn) GetUserIdentity(ctx context.Context, userID, connectorID string) (u storage.UserIdentity, err error) { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + var ui UserIdentity + if err = c.getKey(ctx, keyUserIdentity(userID, connectorID), &ui); err != nil { + return + } + return toStorageUserIdentity(ui), nil +} + +func (c *conn) UpdateUserIdentity(ctx context.Context, userID, connectorID string, updater func(u storage.UserIdentity) (storage.UserIdentity, error)) error { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyUserIdentity(userID, connectorID), func(currentValue []byte) ([]byte, error) { + var current UserIdentity + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(toStorageUserIdentity(current)) + if err != nil { + return nil, err + } + return json.Marshal(fromStorageUserIdentity(updated)) + }) +} + +func (c *conn) DeleteUserIdentity(ctx context.Context, userID, connectorID string) error { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyUserIdentity(userID, connectorID)) +} + +func (c *conn) ListUserIdentities(ctx context.Context) (identities []storage.UserIdentity, err error) { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, userIdentityPrefix, clientv3.WithPrefix()) + if err != nil { + return identities, err + } + for _, v := range res.Kvs { + var ui 
UserIdentity + if err = json.Unmarshal(v.Value, &ui); err != nil { + return identities, err + } + identities = append(identities, toStorageUserIdentity(ui)) + } + return identities, nil +} + +func (c *conn) CreateAuthSession(ctx context.Context, s storage.AuthSession) error { + return c.txnCreate(ctx, keyAuthSession(s.ID), fromStorageAuthSession(s)) +} + +func (c *conn) GetAuthSession(ctx context.Context, sessionID string) (storage.AuthSession, error) { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + var s AuthSession + if err := c.getKey(ctx, keyAuthSession(sessionID), &s); err != nil { + return storage.AuthSession{}, err + } + return toStorageAuthSession(s), nil +} + +func (c *conn) UpdateAuthSession(ctx context.Context, sessionID string, updater func(s storage.AuthSession) (storage.AuthSession, error)) error { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyAuthSession(sessionID), func(currentValue []byte) ([]byte, error) { + var current AuthSession + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(toStorageAuthSession(current)) + if err != nil { + return nil, err + } + return json.Marshal(fromStorageAuthSession(updated)) + }) +} + +func (c *conn) ListAuthSessions(ctx context.Context) (sessions []storage.AuthSession, err error) { + ctx, cancel := context.WithTimeout(ctx, defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, authSessionPrefix, clientv3.WithPrefix()) + if err != nil { + return sessions, err + } + for _, v := range res.Kvs { + var s AuthSession + if err = json.Unmarshal(v.Value, &s); err != nil { + return sessions, err + } + sessions = append(sessions, toStorageAuthSession(s)) + } + return sessions, nil +} + +func (c *conn) DeleteAuthSession(ctx context.Context, sessionID string) error { + ctx, cancel := context.WithTimeout(ctx, 
defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyAuthSession(sessionID)) +} + func (c *conn) CreateConnector(ctx context.Context, connector storage.Connector) error { return c.txnCreate(ctx, keyID(connectorPrefix, connector.ID), connector) } @@ -557,6 +669,14 @@ func keySession(userID, connID string) string { return offlineSessionPrefix + strings.ToLower(userID+"|"+connID) } +func keyUserIdentity(userID, connectorID string) string { + return userIdentityPrefix + strings.ToLower(userID+"|"+connectorID) +} + +func keyAuthSession(sessionID string) string { + return strings.ToLower(authSessionPrefix + sessionID) +} + func (c *conn) CreateDeviceRequest(ctx context.Context, d storage.DeviceRequest) error { return c.txnCreate(ctx, keyID(deviceRequestPrefix, d.UserCode), fromStorageDeviceRequest(d)) } diff --git a/storage/etcd/etcd_test.go b/storage/etcd/etcd_test.go index 6783c25b..55501b49 100644 --- a/storage/etcd/etcd_test.go +++ b/storage/etcd/etcd_test.go @@ -89,4 +89,11 @@ func TestEtcd(t *testing.T) { withTimeout(time.Minute*1, func() { conformance.RunTransactionTests(t, newStorage) }) + + // TODO(nabokihms): etcd uses compare-and-swap (txnUpdate) for UpdateRefreshToken, + // but does not retry on CAS conflicts ("concurrent conflicting update happened"). + // Under high contention virtually all updates fail — only the first writer succeeds. 
+ // withTimeout(time.Minute*1, func() { + // conformance.RunConcurrencyTests(t, newStorage) + // }) } diff --git a/storage/etcd/types.go b/storage/etcd/types.go index b3756604..3624de32 100644 --- a/storage/etcd/types.go +++ b/storage/etcd/types.go @@ -256,6 +256,82 @@ func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions { return s } +// UserIdentity is a mirrored struct from storage with JSON struct tags +type UserIdentity struct { + UserID string `json:"user_id,omitempty"` + ConnectorID string `json:"connector_id,omitempty"` + Claims Claims `json:"claims,omitempty"` + Consents map[string][]string `json:"consents,omitempty"` + CreatedAt time.Time `json:"created_at"` + LastLogin time.Time `json:"last_login"` + BlockedUntil time.Time `json:"blocked_until"` +} + +func fromStorageUserIdentity(u storage.UserIdentity) UserIdentity { + return UserIdentity{ + UserID: u.UserID, + ConnectorID: u.ConnectorID, + Claims: fromStorageClaims(u.Claims), + Consents: u.Consents, + CreatedAt: u.CreatedAt, + LastLogin: u.LastLogin, + BlockedUntil: u.BlockedUntil, + } +} + +func toStorageUserIdentity(u UserIdentity) storage.UserIdentity { + s := storage.UserIdentity{ + UserID: u.UserID, + ConnectorID: u.ConnectorID, + Claims: toStorageClaims(u.Claims), + Consents: u.Consents, + CreatedAt: u.CreatedAt, + LastLogin: u.LastLogin, + BlockedUntil: u.BlockedUntil, + } + if s.Consents == nil { + // Server code assumes this will be non-nil. + s.Consents = make(map[string][]string) + } + return s +} + +// AuthSession is a mirrored struct from storage with JSON struct tags. 
+type AuthSession struct { + ID string `json:"id,omitempty"` + ClientStates map[string]*storage.ClientAuthState `json:"client_states,omitempty"` + CreatedAt time.Time `json:"created_at"` + LastActivity time.Time `json:"last_activity"` + IPAddress string `json:"ip_address,omitempty"` + UserAgent string `json:"user_agent,omitempty"` +} + +func fromStorageAuthSession(s storage.AuthSession) AuthSession { + return AuthSession{ + ID: s.ID, + ClientStates: s.ClientStates, + CreatedAt: s.CreatedAt, + LastActivity: s.LastActivity, + IPAddress: s.IPAddress, + UserAgent: s.UserAgent, + } +} + +func toStorageAuthSession(s AuthSession) storage.AuthSession { + result := storage.AuthSession{ + ID: s.ID, + ClientStates: s.ClientStates, + CreatedAt: s.CreatedAt, + LastActivity: s.LastActivity, + IPAddress: s.IPAddress, + UserAgent: s.UserAgent, + } + if result.ClientStates == nil { + result.ClientStates = make(map[string]*storage.ClientAuthState) + } + return result +} + // DeviceRequest is a mirrored struct from storage with JSON struct tags type DeviceRequest struct { UserCode string `json:"user_code"` diff --git a/storage/kubernetes/lock.go b/storage/kubernetes/lock.go index c67380dc..092f6d0e 100644 --- a/storage/kubernetes/lock.go +++ b/storage/kubernetes/lock.go @@ -22,8 +22,18 @@ var ( // - Some of OIDC providers could use the refresh token rotation feature which requires calling refresh only once. // - Providers can limit the rate of requests to the token endpoint, which will lead to the error // in case of many concurrent requests. +// +// The lock uses a Kubernetes annotation on the refresh token resource as a mutex. +// Only one goroutine can hold the lock at a time; others poll until the annotation +// is removed (unlocked) or expires (broken). The Kubernetes resourceVersion on put +// acts as compare-and-swap: if two goroutines race to set the annotation, only one +// succeeds and the other gets a 409 Conflict. 
type refreshTokenLock struct { - cli *client + cli *client + // waitingState tracks whether this lock instance has lost a compare-and-swap race + // and is now polling for the lock to be released. Used by Unlock to skip the + // annotation removal — only the goroutine that successfully wrote the annotation + // should remove it. waitingState bool } @@ -31,8 +41,10 @@ func newRefreshTokenLock(cli *client) *refreshTokenLock { return &refreshTokenLock{cli: cli} } +// Lock polls until the lock annotation can be set on the refresh token resource. +// Returns nil when the lock is acquired, or an error on timeout (200 attempts × 100ms). func (l *refreshTokenLock) Lock(id string) error { - for i := 0; i <= 60; i++ { + for i := 0; i <= 200; i++ { ok, err := l.setLockAnnotation(id) if err != nil { return err @@ -45,9 +57,12 @@ func (l *refreshTokenLock) Lock(id string) error { return fmt.Errorf("timeout waiting for refresh token %s lock", id) } +// Unlock removes the lock annotation from the refresh token resource. +// Only the holder of the lock (waitingState == false) performs the removal. func (l *refreshTokenLock) Unlock(id string) { if l.waitingState { - // Do not need to unlock for waiting goroutines, because the have not set it. + // This goroutine never successfully wrote the annotation, so there's + // nothing to remove. Another goroutine holds (or held) the lock. return } @@ -64,6 +79,13 @@ func (l *refreshTokenLock) Unlock(id string) { } } +// setLockAnnotation attempts to acquire the lock by writing an annotation with +// an expiration timestamp. Returns (true, nil) when the caller should keep waiting, +// (false, nil) when the lock is acquired, or (false, err) on a non-retriable error. +// +// The locking protocol relies on Kubernetes optimistic concurrency: every put +// includes the resource's current resourceVersion, so concurrent writes to the +// same object result in a 409 Conflict for all but one writer. 
func (l *refreshTokenLock) setLockAnnotation(id string) (bool, error) { r, err := l.cli.getRefreshToken(id) if err != nil { @@ -77,13 +99,14 @@ func (l *refreshTokenLock) setLockAnnotation(id string) (bool, error) { val, ok := r.Annotations[lockAnnotation] if !ok { - if l.waitingState { - return false, nil - } - + // No annotation means the lock is free. Every goroutine — whether it's + // a first-time caller or was previously waiting — must compete by writing + // the annotation. The put uses the current resourceVersion, so only one + // writer succeeds; the rest get a 409 Conflict and go back to polling. r.Annotations = lockData err := l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r) if err == nil { + l.waitingState = false return false, nil } @@ -100,24 +123,24 @@ func (l *refreshTokenLock) setLockAnnotation(id string) (bool, error) { } if !currentTime.After(until) { - // waiting for the lock to be released + // Lock is held by another goroutine and has not expired yet — keep polling. l.waitingState = true return true, nil } - // Lock time is out, lets break the lock and take the advantage + // Lock has expired (holder crashed or is too slow). Attempt to break it by + // overwriting the annotation with a new expiration. Again, only one writer + // can win the compare-and-swap race. 
r.Annotations = lockData err = l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r) if err == nil { - // break lock annotation return false, nil } l.cli.logger.Debug("break lock annotation", "error", err) if isKubernetesAPIConflictError(err) { l.waitingState = true - // after breaking error waiting for the lock to be released return true, nil } return false, err diff --git a/storage/kubernetes/storage.go b/storage/kubernetes/storage.go index 028ced7e..55ea7455 100644 --- a/storage/kubernetes/storage.go +++ b/storage/kubernetes/storage.go @@ -25,6 +25,8 @@ const ( kindConnector = "Connector" kindDeviceRequest = "DeviceRequest" kindDeviceToken = "DeviceToken" + kindUserIdentity = "UserIdentity" + kindAuthSession = "AuthSession" ) const ( @@ -38,6 +40,8 @@ const ( resourceConnector = "connectors" resourceDeviceRequest = "devicerequests" resourceDeviceToken = "devicetokens" + resourceUserIdentity = "useridentities" + resourceAuthSession = "authsessions" ) const ( @@ -743,6 +747,118 @@ func (cli *client) UpdateDeviceToken(ctx context.Context, deviceCode string, upd }) } +func (cli *client) CreateUserIdentity(ctx context.Context, u storage.UserIdentity) error { + return cli.post(resourceUserIdentity, cli.fromStorageUserIdentity(u)) +} + +func (cli *client) GetUserIdentity(ctx context.Context, userID, connectorID string) (storage.UserIdentity, error) { + u, err := cli.getUserIdentity(userID, connectorID) + if err != nil { + return storage.UserIdentity{}, err + } + return toStorageUserIdentity(u), nil +} + +func (cli *client) getUserIdentity(userID, connectorID string) (u UserIdentity, err error) { + name := cli.offlineTokenName(userID, connectorID) + if err = cli.get(resourceUserIdentity, name, &u); err != nil { + return UserIdentity{}, err + } + if userID != u.UserID || connectorID != u.ConnectorID { + return UserIdentity{}, fmt.Errorf("get user identity: wrong identity retrieved") + } + return u, nil +} + +func (cli *client) UpdateUserIdentity(ctx context.Context, 
userID, connectorID string, updater func(old storage.UserIdentity) (storage.UserIdentity, error)) error { + return retryOnConflict(ctx, func() error { + u, err := cli.getUserIdentity(userID, connectorID) + if err != nil { + return err + } + + updated, err := updater(toStorageUserIdentity(u)) + if err != nil { + return err + } + + newUserIdentity := cli.fromStorageUserIdentity(updated) + newUserIdentity.ObjectMeta = u.ObjectMeta + return cli.put(resourceUserIdentity, u.ObjectMeta.Name, newUserIdentity) + }) +} + +func (cli *client) DeleteUserIdentity(ctx context.Context, userID, connectorID string) error { + // Check for hash collision. + u, err := cli.getUserIdentity(userID, connectorID) + if err != nil { + return err + } + return cli.delete(resourceUserIdentity, u.ObjectMeta.Name) +} + +func (cli *client) ListUserIdentities(ctx context.Context) ([]storage.UserIdentity, error) { + var userIdentityList UserIdentityList + if err := cli.list(resourceUserIdentity, &userIdentityList); err != nil { + return nil, fmt.Errorf("failed to list user identities: %v", err) + } + + userIdentities := make([]storage.UserIdentity, len(userIdentityList.UserIdentities)) + for i, u := range userIdentityList.UserIdentities { + userIdentities[i] = toStorageUserIdentity(u) + } + + return userIdentities, nil +} + +func (cli *client) CreateAuthSession(ctx context.Context, s storage.AuthSession) error { + return cli.post(resourceAuthSession, cli.fromStorageAuthSession(s)) +} + +func (cli *client) GetAuthSession(ctx context.Context, sessionID string) (storage.AuthSession, error) { + var s AuthSession + if err := cli.get(resourceAuthSession, sessionID, &s); err != nil { + return storage.AuthSession{}, err + } + return toStorageAuthSession(s), nil +} + +func (cli *client) UpdateAuthSession(ctx context.Context, sessionID string, updater func(old storage.AuthSession) (storage.AuthSession, error)) error { + return retryOnConflict(ctx, func() error { + var s AuthSession + if err := 
cli.get(resourceAuthSession, sessionID, &s); err != nil { + return err + } + + updated, err := updater(toStorageAuthSession(s)) + if err != nil { + return err + } + + newSession := cli.fromStorageAuthSession(updated) + newSession.ObjectMeta = s.ObjectMeta + return cli.put(resourceAuthSession, sessionID, newSession) + }) +} + +func (cli *client) ListAuthSessions(ctx context.Context) ([]storage.AuthSession, error) { + var authSessionList AuthSessionList + if err := cli.list(resourceAuthSession, &authSessionList); err != nil { + return nil, fmt.Errorf("failed to list auth sessions: %v", err) + } + + sessions := make([]storage.AuthSession, len(authSessionList.AuthSessions)) + for i, s := range authSessionList.AuthSessions { + sessions[i] = toStorageAuthSession(s) + } + + return sessions, nil +} + +func (cli *client) DeleteAuthSession(ctx context.Context, sessionID string) error { + return cli.delete(resourceAuthSession, sessionID) +} + func isKubernetesAPIConflictError(err error) bool { if httpErr, ok := err.(httpError); ok { if httpErr.StatusCode() == http.StatusConflict { diff --git a/storage/kubernetes/storage_test.go b/storage/kubernetes/storage_test.go index 98ef25fa..906e5ce5 100644 --- a/storage/kubernetes/storage_test.go +++ b/storage/kubernetes/storage_test.go @@ -85,6 +85,7 @@ func (s *StorageTestSuite) TestStorage() { } conformance.RunTests(s.T(), newStorage) + conformance.RunConcurrencyTests(s.T(), newStorage) conformance.RunTransactionTests(s.T(), newStorage) } diff --git a/storage/kubernetes/types.go b/storage/kubernetes/types.go index 9e0f7e68..473f59cc 100644 --- a/storage/kubernetes/types.go +++ b/storage/kubernetes/types.go @@ -226,6 +226,40 @@ func customResourceDefinitions(apiVersion string) []k8sapi.CustomResourceDefinit }, }, }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "useridentities.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: 
scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "useridentities", + Singular: "useridentity", + Kind: "UserIdentity", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "authsessions.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "authsessions", + Singular: "authsession", + Kind: "AuthSession", + }, + }, + }, } } @@ -251,6 +285,8 @@ type Client struct { Name string `json:"name,omitempty"` LogoURL string `json:"logoURL,omitempty"` + + AllowedConnectors []string `json:"allowedConnectors,omitempty"` } // ClientList is a list of Clients. @@ -270,25 +306,27 @@ func (cli *client) fromStorageClient(c storage.Client) Client { Name: cli.idToName(c.ID), Namespace: cli.namespace, }, - ID: c.ID, - Secret: c.Secret, - RedirectURIs: c.RedirectURIs, - TrustedPeers: c.TrustedPeers, - Public: c.Public, - Name: c.Name, - LogoURL: c.LogoURL, + ID: c.ID, + Secret: c.Secret, + RedirectURIs: c.RedirectURIs, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + Name: c.Name, + LogoURL: c.LogoURL, + AllowedConnectors: c.AllowedConnectors, } } func toStorageClient(c Client) storage.Client { return storage.Client{ - ID: c.ID, - Secret: c.Secret, - RedirectURIs: c.RedirectURIs, - TrustedPeers: c.TrustedPeers, - Public: c.Public, - Name: c.Name, - LogoURL: c.LogoURL, + ID: c.ID, + Secret: c.Secret, + RedirectURIs: c.RedirectURIs, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + Name: c.Name, + LogoURL: c.LogoURL, + AllowedConnectors: c.AllowedConnectors, } } @@ -721,6 +759,8 @@ type Connector struct { Name string `json:"name,omitempty"` // Config holds connector specific configuration information Config []byte `json:"config,omitempty"` + // GrantTypes is a list of grant types that this connector is allowed to be used with. 
+ GrantTypes []string `json:"grantTypes,omitempty"` } func (cli *client) fromStorageConnector(c storage.Connector) Connector { @@ -733,10 +773,11 @@ func (cli *client) fromStorageConnector(c storage.Connector) Connector { Name: c.ID, Namespace: cli.namespace, }, - ID: c.ID, - Type: c.Type, - Name: c.Name, - Config: c.Config, + ID: c.ID, + Type: c.Type, + Name: c.Name, + Config: c.Config, + GrantTypes: c.GrantTypes, } } @@ -747,6 +788,7 @@ func toStorageConnector(c Connector) storage.Connector { Name: c.Name, ResourceVersion: c.ObjectMeta.ResourceVersion, Config: c.Config, + GrantTypes: c.GrantTypes, } } @@ -864,3 +906,114 @@ func toStorageDeviceToken(t DeviceToken) storage.DeviceToken { }, } } + +// UserIdentity is a mirrored struct from storage with JSON struct tags and Kubernetes +// type metadata. +type UserIdentity struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + UserID string `json:"userID,omitempty"` + ConnectorID string `json:"connectorID,omitempty"` + Claims Claims `json:"claims,omitempty"` + Consents map[string][]string `json:"consents,omitempty"` + CreatedAt time.Time `json:"createdAt,omitempty"` + LastLogin time.Time `json:"lastLogin,omitempty"` + BlockedUntil time.Time `json:"blockedUntil,omitempty"` +} + +// UserIdentityList is a list of UserIdentities. 
+type UserIdentityList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + UserIdentities []UserIdentity `json:"items"` +} + +func (cli *client) fromStorageUserIdentity(u storage.UserIdentity) UserIdentity { + return UserIdentity{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindUserIdentity, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: cli.offlineTokenName(u.UserID, u.ConnectorID), + Namespace: cli.namespace, + }, + UserID: u.UserID, + ConnectorID: u.ConnectorID, + Claims: fromStorageClaims(u.Claims), + Consents: u.Consents, + CreatedAt: u.CreatedAt, + LastLogin: u.LastLogin, + BlockedUntil: u.BlockedUntil, + } +} + +func toStorageUserIdentity(u UserIdentity) storage.UserIdentity { + s := storage.UserIdentity{ + UserID: u.UserID, + ConnectorID: u.ConnectorID, + Claims: toStorageClaims(u.Claims), + Consents: u.Consents, + CreatedAt: u.CreatedAt, + LastLogin: u.LastLogin, + BlockedUntil: u.BlockedUntil, + } + if s.Consents == nil { + // Server code assumes this will be non-nil. + s.Consents = make(map[string][]string) + } + return s +} + +// AuthSession is a Kubernetes representation of a storage AuthSession. +type AuthSession struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + ClientStates map[string]*storage.ClientAuthState `json:"clientStates,omitempty"` + CreatedAt time.Time `json:"createdAt,omitempty"` + LastActivity time.Time `json:"lastActivity,omitempty"` + IPAddress string `json:"ipAddress,omitempty"` + UserAgent string `json:"userAgent,omitempty"` +} + +// AuthSessionList is a list of AuthSessions. 
+type AuthSessionList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + AuthSessions []AuthSession `json:"items"` +} + +func (cli *client) fromStorageAuthSession(s storage.AuthSession) AuthSession { + return AuthSession{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindAuthSession, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: s.ID, + Namespace: cli.namespace, + }, + ClientStates: s.ClientStates, + CreatedAt: s.CreatedAt, + LastActivity: s.LastActivity, + IPAddress: s.IPAddress, + UserAgent: s.UserAgent, + } +} + +func toStorageAuthSession(s AuthSession) storage.AuthSession { + result := storage.AuthSession{ + ID: s.ObjectMeta.Name, + ClientStates: s.ClientStates, + CreatedAt: s.CreatedAt, + LastActivity: s.LastActivity, + IPAddress: s.IPAddress, + UserAgent: s.UserAgent, + } + if result.ClientStates == nil { + result.ClientStates = make(map[string]*storage.ClientAuthState) + } + return result +} diff --git a/storage/memory/memory.go b/storage/memory/memory.go index eff75e71..483ed246 100644 --- a/storage/memory/memory.go +++ b/storage/memory/memory.go @@ -21,7 +21,9 @@ func New(logger *slog.Logger) storage.Storage { refreshTokens: make(map[string]storage.RefreshToken), authReqs: make(map[string]storage.AuthRequest), passwords: make(map[string]storage.Password), - offlineSessions: make(map[offlineSessionID]storage.OfflineSessions), + offlineSessions: make(map[compositeKeyID]storage.OfflineSessions), + userIdentities: make(map[compositeKeyID]storage.UserIdentity), + authSessions: make(map[string]storage.AuthSession), connectors: make(map[string]storage.Connector), deviceRequests: make(map[string]storage.DeviceRequest), deviceTokens: make(map[string]storage.DeviceToken), @@ -48,7 +50,9 @@ type memStorage struct { refreshTokens map[string]storage.RefreshToken authReqs map[string]storage.AuthRequest passwords map[string]storage.Password - offlineSessions map[offlineSessionID]storage.OfflineSessions + 
offlineSessions map[compositeKeyID]storage.OfflineSessions + userIdentities map[compositeKeyID]storage.UserIdentity + authSessions map[string]storage.AuthSession connectors map[string]storage.Connector deviceRequests map[string]storage.DeviceRequest deviceTokens map[string]storage.DeviceToken @@ -58,7 +62,7 @@ type memStorage struct { logger *slog.Logger } -type offlineSessionID struct { +type compositeKeyID struct { userID string connID string } @@ -158,7 +162,7 @@ func (s *memStorage) CreatePassword(ctx context.Context, p storage.Password) (er } func (s *memStorage) CreateOfflineSessions(ctx context.Context, o storage.OfflineSessions) (err error) { - id := offlineSessionID{ + id := compositeKeyID{ userID: o.UserID, connID: o.ConnID, } @@ -172,6 +176,133 @@ func (s *memStorage) CreateOfflineSessions(ctx context.Context, o storage.Offlin return } +func (s *memStorage) CreateUserIdentity(ctx context.Context, u storage.UserIdentity) (err error) { + id := compositeKeyID{ + userID: u.UserID, + connID: u.ConnectorID, + } + s.tx(func() { + if _, ok := s.userIdentities[id]; ok { + err = storage.ErrAlreadyExists + } else { + s.userIdentities[id] = u + } + }) + return +} + +func (s *memStorage) GetUserIdentity(ctx context.Context, userID, connectorID string) (u storage.UserIdentity, err error) { + id := compositeKeyID{ + userID: userID, + connID: connectorID, + } + s.tx(func() { + var ok bool + if u, ok = s.userIdentities[id]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) UpdateUserIdentity(ctx context.Context, userID, connectorID string, updater func(u storage.UserIdentity) (storage.UserIdentity, error)) (err error) { + id := compositeKeyID{ + userID: userID, + connID: connectorID, + } + s.tx(func() { + r, ok := s.userIdentities[id] + if !ok { + err = storage.ErrNotFound + return + } + if r, err = updater(r); err == nil { + s.userIdentities[id] = r + } + }) + return +} + +func (s *memStorage) DeleteUserIdentity(ctx context.Context, 
userID, connectorID string) (err error) { + id := compositeKeyID{ + userID: userID, + connID: connectorID, + } + s.tx(func() { + if _, ok := s.userIdentities[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.userIdentities, id) + }) + return +} + +func (s *memStorage) ListUserIdentities(ctx context.Context) (identities []storage.UserIdentity, err error) { + s.tx(func() { + for _, u := range s.userIdentities { + identities = append(identities, u) + } + }) + return +} + +func (s *memStorage) ListAuthSessions(ctx context.Context) (sessions []storage.AuthSession, err error) { + s.tx(func() { + for _, session := range s.authSessions { + sessions = append(sessions, session) + } + }) + return +} + +func (s *memStorage) CreateAuthSession(ctx context.Context, session storage.AuthSession) (err error) { + s.tx(func() { + if _, ok := s.authSessions[session.ID]; ok { + err = storage.ErrAlreadyExists + } else { + s.authSessions[session.ID] = session + } + }) + return +} + +func (s *memStorage) GetAuthSession(ctx context.Context, sessionID string) (session storage.AuthSession, err error) { + s.tx(func() { + var ok bool + if session, ok = s.authSessions[sessionID]; !ok { + err = storage.ErrNotFound + } + }) + return +} + +func (s *memStorage) UpdateAuthSession(ctx context.Context, sessionID string, updater func(s storage.AuthSession) (storage.AuthSession, error)) (err error) { + s.tx(func() { + r, ok := s.authSessions[sessionID] + if !ok { + err = storage.ErrNotFound + return + } + if r, err = updater(r); err == nil { + s.authSessions[sessionID] = r + } + }) + return +} + +func (s *memStorage) DeleteAuthSession(ctx context.Context, sessionID string) (err error) { + s.tx(func() { + if _, ok := s.authSessions[sessionID]; !ok { + err = storage.ErrNotFound + return + } + delete(s.authSessions, sessionID) + }) + return +} + func (s *memStorage) CreateConnector(ctx context.Context, connector storage.Connector) (err error) { s.tx(func() { if _, ok := 
s.connectors[connector.ID]; ok { @@ -243,7 +374,7 @@ func (s *memStorage) GetAuthRequest(ctx context.Context, id string) (req storage } func (s *memStorage) GetOfflineSessions(ctx context.Context, userID string, connID string) (o storage.OfflineSessions, err error) { - id := offlineSessionID{ + id := compositeKeyID{ userID: userID, connID: connID, } @@ -360,7 +491,7 @@ func (s *memStorage) DeleteAuthRequest(ctx context.Context, id string) (err erro } func (s *memStorage) DeleteOfflineSessions(ctx context.Context, userID string, connID string) (err error) { - id := offlineSessionID{ + id := compositeKeyID{ userID: userID, connID: connID, } @@ -453,7 +584,7 @@ func (s *memStorage) UpdateRefreshToken(ctx context.Context, id string, updater } func (s *memStorage) UpdateOfflineSessions(ctx context.Context, userID string, connID string, updater func(o storage.OfflineSessions) (storage.OfflineSessions, error)) (err error) { - id := offlineSessionID{ + id := compositeKeyID{ userID: userID, connID: connID, } diff --git a/storage/memory/memory_test.go b/storage/memory/memory_test.go index e6e8232f..acd0be1b 100644 --- a/storage/memory/memory_test.go +++ b/storage/memory/memory_test.go @@ -15,4 +15,5 @@ func TestStorage(t *testing.T) { return New(logger) } conformance.RunTests(t, newStorage) + conformance.RunConcurrencyTests(t, newStorage) } diff --git a/storage/sql/config.go b/storage/sql/config.go index 222b263a..602e7f8a 100644 --- a/storage/sql/config.go +++ b/storage/sql/config.go @@ -78,7 +78,7 @@ type SSL struct { type Postgres struct { NetworkDB - SSL SSL `json:"ssl" yaml:"ssl"` + SSL SSL `json:"ssl"` } // Open creates a new storage implementation backed by Postgres. @@ -206,7 +206,7 @@ func (p *Postgres) open(logger *slog.Logger) (*conn, error) { type MySQL struct { NetworkDB - SSL SSL `json:"ssl" yaml:"ssl"` + SSL SSL `json:"ssl"` // TODO(pborzenkov): used by tests to reduce lock wait timeout. 
Should // we make it exported and allow users to provide arbitrary params? diff --git a/storage/sql/config_test.go b/storage/sql/config_test.go index 93a593ea..c098919b 100644 --- a/storage/sql/config_test.go +++ b/storage/sql/config_test.go @@ -50,7 +50,7 @@ type opener interface { open(logger *slog.Logger) (*conn, error) } -func testDB(t *testing.T, o opener, withTransactions bool) { +func testDB(t *testing.T, o opener, withTransactions, withConcurrentTests bool) { // t.Fatal has a bad habit of not actually printing the error fatal := func(i any) { fmt.Fprintln(os.Stdout, i) @@ -71,11 +71,18 @@ func testDB(t *testing.T, o opener, withTransactions bool) { withTimeout(time.Minute*1, func() { conformance.RunTests(t, newStorage) }) + if withTransactions { withTimeout(time.Minute*1, func() { conformance.RunTransactionTests(t, newStorage) }) } + + if withConcurrentTests { + withTimeout(time.Minute*1, func() { + conformance.RunConcurrencyTests(t, newStorage) + }) + } } func getenv(key, defaultVal string) string { @@ -236,7 +243,7 @@ func TestPostgres(t *testing.T) { Mode: pgSSLDisable, // Postgres container doesn't support SSL. 
}, } - testDB(t, p, true) + testDB(t, p, true, false) } const testMySQLEnv = "DEX_MYSQL_HOST" @@ -273,5 +280,42 @@ func TestMySQL(t *testing.T) { "innodb_lock_wait_timeout": "3", }, } - testDB(t, s, true) + testDB(t, s, true, false) +} + +const testMySQL8Env = "DEX_MYSQL8_HOST" + +func TestMySQL8(t *testing.T) { + host := os.Getenv(testMySQL8Env) + if host == "" { + t.Skipf("test environment variable %q not set, skipping", testMySQL8Env) + } + + port := uint64(3306) + if rawPort := os.Getenv("DEX_MYSQL8_PORT"); rawPort != "" { + var err error + + port, err = strconv.ParseUint(rawPort, 10, 32) + if err != nil { + t.Fatalf("invalid mysql port %q: %s", rawPort, err) + } + } + + s := &MySQL{ + NetworkDB: NetworkDB{ + Database: getenv("DEX_MYSQL8_DATABASE", "mysql"), + User: getenv("DEX_MYSQL8_USER", "mysql"), + Password: getenv("DEX_MYSQL8_PASSWORD", "mysql"), + Host: host, + Port: uint16(port), + ConnectionTimeout: 5, + }, + SSL: SSL{ + Mode: mysqlSSLFalse, + }, + params: map[string]string{ + "innodb_lock_wait_timeout": "3", + }, + } + testDB(t, s, true, false) } diff --git a/storage/sql/crud.go b/storage/sql/crud.go index e60f29d5..ab11713a 100644 --- a/storage/sql/crud.go +++ b/storage/sql/crud.go @@ -513,9 +513,10 @@ func (c *conn) UpdateClient(ctx context.Context, id string, updater func(old sto trusted_peers = $3, public = $4, name = $5, - logo_url = $6 - where id = $7; - `, nc.Secret, encoder(nc.RedirectURIs), encoder(nc.TrustedPeers), nc.Public, nc.Name, nc.LogoURL, id, + logo_url = $6, + allowed_connectors = $7 + where id = $8; + `, nc.Secret, encoder(nc.RedirectURIs), encoder(nc.TrustedPeers), nc.Public, nc.Name, nc.LogoURL, encoder(nc.AllowedConnectors), id, ) if err != nil { return fmt.Errorf("update client: %v", err) @@ -527,12 +528,12 @@ func (c *conn) UpdateClient(ctx context.Context, id string, updater func(old sto func (c *conn) CreateClient(ctx context.Context, cli storage.Client) error { _, err := c.Exec(` insert into client ( - id, secret, 
redirect_uris, trusted_peers, public, name, logo_url + id, secret, redirect_uris, trusted_peers, public, name, logo_url, allowed_connectors ) - values ($1, $2, $3, $4, $5, $6, $7); + values ($1, $2, $3, $4, $5, $6, $7, $8); `, cli.ID, cli.Secret, encoder(cli.RedirectURIs), encoder(cli.TrustedPeers), - cli.Public, cli.Name, cli.LogoURL, + cli.Public, cli.Name, cli.LogoURL, encoder(cli.AllowedConnectors), ) if err != nil { if c.alreadyExistsCheck(err) { @@ -546,7 +547,7 @@ func (c *conn) CreateClient(ctx context.Context, cli storage.Client) error { func getClient(ctx context.Context, q querier, id string) (storage.Client, error) { return scanClient(q.QueryRow(` select - id, secret, redirect_uris, trusted_peers, public, name, logo_url + id, secret, redirect_uris, trusted_peers, public, name, logo_url, allowed_connectors from client where id = $1; `, id)) } @@ -558,7 +559,7 @@ func (c *conn) GetClient(ctx context.Context, id string) (storage.Client, error) func (c *conn) ListClients(ctx context.Context) ([]storage.Client, error) { rows, err := c.Query(` select - id, secret, redirect_uris, trusted_peers, public, name, logo_url + id, secret, redirect_uris, trusted_peers, public, name, logo_url, allowed_connectors from client; `) if err != nil { @@ -581,9 +582,10 @@ func (c *conn) ListClients(ctx context.Context) ([]storage.Client, error) { } func scanClient(s scanner) (cli storage.Client, err error) { + var allowedConnectors []byte err = s.Scan( &cli.ID, &cli.Secret, decoder(&cli.RedirectURIs), decoder(&cli.TrustedPeers), - &cli.Public, &cli.Name, &cli.LogoURL, + &cli.Public, &cli.Name, &cli.LogoURL, &allowedConnectors, ) if err != nil { if err == sql.ErrNoRows { @@ -591,6 +593,11 @@ func scanClient(s scanner) (cli storage.Client, err error) { } return cli, fmt.Errorf("get client: %v", err) } + if len(allowedConnectors) > 0 { + if err := json.Unmarshal(allowedConnectors, &cli.AllowedConnectors); err != nil { + return cli, fmt.Errorf("unmarshal client allowed connectors: 
%v", err) + } + } return cli, nil } @@ -768,16 +775,301 @@ func scanOfflineSessions(s scanner) (o storage.OfflineSessions, err error) { return o, nil } -func (c *conn) CreateConnector(ctx context.Context, connector storage.Connector) error { +func (c *conn) CreateUserIdentity(ctx context.Context, u storage.UserIdentity) error { + _, err := c.Exec(` + insert into user_identity ( + user_id, connector_id, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + consents, + created_at, last_login, blocked_until + ) + values ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12 + ); + `, + u.UserID, u.ConnectorID, + u.Claims.UserID, u.Claims.Username, u.Claims.PreferredUsername, + u.Claims.Email, u.Claims.EmailVerified, encoder(u.Claims.Groups), + encoder(u.Consents), + u.CreatedAt, u.LastLogin, u.BlockedUntil, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert user identity: %v", err) + } + return nil +} + +func (c *conn) UpdateUserIdentity(ctx context.Context, userID, connectorID string, updater func(u storage.UserIdentity) (storage.UserIdentity, error)) error { + return c.ExecTx(func(tx *trans) error { + u, err := getUserIdentity(ctx, tx, userID, connectorID) + if err != nil { + return err + } + + newIdentity, err := updater(u) + if err != nil { + return err + } + _, err = tx.Exec(` + update user_identity + set + claims_user_id = $1, + claims_username = $2, + claims_preferred_username = $3, + claims_email = $4, + claims_email_verified = $5, + claims_groups = $6, + consents = $7, + created_at = $8, + last_login = $9, + blocked_until = $10 + where user_id = $11 AND connector_id = $12; + `, + newIdentity.Claims.UserID, newIdentity.Claims.Username, newIdentity.Claims.PreferredUsername, + newIdentity.Claims.Email, newIdentity.Claims.EmailVerified, encoder(newIdentity.Claims.Groups), + encoder(newIdentity.Consents), + newIdentity.CreatedAt, 
newIdentity.LastLogin, newIdentity.BlockedUntil, + u.UserID, u.ConnectorID, + ) + if err != nil { + return fmt.Errorf("update user identity: %v", err) + } + return nil + }) +} + +func (c *conn) GetUserIdentity(ctx context.Context, userID, connectorID string) (storage.UserIdentity, error) { + return getUserIdentity(ctx, c, userID, connectorID) +} + +func getUserIdentity(ctx context.Context, q querier, userID, connectorID string) (storage.UserIdentity, error) { + return scanUserIdentity(q.QueryRow(` + select + user_id, connector_id, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + consents, + created_at, last_login, blocked_until + from user_identity + where user_id = $1 AND connector_id = $2; + `, userID, connectorID)) +} + +func (c *conn) ListUserIdentities(ctx context.Context) ([]storage.UserIdentity, error) { + rows, err := c.Query(` + select + user_id, connector_id, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + consents, + created_at, last_login, blocked_until + from user_identity; + `) + if err != nil { + return nil, fmt.Errorf("query: %v", err) + } + defer rows.Close() + + var identities []storage.UserIdentity + for rows.Next() { + u, err := scanUserIdentity(rows) + if err != nil { + return nil, err + } + identities = append(identities, u) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("scan: %v", err) + } + return identities, nil +} + +func scanUserIdentity(s scanner) (u storage.UserIdentity, err error) { + err = s.Scan( + &u.UserID, &u.ConnectorID, + &u.Claims.UserID, &u.Claims.Username, &u.Claims.PreferredUsername, + &u.Claims.Email, &u.Claims.EmailVerified, decoder(&u.Claims.Groups), + decoder(&u.Consents), + &u.CreatedAt, &u.LastLogin, &u.BlockedUntil, + ) + if err != nil { + if err == sql.ErrNoRows { + return u, storage.ErrNotFound + } + return u, fmt.Errorf("select user identity: %v", err) + } + 
if u.Consents == nil { + u.Consents = make(map[string][]string) + } + return u, nil +} + +func (c *conn) DeleteUserIdentity(ctx context.Context, userID, connectorID string) error { + result, err := c.Exec(`delete from user_identity where user_id = $1 AND connector_id = $2`, userID, connectorID) + if err != nil { + return fmt.Errorf("delete user_identity: user_id = %s, connector_id = %s: %w", userID, connectorID, err) + } + + // For now mandate that the driver implements RowsAffected. If we ever need to support + // a driver that doesn't implement this, we can run this in a transaction with a get beforehand. + n, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("rows affected: %v", err) + } + if n < 1 { + return storage.ErrNotFound + } + return nil +} + +func (c *conn) CreateAuthSession(ctx context.Context, s storage.AuthSession) error { _, err := c.Exec(` + insert into auth_session ( + id, client_states, + created_at, last_activity, + ip_address, user_agent + ) + values ($1, $2, $3, $4, $5, $6); + `, + s.ID, encoder(s.ClientStates), + s.CreatedAt, s.LastActivity, + s.IPAddress, s.UserAgent, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert auth session: %v", err) + } + return nil +} + +func (c *conn) UpdateAuthSession(ctx context.Context, sessionID string, updater func(s storage.AuthSession) (storage.AuthSession, error)) error { + return c.ExecTx(func(tx *trans) error { + s, err := getAuthSession(ctx, tx, sessionID) + if err != nil { + return err + } + + newSession, err := updater(s) + if err != nil { + return err + } + _, err = tx.Exec(` + update auth_session + set + client_states = $1, + last_activity = $2, + ip_address = $3, + user_agent = $4 + where id = $5; + `, + encoder(newSession.ClientStates), + newSession.LastActivity, + newSession.IPAddress, newSession.UserAgent, + sessionID, + ) + if err != nil { + return fmt.Errorf("update auth session: %v", err) + } + return nil + 
}) +} + +func (c *conn) GetAuthSession(ctx context.Context, sessionID string) (storage.AuthSession, error) { + return getAuthSession(ctx, c, sessionID) +} + +func getAuthSession(ctx context.Context, q querier, sessionID string) (storage.AuthSession, error) { + return scanAuthSession(q.QueryRow(` + select + id, client_states, + created_at, last_activity, + ip_address, user_agent + from auth_session + where id = $1; + `, sessionID)) +} + +func scanAuthSession(s scanner) (session storage.AuthSession, err error) { + err = s.Scan( + &session.ID, decoder(&session.ClientStates), + &session.CreatedAt, &session.LastActivity, + &session.IPAddress, &session.UserAgent, + ) + if err != nil { + if err == sql.ErrNoRows { + return session, storage.ErrNotFound + } + return session, fmt.Errorf("select auth session: %v", err) + } + if session.ClientStates == nil { + session.ClientStates = make(map[string]*storage.ClientAuthState) + } + return session, nil +} + +func (c *conn) ListAuthSessions(ctx context.Context) ([]storage.AuthSession, error) { + rows, err := c.Query(` + select + id, client_states, + created_at, last_activity, + ip_address, user_agent + from auth_session; + `) + if err != nil { + return nil, fmt.Errorf("query: %v", err) + } + defer rows.Close() + + var sessions []storage.AuthSession + for rows.Next() { + s, err := scanAuthSession(rows) + if err != nil { + return nil, err + } + sessions = append(sessions, s) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("scan: %v", err) + } + return sessions, nil +} + +func (c *conn) DeleteAuthSession(ctx context.Context, sessionID string) error { + result, err := c.Exec(`delete from auth_session where id = $1`, sessionID) + if err != nil { + return fmt.Errorf("delete auth_session: id = %s: %w", sessionID, err) + } + + n, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("rows affected: %v", err) + } + if n < 1 { + return storage.ErrNotFound + } + return nil +} + +func (c *conn) 
CreateConnector(ctx context.Context, connector storage.Connector) error { + grantTypes, err := json.Marshal(connector.GrantTypes) + if err != nil { + return fmt.Errorf("marshal connector grant types: %v", err) + } + _, err = c.Exec(` insert into connector ( - id, type, name, resource_version, config + id, type, name, resource_version, config, grant_types ) values ( - $1, $2, $3, $4, $5 + $1, $2, $3, $4, $5, $6 ); `, - connector.ID, connector.Type, connector.Name, connector.ResourceVersion, connector.Config, + connector.ID, connector.Type, connector.Name, connector.ResourceVersion, connector.Config, grantTypes, ) if err != nil { if c.alreadyExistsCheck(err) { @@ -799,16 +1091,21 @@ func (c *conn) UpdateConnector(ctx context.Context, id string, updater func(s st if err != nil { return err } + grantTypes, err := json.Marshal(newConn.GrantTypes) + if err != nil { + return fmt.Errorf("marshal connector grant types: %v", err) + } _, err = tx.Exec(` update connector set type = $1, name = $2, resource_version = $3, - config = $4 - where id = $5; + config = $4, + grant_types = $5 + where id = $6; `, - newConn.Type, newConn.Name, newConn.ResourceVersion, newConn.Config, connector.ID, + newConn.Type, newConn.Name, newConn.ResourceVersion, newConn.Config, grantTypes, connector.ID, ) if err != nil { return fmt.Errorf("update connector: %v", err) @@ -824,15 +1121,16 @@ func (c *conn) GetConnector(ctx context.Context, id string) (storage.Connector, func getConnector(ctx context.Context, q querier, id string) (storage.Connector, error) { return scanConnector(q.QueryRow(` select - id, type, name, resource_version, config + id, type, name, resource_version, config, grant_types from connector where id = $1; `, id)) } func scanConnector(s scanner) (c storage.Connector, err error) { + var grantTypes []byte err = s.Scan( - &c.ID, &c.Type, &c.Name, &c.ResourceVersion, &c.Config, + &c.ID, &c.Type, &c.Name, &c.ResourceVersion, &c.Config, &grantTypes, ) if err != nil { if err == 
sql.ErrNoRows { @@ -840,13 +1138,18 @@ func scanConnector(s scanner) (c storage.Connector, err error) { } return c, fmt.Errorf("select connector: %v", err) } + if len(grantTypes) > 0 { + if err := json.Unmarshal(grantTypes, &c.GrantTypes); err != nil { + return c, fmt.Errorf("unmarshal connector grant types: %v", err) + } + } return c, nil } func (c *conn) ListConnectors(ctx context.Context) ([]storage.Connector, error) { rows, err := c.Query(` select - id, type, name, resource_version, config + id, type, name, resource_version, config, grant_types from connector; `) if err != nil { diff --git a/storage/sql/migrate.go b/storage/sql/migrate.go index 9a1807c9..7561d146 100644 --- a/storage/sql/migrate.go +++ b/storage/sql/migrate.go @@ -374,4 +374,52 @@ var migrations = []migration{ }, flavor: &flavorMySQL, }, + { + stmts: []string{ + ` + alter table connector + add column grant_types bytea;`, + }, + }, + // Migration for adding allowed_connectors to client table + { + stmts: []string{ + ` + alter table client + add column allowed_connectors bytea;`, + }, + }, + { + stmts: []string{ + ` + create table user_identity ( + user_id text not null, + connector_id text not null, + claims_user_id text not null, + claims_username text not null, + claims_preferred_username text not null default '', + claims_email text not null, + claims_email_verified boolean not null, + claims_groups bytea not null, + consents bytea not null, + created_at timestamptz not null, + last_login timestamptz not null, + blocked_until timestamptz not null, + PRIMARY KEY (user_id, connector_id) + );`, + }, + }, + { + stmts: []string{ + ` + create table auth_session ( + id text not null primary key, + client_states bytea not null, + created_at timestamptz not null, + last_activity timestamptz not null, + ip_address text not null default '', + user_agent text not null default '' + );`, + }, + }, } diff --git a/storage/sql/sqlite_test.go b/storage/sql/sqlite_test.go index 89d06aee..e21cbcc2 100644 --- 
a/storage/sql/sqlite_test.go +++ b/storage/sql/sqlite_test.go @@ -8,5 +8,5 @@ import ( ) func TestSQLite3(t *testing.T) { - testDB(t, &SQLite3{":memory:"}, false) + testDB(t, &SQLite3{":memory:"}, false, true) } diff --git a/storage/storage.go b/storage/storage.go index b49e0fd9..963c7c67 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -83,6 +83,8 @@ type Storage interface { CreateRefresh(ctx context.Context, r RefreshToken) error CreatePassword(ctx context.Context, p Password) error CreateOfflineSessions(ctx context.Context, s OfflineSessions) error + CreateUserIdentity(ctx context.Context, u UserIdentity) error + CreateAuthSession(ctx context.Context, s AuthSession) error CreateConnector(ctx context.Context, c Connector) error CreateDeviceRequest(ctx context.Context, d DeviceRequest) error CreateDeviceToken(ctx context.Context, d DeviceToken) error @@ -96,6 +98,8 @@ type Storage interface { GetRefresh(ctx context.Context, id string) (RefreshToken, error) GetPassword(ctx context.Context, email string) (Password, error) GetOfflineSessions(ctx context.Context, userID string, connID string) (OfflineSessions, error) + GetUserIdentity(ctx context.Context, userID, connectorID string) (UserIdentity, error) + GetAuthSession(ctx context.Context, sessionID string) (AuthSession, error) GetConnector(ctx context.Context, id string) (Connector, error) GetDeviceRequest(ctx context.Context, userCode string) (DeviceRequest, error) GetDeviceToken(ctx context.Context, deviceCode string) (DeviceToken, error) @@ -104,6 +108,8 @@ type Storage interface { ListRefreshTokens(ctx context.Context) ([]RefreshToken, error) ListPasswords(ctx context.Context) ([]Password, error) ListConnectors(ctx context.Context) ([]Connector, error) + ListUserIdentities(ctx context.Context) ([]UserIdentity, error) + ListAuthSessions(ctx context.Context) ([]AuthSession, error) // Delete methods MUST be atomic. 
DeleteAuthRequest(ctx context.Context, id string) error @@ -112,6 +118,8 @@ type Storage interface { DeleteRefresh(ctx context.Context, id string) error DeletePassword(ctx context.Context, email string) error DeleteOfflineSessions(ctx context.Context, userID string, connID string) error + DeleteUserIdentity(ctx context.Context, userID, connectorID string) error + DeleteAuthSession(ctx context.Context, sessionID string) error DeleteConnector(ctx context.Context, id string) error // Update methods take a function for updating an object then performs that update within @@ -134,6 +142,8 @@ type Storage interface { UpdateRefreshToken(ctx context.Context, id string, updater func(r RefreshToken) (RefreshToken, error)) error UpdatePassword(ctx context.Context, email string, updater func(p Password) (Password, error)) error UpdateOfflineSessions(ctx context.Context, userID string, connID string, updater func(s OfflineSessions) (OfflineSessions, error)) error + UpdateUserIdentity(ctx context.Context, userID, connectorID string, updater func(u UserIdentity) (UserIdentity, error)) error + UpdateAuthSession(ctx context.Context, sessionID string, updater func(s AuthSession) (AuthSession, error)) error UpdateConnector(ctx context.Context, id string, updater func(c Connector) (Connector, error)) error UpdateDeviceToken(ctx context.Context, deviceCode string, updater func(t DeviceToken) (DeviceToken, error)) error @@ -149,28 +159,32 @@ type Storage interface { // - Public clients: https://developers.google.com/api-client-library/python/auth/installed-app type Client struct { // Client ID and secret used to identify the client. - ID string `json:"id" yaml:"id"` - IDEnv string `json:"idEnv" yaml:"idEnv"` - Secret string `json:"secret" yaml:"secret"` - SecretEnv string `json:"secretEnv" yaml:"secretEnv"` + ID string `json:"id"` + IDEnv string `json:"idEnv"` + Secret string `json:"secret"` + SecretEnv string `json:"secretEnv"` // A registered set of redirect URIs. 
When redirecting from dex to the client, the URI // requested to redirect to MUST match one of these values, unless the client is "public". - RedirectURIs []string `json:"redirectURIs" yaml:"redirectURIs"` + RedirectURIs []string `json:"redirectURIs"` // TrustedPeers are a list of peers which can issue tokens on this client's behalf using // the dynamic "oauth2:server:client_id:(client_id)" scope. If a peer makes such a request, // this client's ID will appear as the ID Token's audience. // // Clients inherently trust themselves. - TrustedPeers []string `json:"trustedPeers" yaml:"trustedPeers"` + TrustedPeers []string `json:"trustedPeers"` // Public clients must use either use a redirectURL 127.0.0.1:X or "urn:ietf:wg:oauth:2.0:oob" - Public bool `json:"public" yaml:"public"` + Public bool `json:"public"` // Name and LogoURL used when displaying this client to the end user. - Name string `json:"name" yaml:"name"` - LogoURL string `json:"logoURL" yaml:"logoURL"` + Name string `json:"name"` + LogoURL string `json:"logoURL"` + + // AllowedConnectors is a list of connector IDs that the client is allowed to use for authentication. + // If empty, all connectors are allowed. + AllowedConnectors []string `json:"allowedConnectors"` } // Claims represents the ID Token claims supported by the server. @@ -316,6 +330,37 @@ type RefreshTokenRef struct { LastUsed time.Time } +// UserIdentity represents persistent per-user identity data. +type UserIdentity struct { + UserID string + ConnectorID string + Claims Claims + Consents map[string][]string // clientID -> approved scopes + CreatedAt time.Time + LastLogin time.Time + BlockedUntil time.Time +} + +// ClientAuthState represents the authentication state for a specific client within a session. +type ClientAuthState struct { + UserID string + ConnectorID string + Active bool + ExpiresAt time.Time + LastActivity time.Time + LastTokenIssuedAt time.Time +} + +// AuthSession represents a browser-bound authentication session. 
+type AuthSession struct { + ID string + ClientStates map[string]*ClientAuthState // clientID -> auth state + CreatedAt time.Time + LastActivity time.Time + IPAddress string + UserAgent string +} + // OfflineSessions objects are sessions pertaining to users with refresh tokens. type OfflineSessions struct { // UserID of an end user who has logged into the server. @@ -388,6 +433,10 @@ type Connector struct { // However, fixing this requires migrating Kubernetes objects for all previously created connectors, // or making Dex reading both tags and act accordingly. Config []byte `json:"email"` + + // GrantTypes is a list of grant types that this connector is allowed to be used with. + // If empty, all grant types are allowed. + GrantTypes []string `json:"grantTypes,omitempty"` } // VerificationKey is a rotated signing key which can still be used to verify diff --git a/web/static/img/mock-icon.svg b/web/static/img/mock-icon.svg new file mode 100644 index 00000000..fda905ca --- /dev/null +++ b/web/static/img/mock-icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/web/static/main.css b/web/static/main.css index f5c61d7f..47e5c9a6 100644 --- a/web/static/main.css +++ b/web/static/main.css @@ -8,28 +8,29 @@ body { .dex-container { color: #333; - margin: 45px auto; - max-width: 500px; + margin: 60px auto; + max-width: 480px; min-width: 320px; + padding: 0 16px; text-align: center; } .dex-btn { - border-radius: 4px; + border-radius: 8px; border: 0; - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.25), 0 0 1px rgba(0, 0, 0, 0.25); cursor: pointer; - font-size: 16px; + font-size: 15px; padding: 0; + transition: background-color 0.15s ease, box-shadow 0.15s ease, transform 0.1s ease; } -.dex-btn:focus { - outline: none; +.dex-btn:focus-visible { + outline: 2px solid #4A90D9; + outline-offset: 2px; } .dex-btn:active { - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - outline: none; + transform: scale(0.98); } .dex-btn:disabled { @@ -40,17 +41,17 @@ 
body { .dex-btn-icon { background-position: center; background-repeat: no-repeat; - background-size: 24px; - border-radius: 4px 0 0 4px; + background-size: 20px; + border-radius: 8px 0 0 8px; float: left; - height: 36px; - margin-right: 5px; - width: 36px; + height: 40px; + margin-right: 4px; + width: 40px; } .dex-btn-icon--google { background-color: #FFFFFF; - background-image: url(../static/img/google-icon.svg);; + background-image: url(../static/img/google-icon.svg); } .dex-btn-icon--local { @@ -115,39 +116,43 @@ body { background-image: url(../static/img/microsoft-icon.svg); } +.dex-btn-icon--mockCallback, +.dex-btn-icon--mockPassword { + background-color: #6c5ce7; + background-image: url(../static/img/mock-icon.svg); +} + .dex-btn-text { font-weight: 600; - line-height: 36px; - padding: 6px 12px; + line-height: 40px; + padding: 6px 14px; text-align: center; } .dex-subtle-text { - color: #999; - font-size: 12px; + color: #888; + font-size: 13px; } .dex-separator { - color: #999; + color: #aaa; } .dex-list { - color: #999; + color: #888; display: inline-block; - font-size: 12px; + font-size: 13px; list-style: circle; text-align: left; } .dex-error-box { - background-color: #DD1327; + background-color: #e5383b; + border-radius: 6px; color: #fff; font-size: 14px; font-weight: normal; - max-width: 320px; - padding: 4px 0; -} - -.dex-error-box { margin: 20px auto; + max-width: 320px; + padding: 8px 12px; } diff --git a/web/themes/dark/styles.css b/web/themes/dark/styles.css index edf30412..153f17eb 100644 --- a/web/themes/dark/styles.css +++ b/web/themes/dark/styles.css @@ -1,122 +1,127 @@ .theme-body { - background-color: #0f1218; - color: #c8d1d9; - font-family: 'Source Sans Pro', Helvetica, sans-serif; + background-color: #131519; + color: #b8bcc4; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif; } .theme-navbar { - background-color: #161b22; - box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); - color: #161B2B; + 
background-color: #1a1d23; + border-bottom: 1px solid #2a2d35; + color: #b8bcc4; font-size: 13px; - font-weight: 100; - height: 46px; + font-weight: 400; + height: 52px; overflow: hidden; - padding: 0 10px; + padding: 0 16px; } .theme-navbar__logo-wrap { display: inline-block; height: 100%; overflow: hidden; - padding: 10px 15px; + padding: 12px 15px; width: 300px; } .theme-navbar__logo { height: 100%; - max-height: 25px; + max-height: 26px; } .theme-heading { + color: #dcdfe5; font-size: 20px; - font-weight: 500; + font-weight: 600; + margin-bottom: 16px; margin-top: 0; - color: #c8d1d9; } .theme-panel { - background-color: #161b22; - box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - padding: 30px; + background-color: #1a1d23; + border: 1px solid #2a2d35; + border-radius: 12px; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); + padding: 32px; } .theme-btn-provider { - background-color: #1e242d; - color: #c8d1d9; - border: 1px solid #30373c; - min-width: 250px; + background-color: #22252c; + border: 1px solid #33363e; + color: #b8bcc4; + min-width: 260px; } .theme-btn-provider:hover { - background-color: #212731; - color: #ffffff; + background-color: #2a2d35; + border-color: #3e414a; + color: #dcdfe5; } .theme-btn--primary { - background-color: #1e242d; + background-color: #3d3f47; border: none; - color: #c8d1d9; + color: #e8eaed; min-width: 200px; - padding: 6px 12px; + padding: 8px 16px; } .theme-btn--primary:hover { - background-color: #212731; - color: #e9e9e9; + background-color: #4a4c55; + color: #fff; } .theme-btn--success { - background-color: #1891bb; - color: #e9e9e9; - width: 250px; + background-color: #2d7d9a; + color: #e8eaed; + width: 260px; } .theme-btn--success:hover { - background-color: #1da5d4; + background-color: #358fae; } .theme-form-row { display: block; - margin: 20px auto; + margin: 16px auto; } .theme-form-input { + background-color: #131519; + border: 1px solid #33363e; + border-radius: 8px; + color: #b8bcc4; display: block; - height: 36px; - 
padding: 6px 12px; font-size: 14px; - line-height: 1.42857143; - border: 1px solid #515559; - border-radius: 4px; - color: #c8d1d9; - background-color: #0f1218; - box-shadow: inset 0 1px 1px rgb(27, 40, 46); - width: 250px; + height: 40px; + line-height: 1.5; margin: auto; + padding: 8px 12px; + transition: border-color 0.15s ease, box-shadow 0.15s ease; + width: 260px; } .theme-form-input:focus, .theme-form-input:active { + border-color: #5a9bb5; + box-shadow: 0 0 0 3px rgba(90, 155, 181, 0.15); + color: #dcdfe5; outline: none; - border-color: #f8f9f9; - color: #c8d1d9; } .theme-form-label { - width: 250px; + color: #b8bcc4; + font-size: 14px; + font-weight: 500; margin: 4px auto; - text-align: left; position: relative; - font-size: 13px; - font-weight: 600; - color: #c8d1d9; + text-align: left; + width: 260px; } .theme-link-back { - margin-top: 4px; + margin-top: 8px; } .dex-container { - color: #c8d1d9; + color: #b8bcc4; } diff --git a/web/themes/light/styles.css b/web/themes/light/styles.css index 2d920571..61586722 100644 --- a/web/themes/light/styles.css +++ b/web/themes/light/styles.css @@ -1,113 +1,119 @@ .theme-body { - background-color: #efefef; - color: #333; - font-family: 'Source Sans Pro', Helvetica, sans-serif; + background-color: #f4f5f7; + color: #1a1a1a; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif; } .theme-navbar { background-color: #fff; - box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); - color: #333; + border-bottom: 1px solid #e1e4e8; + color: #1a1a1a; font-size: 13px; - font-weight: 100; - height: 46px; + font-weight: 400; + height: 52px; overflow: hidden; - padding: 0 10px; + padding: 0 16px; } .theme-navbar__logo-wrap { display: inline-block; height: 100%; overflow: hidden; - padding: 10px 15px; + padding: 12px 15px; width: 300px; } .theme-navbar__logo { height: 100%; - max-height: 25px; + max-height: 26px; } .theme-heading { font-size: 20px; - font-weight: 500; - margin-bottom: 10px; + 
font-weight: 600; + margin-bottom: 16px; margin-top: 0; } .theme-panel { background-color: #fff; - box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - padding: 30px; + border: 1px solid #e1e4e8; + border-radius: 12px; + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.08), 0 1px 2px rgba(0, 0, 0, 0.06); + padding: 32px; } .theme-btn-provider { background-color: #fff; - color: #333; - min-width: 250px; + border: 1px solid #d0d5dd; + color: #1a1a1a; + min-width: 260px; } .theme-btn-provider:hover { - color: #999; + background-color: #f9fafb; + border-color: #b0b5bd; + color: #1a1a1a; } .theme-btn--primary { - background-color: #333; + background-color: #1a1a1a; border: none; color: #fff; min-width: 200px; - padding: 6px 12px; + padding: 8px 16px; } .theme-btn--primary:hover { - background-color: #666; + background-color: #333; color: #fff; } .theme-btn--success { - background-color: #2FC98E; + background-color: #16a34a; color: #fff; - width: 250px; + width: 260px; } .theme-btn--success:hover { - background-color: #49E3A8; + background-color: #15803d; } .theme-form-row { display: block; - margin: 20px auto; + margin: 16px auto; } .theme-form-input { - border-radius: 4px; - border: 1px solid #CCC; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - color: #666; + border-radius: 8px; + border: 1px solid #d0d5dd; + color: #1a1a1a; display: block; font-size: 14px; - height: 36px; - line-height: 1.42857143; + height: 40px; + line-height: 1.5; margin: auto; - padding: 6px 12px; - width: 250px; + padding: 8px 12px; + transition: border-color 0.15s ease, box-shadow 0.15s ease; + width: 260px; } .theme-form-input:focus, .theme-form-input:active { - border-color: #66AFE9; + border-color: #4A90D9; + box-shadow: 0 0 0 3px rgba(74, 144, 217, 0.15); outline: none; } .theme-form-label { - font-size: 13px; - font-weight: 600; + font-size: 14px; + font-weight: 500; margin: 4px auto; position: relative; text-align: left; - width: 250px; + width: 260px; } .theme-link-back { - margin-top: 4px; + 
margin-top: 8px; }