mirror of https://github.com/dexidp/dex.git
28 changed files with 4075 additions and 22 deletions
@ -0,0 +1,72 @@
|
||||
# Authentication through SAML 2.0 |
||||
|
||||
## Overview |
||||
|
||||
The experimental SAML provider allows authentication through the SAML 2.0 HTTP POST binding. |
||||
|
||||
The connector uses the value of the `NameID` element as the user's unique identifier which dex assumes is both unique and never changes. Use the `nameIDPolicyFormat` to ensure this is set to a value which satisfies these requirements. |
||||
|
||||
## Caveats |
||||
|
||||
There are known issues with the XML signature validation for this connector. In addition work is still being done to ensure this connector implements best security practices for SAML 2.0. |
||||
|
||||
The connector doesn't support signed AuthnRequests or encrypted attributes. |
||||
|
||||
The connector doesn't support refresh tokens since the SAML 2.0 protocol doesn't provide a way to requery a provider without interaction. |
||||
|
||||
## Configuration |
||||
|
||||
```yaml |
||||
connectors: |
||||
- type: samlExperimental # will be changed to "saml" later without support for the "samlExperimental" value |
||||
id: saml |
||||
config: |
||||
# Issuer used for validating the SAML response. |
||||
issuer: https://saml.example.com |
||||
# SSO URL used for POST value. |
||||
ssoURL: https://saml.example.com/sso |
||||
|
||||
# CA to use when validating the SAML response. |
||||
ca: /path/to/ca.pem |
||||
|
||||
# CA's can also be provided inline as a base64'd blob. |
||||
# |
||||
# caData: ( RAW base64'd PEM encoded CA ) |
||||
|
||||
# To skip signature validation, uncomment the following field. This should |
||||
# only be used during testing and may be removed in the future. |
||||
# |
||||
# insecureSkipSignatureValidation: true |
||||
|
||||
# Dex's callback URL. Must match the "Destination" attribute of all responses |
||||
# exactly. |
||||
redirectURI: https://dex.example.com/callback |
||||
|
||||
# Name of attributes in the returned assertions to map to ID token claims. |
||||
usernameAttr: name |
||||
emailAttr: email |
||||
groupsAttr: groups # optional |
||||
|
||||
# By default, multiple groups are assumed to be represented as multiple |
||||
# attributes with the same name. |
||||
# |
||||
# If "groupsDelim" is provided groups are assumed to be represented as a |
||||
# single attribute and the delimiter is used to split the attribute's value |
||||
# into multiple groups. |
||||
# |
||||
# groupsDelim: ", " |
||||
|
||||
|
||||
# Requested format of the NameID. The NameID value is mapped to the ID Token |
||||
# 'sub' claim. This can be an abbreviated form of the full URI with just the last |
||||
# component. For example, if this value is set to "emailAddress" the format will |
||||
# resolve to: |
||||
# |
||||
# urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress |
||||
# |
||||
# If no value is specified, this value defaults to: |
||||
# |
||||
# urn:oasis:names:tc:SAML:2.0:nameid-format:persistent |
||||
# |
||||
nameIDPolicyFormat: persistent |
||||
``` |
||||
@ -0,0 +1,387 @@
|
||||
// Package saml contains login methods for SAML.
|
||||
package saml |
||||
|
||||
import ( |
||||
"bytes" |
||||
"compress/flate" |
||||
"crypto/rand" |
||||
"crypto/x509" |
||||
"encoding/base64" |
||||
"encoding/hex" |
||||
"encoding/pem" |
||||
"encoding/xml" |
||||
"errors" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/Sirupsen/logrus" |
||||
"github.com/beevik/etree" |
||||
dsig "github.com/russellhaering/goxmldsig" |
||||
|
||||
"github.com/coreos/dex/connector" |
||||
) |
||||
|
||||
const (
	// SAML 2.0 binding URIs. The connector delivers requests and receives
	// responses over the POST binding.
	bindingRedirect = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"
	bindingPOST     = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"

	// NameID format URIs accepted by the nameIDPolicyFormat config option.
	nameIDFormatEmailAddress = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
	nameIDFormatUnspecified  = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
	nameIDFormatX509Subject  = "urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName"
	nameIDFormatWindowsDN    = "urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName"
	nameIDFormatEncrypted    = "urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted"
	nameIDFormatEntity       = "urn:oasis:names:tc:SAML:2.0:nameid-format:entity"
	nameIDFormatKerberos     = "urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos"
	nameIDFormatPersistent   = "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
	// Renamed from nameIDformatTransient for consistent camel casing with
	// the other nameIDFormat* constants.
	nameIDFormatTransient = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"
)

var (
	// nameIDFormats lists every NameID format the connector accepts.
	nameIDFormats = []string{
		nameIDFormatEmailAddress,
		nameIDFormatUnspecified,
		nameIDFormatX509Subject,
		nameIDFormatWindowsDN,
		nameIDFormatEncrypted,
		nameIDFormatEntity,
		nameIDFormatKerberos,
		nameIDFormatPersistent,
		nameIDFormatTransient,
	}
	// nameIDFormatLookup maps both full format URIs and their final
	// component (e.g. "persistent") to the full URI. Populated by init.
	nameIDFormatLookup = make(map[string]string)
)
||||
|
||||
func init() { |
||||
suffix := func(s, sep string) string { |
||||
if i := strings.LastIndex(s, sep); i > 0 { |
||||
return s[i+1:] |
||||
} |
||||
return s |
||||
} |
||||
for _, format := range nameIDFormats { |
||||
nameIDFormatLookup[suffix(format, ":")] = format |
||||
nameIDFormatLookup[format] = format |
||||
} |
||||
} |
||||
|
||||
// Config represents configuration options for the SAML provider.
type Config struct {
	// TODO(ericchiang): A bunch of these fields could be auto-filled if
	// we supported SAML metadata discovery.
	//
	// https://www.oasis-open.org/committees/download.php/35391/sstc-saml-metadata-errata-2.0-wd-04-diff.pdf

	// Issuer is placed in the Issuer element of outgoing AuthnRequests.
	// NOTE(review): responses' Issuer elements are not currently compared
	// against this value in HandlePOST — confirm whether they should be.
	Issuer string `json:"issuer"`
	// SSOURL is the identity provider's single sign-on URL, used as the
	// destination of the POST binding.
	SSOURL string `json:"ssoURL"`

	// X509 CA file or raw data to verify XML signatures.
	CA     string `json:"ca"`
	CAData []byte `json:"caData"`

	// InsecureSkipSignatureValidation disables XML signature verification
	// entirely. Intended for testing only.
	InsecureSkipSignatureValidation bool `json:"insecureSkipSignatureValidation"`

	// Assertion attribute names to lookup various claims with.
	UsernameAttr string `json:"usernameAttr"`
	EmailAttr    string `json:"emailAttr"`
	GroupsAttr   string `json:"groupsAttr"`
	// If GroupsDelim is supplied the connector assumes groups are returned as a
	// single string instead of multiple attribute values. This delimiter will be
	// used split the groups string.
	GroupsDelim string `json:"groupsDelim"`

	// RedirectURI is dex's callback URL. Responses carrying a Destination
	// attribute must match it exactly.
	RedirectURI string `json:"redirectURI"`

	// Requested format of the NameID. The NameID value is mapped to the ID Token
	// 'sub' claim.
	//
	// This can be an abbreviated form of the full URI with just the last component. For
	// example, if this value is set to "emailAddress" the format will resolve to:
	//
	//     urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress
	//
	// If no value is specified, this value defaults to:
	//
	//     urn:oasis:names:tc:SAML:2.0:nameid-format:persistent
	//
	NameIDPolicyFormat string `json:"nameIDPolicyFormat"`
}
||||
|
||||
// certStore holds a fixed set of trusted certificates and satisfies the
// certificate-store interface expected by dsig.NewDefaultValidationContext.
type certStore struct {
	certs []*x509.Certificate
}

// Certificates returns the fixed set of trusted root certificates.
func (c certStore) Certificates() ([]*x509.Certificate, error) {
	return c.certs, nil
}
||||
|
||||
// Open validates the config and returns a connector. It does not actually
// validate connectivity with the provider. All validation work is delegated
// to openConnector.
func (c *Config) Open(logger logrus.FieldLogger) (connector.Connector, error) {
	return c.openConnector(logger)
}
||||
|
||||
// openConnector validates the config and constructs the SAML provider.
//
// It checks that all required fields are set, normalizes the requested
// NameID format to its full URI, and — unless signature validation is
// explicitly disabled — loads the CA certificates used to verify response
// signatures.
func (c *Config) openConnector(logger logrus.FieldLogger) (interface {
	connector.SAMLConnector
}, error) {
	requiredFields := []struct {
		name, val string
	}{
		{"issuer", c.Issuer},
		{"ssoURL", c.SSOURL},
		{"usernameAttr", c.UsernameAttr},
		{"emailAttr", c.EmailAttr},
		{"redirectURI", c.RedirectURI},
	}
	var missing []string
	for _, f := range requiredFields {
		if f.val == "" {
			missing = append(missing, f.name)
		}
	}
	// Report a single missing field by name, or the full list at once.
	switch len(missing) {
	case 0:
	case 1:
		return nil, fmt.Errorf("missing required field %q", missing[0])
	default:
		return nil, fmt.Errorf("missing required fields %q", missing)
	}

	p := &provider{
		issuer:       c.Issuer,
		ssoURL:       c.SSOURL,
		now:          time.Now,
		usernameAttr: c.UsernameAttr,
		emailAttr:    c.EmailAttr,
		groupsAttr:   c.GroupsAttr,
		groupsDelim:  c.GroupsDelim,
		redirectURI:  c.RedirectURI,
		logger:       logger,

		nameIDPolicyFormat: c.NameIDPolicyFormat,
	}

	// Accept either the full NameID format URI or its abbreviated last
	// component (e.g. "emailAddress"); both normalize to the full URI.
	if p.nameIDPolicyFormat == "" {
		p.nameIDPolicyFormat = nameIDFormatPersistent
	} else {
		if format, ok := nameIDFormatLookup[p.nameIDPolicyFormat]; ok {
			p.nameIDPolicyFormat = format
		} else {
			return nil, fmt.Errorf("invalid nameIDPolicyFormat: %q", p.nameIDPolicyFormat)
		}
	}

	if !c.InsecureSkipSignatureValidation {
		// Exactly one of 'ca' and 'caData' must be supplied.
		if (c.CA == "") == (c.CAData == nil) {
			return nil, errors.New("must provide either 'ca' or 'caData'")
		}

		var caData []byte
		if c.CA != "" {
			data, err := ioutil.ReadFile(c.CA)
			if err != nil {
				return nil, fmt.Errorf("read ca file: %v", err)
			}
			caData = data
		} else {
			caData = c.CAData
		}

		// The CA data may contain several concatenated PEM blocks; parse
		// every certificate found.
		var (
			certs []*x509.Certificate
			block *pem.Block
		)
		for {
			block, caData = pem.Decode(caData)
			if block == nil {
				break
			}
			cert, err := x509.ParseCertificate(block.Bytes)
			if err != nil {
				return nil, fmt.Errorf("parse cert: %v", err)
			}
			certs = append(certs, cert)
		}
		if len(certs) == 0 {
			return nil, errors.New("no certificates found in ca data")
		}
		p.validator = dsig.NewDefaultValidationContext(certStore{certs})
	}
	return p, nil
}
||||
|
||||
// provider implements the SAML connector against a single identity
// provider, configured by openConnector.
type provider struct {
	issuer string
	ssoURL string

	// now supplies the current time (set to time.Now by openConnector).
	now func() time.Time

	// If nil, don't do signature validation.
	validator *dsig.ValidationContext

	// Attribute mappings
	usernameAttr string
	emailAttr    string
	groupsAttr   string
	groupsDelim  string

	redirectURI string

	nameIDPolicyFormat string

	logger logrus.FieldLogger
}
||||
|
||||
// POSTData builds the AuthnRequest for the HTTP POST binding. It returns
// the IdP's SSO URL as the form action and the deflate-compressed, base64
// encoded request XML as the form value.
func (p *provider) POSTData(s connector.Scopes) (action, value string, err error) {

	// NOTE(ericchiang): If we can't follow up with the identity provider, can we
	// support refresh tokens?
	if s.OfflineAccess {
		return "", "", fmt.Errorf("SAML does not support offline access")
	}

	r := &authnRequest{
		ProtocolBinding: bindingPOST,
		ID:              "_" + uuidv4(),
		IssueInstant:    xmlTime(p.now()),
		Destination:     p.ssoURL,
		Issuer: &issuer{
			Issuer: p.issuer,
		},
		NameIDPolicy: &nameIDPolicy{
			AllowCreate: true,
			Format:      p.nameIDPolicyFormat,
		},
	}

	data, err := xml.MarshalIndent(r, "", " ")
	if err != nil {
		return "", "", fmt.Errorf("marshal authn request: %v", err)
	}

	// NOTE(review): the SAML 2.0 bindings spec prescribes DEFLATE encoding
	// for the HTTP-Redirect binding; the HTTP-POST binding expects plain
	// base64-encoded XML. Confirm targeted IdPs accept a compressed payload.
	buff := new(bytes.Buffer)
	fw, err := flate.NewWriter(buff, flate.DefaultCompression)
	if err != nil {
		return "", "", fmt.Errorf("new flate writer: %v", err)
	}
	if _, err := fw.Write(data); err != nil {
		return "", "", fmt.Errorf("compress message: %v", err)
	}
	if err := fw.Close(); err != nil {
		return "", "", fmt.Errorf("flush message: %v", err)
	}

	return p.ssoURL, base64.StdEncoding.EncodeToString(buff.Bytes()), nil
}
||||
|
||||
// HandlePOST decodes a base64-encoded SAML response delivered via the HTTP
// POST binding, optionally verifies its XML signature, and maps the
// assertion's NameID and attributes onto a connector identity.
func (p *provider) HandlePOST(s connector.Scopes, samlResponse string) (ident connector.Identity, err error) {
	rawResp, err := base64.StdEncoding.DecodeString(samlResponse)
	if err != nil {
		return ident, fmt.Errorf("decode response: %v", err)
	}
	// A nil validator means signature checks were disabled via
	// insecureSkipSignatureValidation.
	if p.validator != nil {
		// From here on, only the signed portion of the document is used.
		if rawResp, err = verify(p.validator, rawResp); err != nil {
			return ident, fmt.Errorf("verify signature: %v", err)
		}
	}

	var resp response
	if err := xml.Unmarshal(rawResp, &resp); err != nil {
		return ident, fmt.Errorf("unmarshal response: %v", err)
	}

	// NOTE(review): the Destination check is skipped when the attribute is
	// absent, and InResponseTo, the response Issuer, and the assertion's
	// validity window (NotBefore/NotOnOrAfter) are not checked here —
	// confirm this matches the intended threat model.
	if resp.Destination != "" && resp.Destination != p.redirectURI {
		return ident, fmt.Errorf("expected destination %q got %q", p.redirectURI, resp.Destination)

	}

	assertion := resp.Assertion
	if assertion == nil {
		return ident, fmt.Errorf("response did not contain an assertion")
	}
	subject := assertion.Subject
	if subject == nil {
		return ident, fmt.Errorf("response did not contain a subject")
	}

	// The NameID value becomes the stable user identifier (ID token "sub").
	switch {
	case subject.NameID != nil:
		if ident.UserID = subject.NameID.Value; ident.UserID == "" {
			return ident, fmt.Errorf("NameID element does not contain a value")
		}
	default:
		return ident, fmt.Errorf("subject does not contain an NameID element")
	}

	attributes := assertion.AttributeStatement
	if attributes == nil {
		return ident, fmt.Errorf("response did not contain a AttributeStatement")
	}

	if ident.Email, _ = attributes.get(p.emailAttr); ident.Email == "" {
		return ident, fmt.Errorf("no attribute with name %q", p.emailAttr)
	}
	// NOTE(review): the email is taken from the assertion as-is and marked
	// verified unconditionally.
	ident.EmailVerified = true

	if ident.Username, _ = attributes.get(p.usernameAttr); ident.Username == "" {
		return ident, fmt.Errorf("no attribute with name %q", p.usernameAttr)
	}

	// Groups are only mapped when the caller requested the groups scope and
	// a groups attribute is configured.
	if s.Groups && p.groupsAttr != "" {
		if p.groupsDelim != "" {
			// Groups arrive packed into one delimited attribute value.
			groupsStr, ok := attributes.get(p.groupsAttr)
			if !ok {
				return ident, fmt.Errorf("no attribute with name %q", p.groupsAttr)
			}
			// TODO(ericchiang): Do we need to further trim whitespace?
			ident.Groups = strings.Split(groupsStr, p.groupsDelim)
		} else {
			// Each group is a separate attribute value with the same name.
			groups, ok := attributes.all(p.groupsAttr)
			if !ok {
				return ident, fmt.Errorf("no attribute with name %q", p.groupsAttr)
			}
			ident.Groups = groups
		}
	}

	return ident, nil
}
||||
|
||||
// verify checks the signature info of a XML document and returns
// the signed elements.
//
// The returned bytes are re-serialized from the validated subtree only, so
// callers should parse what verify returns rather than the original input.
func verify(validator *dsig.ValidationContext, data []byte) (signed []byte, err error) {
	doc := etree.NewDocument()
	if err := doc.ReadFromBytes(data); err != nil {
		return nil, fmt.Errorf("parse document: %v", err)
	}

	result, err := validator.Validate(doc.Root())
	if err != nil {
		return nil, err
	}
	doc.SetRoot(result)
	return doc.WriteToBytes()
}
||||
|
||||
// uuidv4 returns a random RFC 4122 version 4 UUID in its canonical
// 36-character hyphenated form.
func uuidv4() string {
	var raw [16]byte
	if _, err := rand.Read(raw[:]); err != nil {
		panic(err)
	}
	// Stamp the version (4) and variant (10xxxxxx) bits.
	raw[6] = (raw[6] & 0x0f) | 0x40
	raw[8] = (raw[8] & 0x3f) | 0x80

	out := make([]byte, 36)
	hex.Encode(out[0:8], raw[0:4])
	out[8] = '-'
	hex.Encode(out[9:13], raw[4:6])
	out[13] = '-'
	hex.Encode(out[14:18], raw[6:8])
	out[18] = '-'
	hex.Encode(out[19:23], raw[8:10])
	out[23] = '-'
	hex.Encode(out[24:36], raw[10:16])
	return string(out)
}
||||
@ -0,0 +1,42 @@
|
||||
package saml |
||||
|
||||
import ( |
||||
"crypto/x509" |
||||
"encoding/pem" |
||||
"errors" |
||||
"io/ioutil" |
||||
"testing" |
||||
|
||||
sdig "github.com/russellhaering/goxmldsig" |
||||
) |
||||
|
||||
// loadCert reads a PEM-encoded file and parses the first certificate found
// in it.
func loadCert(ca string) (*x509.Certificate, error) {
	raw, err := ioutil.ReadFile(ca)
	if err != nil {
		return nil, err
	}
	pemBlock, _ := pem.Decode(raw)
	if pemBlock == nil {
		return nil, errors.New("ca file didn't contain any PEM data")
	}
	return x509.ParseCertificate(pemBlock.Bytes)
}
||||
|
||||
// TestVerify checks that a recorded Okta SAML response in testdata
// validates against the accompanying CA certificate.
func TestVerify(t *testing.T) {
	cert, err := loadCert("testdata/okta-ca.pem")
	if err != nil {
		t.Fatal(err)
	}
	s := certStore{[]*x509.Certificate{cert}}

	validator := sdig.NewDefaultValidationContext(s)

	data, err := ioutil.ReadFile("testdata/okta-resp.xml")
	if err != nil {
		t.Fatal(err)
	}

	if _, err := verify(validator, data); err != nil {
		t.Fatal(err)
	}
}
||||
@ -0,0 +1,19 @@
|
||||
-----BEGIN CERTIFICATE----- |
||||
MIIDpDCCAoygAwIBAgIGAVjgvNroMA0GCSqGSIb3DQEBBQUAMIGSMQswCQYDVQQGEwJVUzETMBEG |
||||
A1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEU |
||||
MBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi05NjkyNDQxHDAaBgkqhkiG9w0BCQEW |
||||
DWluZm9Ab2t0YS5jb20wHhcNMTYxMjA4MjMxOTIzWhcNMjYxMjA4MjMyMDIzWjCBkjELMAkGA1UE |
||||
BhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNV |
||||
BAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtOTY5MjQ0MRwwGgYJ |
||||
KoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA |
||||
4dW2YlcXjZwnTmLnV7IOBq8hhrdbqlNwdjCHRyx1BizOk3RbVP56grgPdyWScTCPpJ6vZZ8rtrY0 |
||||
m1rwr+cxifNuQGKTlE33g2hReo/N9f3LFUMITlnnNH80Yium3SYuEqGeHLYerelXOnEKx6x+X5qD |
||||
eg2DRW6I9/v/mfN2KAQEDqF9aSNlNFWZWmb52kukMv3tLWw0puaevicIZ/nZrW+D3CLDVVfWHeVt |
||||
46EF2bkLdgbIJOU3GzLoolgBOCkydX9x6xTw6knwQaqYsRGflacw6571IzWEwjmd17uJXkarnhM1 |
||||
51pqwIoksTzycbjinIg6B1rNpGFDN7Ah+9EnVQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQDQOtlj |
||||
7Yk1j4GV/135zLOlsaonq8zfsu05VfY5XydNPFEkyIVcegLa8RiqALyFf+FjY/wVyhXtARw7NBUo |
||||
u/Jh63cVya/VEoP1SVa0XQLf/ial+XuwdBBL1yc+7o2XHOfluDaXw/v2FWYpZtICf3a39giGaNWp |
||||
eCT4g2TDWM4Jf/X7/mRbLX9tQO7XRural1CXx8VIMcmbKNbUtQiO8yEtVQ+FJKOsl7KOSzkgqNiL |
||||
rJy+Y0D9biLZVKp07KWAY2FPCEtCkBrvo8BhvWbxWMA8CVQNAiTylM27Pc6kbc64pNr7C1Jx1wuE |
||||
mVy9Fgb4PA2N3hPeD7mBmGGp7CfDbGcy |
||||
-----END CERTIFICATE----- |
||||
@ -0,0 +1,33 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?><saml2p:Response xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol" Destination="http://localhost:5556/dex/callback" ID="id108965453120986171998428970" InResponseTo="_fd1b3ef9-ec09-44a7-a66b-0d39c250f6a0" IssueInstant="2016-12-20T22:18:23.771Z" Version="2.0"><saml2:Issuer xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://www.okta.com/exk91cb99lKkKSYoy0h7</saml2:Issuer><ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#"><ds:SignedInfo><ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/><ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/><ds:Reference URI="#id108965453120986171998428970"><ds:Transforms><ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/><ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/></ds:Transforms><ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/><ds:DigestValue>Phu93l0D97JSMIYDZBdVeNLN0pwBVHhzUDWxbh4sc6g=</ds:DigestValue></ds:Reference></ds:SignedInfo><ds:SignatureValue>M2gMHOmnMAFgh2apq/2jHwDYmisUkYMUqxrWkQJf3RHFotl4EeDlcqq/FzOboJc3NcbKBqQY3CWsWhWh5cNWHDgNneaahW4czww+9DCM0R/zz5c6GuMYFEh5df2sDn/dWk/jbKMiAMgPdKJ2x/+5Xk9q4axC52TdQrrbZtzAAAn4CgrT6Kf11qfMl5wpDarg3qPw7ANxWn2DKzCsvCkOIwM2+AXh+sEXmTvvZIQ0vpv098FH/ZTGt4sCwb1bmRZ3UZLhBcxVc/sjuEW/sQ6pbQHkjrXIR5bxXzGNUxYpcGjrp9HGF+In0BAc+Ds/A0H142e1rgtcX8LH2pbG8URJSQ==</ds:SignatureValue><ds:KeyInfo><ds:X509Data><ds:X509Certificate>MIIDpDCCAoygAwIBAgIGAVjgvNroMA0GCSqGSIb3DQEBBQUAMIGSMQswCQYDVQQGEwJVUzETMBEG |
||||
A1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEU |
||||
MBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi05NjkyNDQxHDAaBgkqhkiG9w0BCQEW |
||||
DWluZm9Ab2t0YS5jb20wHhcNMTYxMjA4MjMxOTIzWhcNMjYxMjA4MjMyMDIzWjCBkjELMAkGA1UE |
||||
BhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNV |
||||
BAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtOTY5MjQ0MRwwGgYJ |
||||
KoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA |
||||
4dW2YlcXjZwnTmLnV7IOBq8hhrdbqlNwdjCHRyx1BizOk3RbVP56grgPdyWScTCPpJ6vZZ8rtrY0 |
||||
m1rwr+cxifNuQGKTlE33g2hReo/N9f3LFUMITlnnNH80Yium3SYuEqGeHLYerelXOnEKx6x+X5qD |
||||
eg2DRW6I9/v/mfN2KAQEDqF9aSNlNFWZWmb52kukMv3tLWw0puaevicIZ/nZrW+D3CLDVVfWHeVt |
||||
46EF2bkLdgbIJOU3GzLoolgBOCkydX9x6xTw6knwQaqYsRGflacw6571IzWEwjmd17uJXkarnhM1 |
||||
51pqwIoksTzycbjinIg6B1rNpGFDN7Ah+9EnVQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQDQOtlj |
||||
7Yk1j4GV/135zLOlsaonq8zfsu05VfY5XydNPFEkyIVcegLa8RiqALyFf+FjY/wVyhXtARw7NBUo |
||||
u/Jh63cVya/VEoP1SVa0XQLf/ial+XuwdBBL1yc+7o2XHOfluDaXw/v2FWYpZtICf3a39giGaNWp |
||||
eCT4g2TDWM4Jf/X7/mRbLX9tQO7XRural1CXx8VIMcmbKNbUtQiO8yEtVQ+FJKOsl7KOSzkgqNiL |
||||
rJy+Y0D9biLZVKp07KWAY2FPCEtCkBrvo8BhvWbxWMA8CVQNAiTylM27Pc6kbc64pNr7C1Jx1wuE |
||||
mVy9Fgb4PA2N3hPeD7mBmGGp7CfDbGcy</ds:X509Certificate></ds:X509Data></ds:KeyInfo></ds:Signature><saml2p:Status xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol"><saml2p:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/></saml2p:Status><saml2:Assertion xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" ID="id10896545312129779529177535" IssueInstant="2016-12-20T22:18:23.771Z" Version="2.0"><saml2:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity" xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion">http://www.okta.com/exk91cb99lKkKSYoy0h7</saml2:Issuer><ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#"><ds:SignedInfo><ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/><ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/><ds:Reference URI="#id10896545312129779529177535"><ds:Transforms><ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/><ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/></ds:Transforms><ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/><ds:DigestValue>ufwWUjecX6I/aQb4WW9P9ZMLG3C8hN6LaZyyb/EATIs=</ds:DigestValue></ds:Reference></ds:SignedInfo><ds:SignatureValue>jKtNBzxAL67ssuzWkkbf0yzqRyZ51y2JjBQ9C6bW8io/JOYQB2v7Bix7Eu/RjJslO7OBqD+3tPrK7ZBOy2+LFuAh3cDNa3U5NhO0raLrn/2YoJXfjj3XX3hyQv6GVxo0EY1KJNXOzWxjp9RVDpHslPTIL1yDC/oy0Mlzxu6pXBEerz9J2/Caenq66Skb5/DAT8FvrJ2s1bxuMagShs3APhC1hD8mvktZ+ZcN8ujs2SebteGK4IoOCx+e8+v2CyycBv1l5l+v5I+D2HnbAw4LfvHnW4rZOJT2AvoI47p1YBK1qDsJutG3jUPKy4Yx5YF73Xi1oytr+rrHyx/lfFPd2A==</ds:SignatureValue><ds:KeyInfo><ds:X509Data><ds:X509Certificate>MIIDpDCCAoygAwIBAgIGAVjgvNroMA0GCSqGSIb3DQEBBQUAMIGSMQswCQYDVQQGEwJVUzETMBEG |
||||
A1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEU |
||||
MBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi05NjkyNDQxHDAaBgkqhkiG9w0BCQEW |
||||
DWluZm9Ab2t0YS5jb20wHhcNMTYxMjA4MjMxOTIzWhcNMjYxMjA4MjMyMDIzWjCBkjELMAkGA1UE |
||||
BhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNV |
||||
BAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtOTY5MjQ0MRwwGgYJ |
||||
KoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA |
||||
4dW2YlcXjZwnTmLnV7IOBq8hhrdbqlNwdjCHRyx1BizOk3RbVP56grgPdyWScTCPpJ6vZZ8rtrY0 |
||||
m1rwr+cxifNuQGKTlE33g2hReo/N9f3LFUMITlnnNH80Yium3SYuEqGeHLYerelXOnEKx6x+X5qD |
||||
eg2DRW6I9/v/mfN2KAQEDqF9aSNlNFWZWmb52kukMv3tLWw0puaevicIZ/nZrW+D3CLDVVfWHeVt |
||||
46EF2bkLdgbIJOU3GzLoolgBOCkydX9x6xTw6knwQaqYsRGflacw6571IzWEwjmd17uJXkarnhM1 |
||||
51pqwIoksTzycbjinIg6B1rNpGFDN7Ah+9EnVQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQDQOtlj |
||||
7Yk1j4GV/135zLOlsaonq8zfsu05VfY5XydNPFEkyIVcegLa8RiqALyFf+FjY/wVyhXtARw7NBUo |
||||
u/Jh63cVya/VEoP1SVa0XQLf/ial+XuwdBBL1yc+7o2XHOfluDaXw/v2FWYpZtICf3a39giGaNWp |
||||
eCT4g2TDWM4Jf/X7/mRbLX9tQO7XRural1CXx8VIMcmbKNbUtQiO8yEtVQ+FJKOsl7KOSzkgqNiL |
||||
rJy+Y0D9biLZVKp07KWAY2FPCEtCkBrvo8BhvWbxWMA8CVQNAiTylM27Pc6kbc64pNr7C1Jx1wuE |
||||
mVy9Fgb4PA2N3hPeD7mBmGGp7CfDbGcy</ds:X509Certificate></ds:X509Data></ds:KeyInfo></ds:Signature><saml2:Subject xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion"><saml2:NameID Format="urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified">eric.chiang+okta@coreos.com</saml2:NameID><saml2:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer"><saml2:SubjectConfirmationData InResponseTo="_fd1b3ef9-ec09-44a7-a66b-0d39c250f6a0" NotOnOrAfter="2016-12-20T22:23:23.772Z" Recipient="http://localhost:5556/dex/callback"/></saml2:SubjectConfirmation></saml2:Subject><saml2:Conditions NotBefore="2016-12-20T22:13:23.772Z" NotOnOrAfter="2016-12-20T22:23:23.772Z" xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion"><saml2:AudienceRestriction><saml2:Audience>http://localhost:5556/dex/callback</saml2:Audience></saml2:AudienceRestriction></saml2:Conditions><saml2:AuthnStatement AuthnInstant="2016-12-20T22:18:23.771Z" SessionIndex="_fd1b3ef9-ec09-44a7-a66b-0d39c250f6a0" xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion"><saml2:AuthnContext><saml2:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport</saml2:AuthnContextClassRef></saml2:AuthnContext></saml2:AuthnStatement></saml2:Assertion></saml2p:Response> |
||||
@ -0,0 +1,177 @@
|
||||
package saml |
||||
|
||||
import ( |
||||
"encoding/xml" |
||||
"fmt" |
||||
"time" |
||||
) |
||||
|
||||
const timeFormat = "2006-01-02T15:04:05Z" |
||||
|
||||
type xmlTime time.Time |
||||
|
||||
func (t xmlTime) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { |
||||
return xml.Attr{ |
||||
Name: name, |
||||
Value: time.Time(t).UTC().Format(timeFormat), |
||||
}, nil |
||||
} |
||||
|
||||
func (t *xmlTime) UnmarshalXMLAttr(attr xml.Attr) error { |
||||
got, err := time.Parse(timeFormat, attr.Value) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
*t = xmlTime(got) |
||||
return nil |
||||
} |
||||
|
||||
type samlVersion struct{} |
||||
|
||||
func (s samlVersion) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { |
||||
return xml.Attr{ |
||||
Name: name, |
||||
Value: "2.0", |
||||
}, nil |
||||
} |
||||
|
||||
func (s *samlVersion) UnmarshalXMLAttr(attr xml.Attr) error { |
||||
if attr.Value != "2.0" { |
||||
return fmt.Errorf(`saml version expected "2.0" got %q`, attr.Value) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// authnRequest maps to the samlp:AuthnRequest element sent to the identity
// provider to initiate login (marshaled by provider.POSTData).
type authnRequest struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnRequest"`

	// ID uniquely identifies the request; POSTData sets it to "_" + uuidv4().
	ID      string      `xml:"ID,attr"`
	Version samlVersion `xml:"Version,attr"`

	ProviderName string  `xml:"ProviderName,attr,omitempty"`
	IssueInstant xmlTime `xml:"IssueInstant,attr,omitempty"`
	// NOTE(review): in the SAML 2.0 schema Consent is a URI-valued
	// attribute, not a boolean — confirm before populating this field.
	Consent     bool   `xml:"Consent,attr,omitempty"`
	Destination string `xml:"Destination,attr,omitempty"`

	ForceAuthn bool `xml:"ForceAuthn,attr,omitempty"`
	IsPassive  bool `xml:"IsPassive,attr,omitempty"`
	// ProtocolBinding requests how the IdP should deliver its response;
	// POSTData sets it to the HTTP-POST binding URI.
	ProtocolBinding string `xml:"ProtocolBinding,attr,omitempty"`

	Subject      *subject      `xml:"Subject,omitempty"`
	Issuer       *issuer       `xml:"Issuer,omitempty"`
	NameIDPolicy *nameIDPolicy `xml:"NameIDPolicy,omitempty"`

	// TODO(ericchiang): Make this configurable and determine appropriate default values.
	RequestAuthnContext *requestAuthnContext `xml:"RequestAuthnContext,omitempty"`
}
||||
|
||||
// subject maps to the saml:Subject element identifying the authenticated
// principal.
type subject struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Subject"`

	NameID *nameID `xml:"NameID,omitempty"`

	// TODO(ericchiang): Do we need to deal with baseID?
}

// nameID maps to the saml:NameID element. Its character data is used by
// HandlePOST as the user's unique identifier.
type nameID struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion NameID"`

	Format string `xml:"Format,omitempty"`
	Value  string `xml:",chardata"`
}

// issuer maps to the saml:Issuer element naming the sender of a message.
type issuer struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"`
	Issuer  string   `xml:",chardata"`
}

// nameIDPolicy maps to the samlp:NameIDPolicy element, which asks the IdP
// for a specific NameID format in its response.
type nameIDPolicy struct {
	XMLName     xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol NameIDPolicy"`
	AllowCreate bool     `xml:"AllowCreate,attr,omitempty"`
	Format      string   `xml:"Format,attr,omitempty"`
}
||||
|
||||
type requestAuthnContext struct { |
||||
XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol RequestAuthnContext"` |
||||
|
||||
AuthnContextClassRefs []authnContextClassRef |
||||
} |
||||
|
||||
type authnContextClassRef struct { |
||||
XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnContextClassRef"` |
||||
Value string `xml:",chardata"` |
||||
} |
||||
|
||||
// response maps to the samlp:Response element delivered by the IdP through
// the HTTP POST binding and parsed by HandlePOST.
type response struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol Response"`

	ID      string      `xml:"ID,attr"`
	Version samlVersion `xml:"Version,attr"`

	// Destination, when present, must match the connector's redirectURI
	// (enforced in HandlePOST).
	Destination string `xml:"Destination,attr,omitempty"`

	Issuer *issuer `xml:"Issuer,omitempty"`

	// TODO(ericchiang): How do deal with multiple assertions?
	Assertion *assertion `xml:"Assertion,omitempty"`
}
||||
|
||||
type assertion struct { |
||||
XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Assertion"` |
||||
|
||||
Version samlVersion `xml:"Version,attr"` |
||||
ID string `xml:"ID,attr"` |
||||
IssueInstance xmlTime `xml:"IssueInstance,attr"` |
||||
|
||||
Issuer issuer `xml:"Issuer"` |
||||
|
||||
Subject *subject `xml:"Subject,omitempty"` |
||||
|
||||
AttributeStatement *attributeStatement `xml:"AttributeStatement,omitempty"` |
||||
} |
||||
|
||||
// attributeStatement maps to the saml:AttributeStatement element, a bag of
// named attributes carried by an assertion.
type attributeStatement struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion AttributeStatement"`

	Attributes []attribute `xml:"Attribute"`
}

// get returns the first value of the named attribute. ok reports whether
// any attribute with that name was present, even one carrying no values.
func (a *attributeStatement) get(name string) (s string, ok bool) {
	for _, attr := range a.Attributes {
		if attr.Name != name {
			continue
		}
		ok = true
		if len(attr.AttributeValues) > 0 {
			return attr.AttributeValues[0].Value, true
		}
	}
	return
}

// all returns every value of the named attribute across repeated attribute
// elements. ok reports whether the name was present at all.
func (a *attributeStatement) all(name string) (s []string, ok bool) {
	for _, attr := range a.Attributes {
		if attr.Name != name {
			continue
		}
		ok = true
		for _, v := range attr.AttributeValues {
			s = append(s, v.Value)
		}
	}
	return
}

// attribute maps to a single saml:Attribute element.
type attribute struct {
	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Attribute"`

	Name string `xml:"Name,attr"`

	NameFormat   string `xml:"NameFormat,attr,omitempty"`
	FriendlyName string `xml:"FriendlyName,attr,omitempty"`

	AttributeValues []attributeValue `xml:"AttributeValue,omitempty"`
}

// attributeValue holds the character data of an AttributeValue element.
type attributeValue struct {
	XMLName xml.Name `xml:"AttributeValue"`
	Value   string   `xml:",chardata"`
}
||||
@ -0,0 +1,24 @@
|
||||
Copyright 2015 Brett Vickers. All rights reserved. |
||||
|
||||
Redistribution and use in source and binary forms, with or without |
||||
modification, are permitted provided that the following conditions |
||||
are met: |
||||
|
||||
1. Redistributions of source code must retain the above copyright |
||||
notice, this list of conditions and the following disclaimer. |
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright |
||||
notice, this list of conditions and the following disclaimer in the |
||||
documentation and/or other materials provided with the distribution. |
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY |
||||
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
||||
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR |
||||
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
||||
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||
@ -0,0 +1,938 @@
|
||||
// Copyright 2015 Brett Vickers.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package etree provides XML services through an Element Tree
|
||||
// abstraction.
|
||||
package etree |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"encoding/xml" |
||||
"errors" |
||||
"io" |
||||
"os" |
||||
"strings" |
||||
) |
||||
|
||||
const (
	// NoIndent is used with Indent to disable all indenting.
	NoIndent = -1
)

// ErrXML is returned when XML parsing fails due to incorrect formatting.
var ErrXML = errors.New("etree: invalid XML format")

// ReadSettings allow for changing the default behavior of the ReadFrom*
// methods.
type ReadSettings struct {
	// CharsetReader to be passed to standard xml.Decoder. Default: nil.
	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
}

// newReadSettings creates a default ReadSettings record.
func newReadSettings() ReadSettings {
	return ReadSettings{}
}
||||
|
||||
// WriteSettings allow for changing the serialization behavior of the WriteTo*
// methods.
type WriteSettings struct {
	// CanonicalEndTags forces the production of XML end tags, even for
	// elements that have no child elements. Default: false.
	CanonicalEndTags bool

	// CanonicalText forces the production of XML character references for
	// text data characters &, <, and >. If false, XML character references
	// are also produced for " and '. Default: false.
	CanonicalText bool

	// CanonicalAttrVal forces the production of XML character references for
	// attribute value characters &, < and ". If false, XML character
	// references are also produced for > and '. Default: false.
	CanonicalAttrVal bool
}

// newWriteSettings returns a WriteSettings record with every canonical
// serialization option disabled, which is the default behavior.
func newWriteSettings() WriteSettings {
	var settings WriteSettings
	return settings
}
||||
|
||||
// A Token is an empty interface that represents an Element, CharData,
// Comment, Directive, or ProcInst.
type Token interface {
	// Parent returns the token's parent element, or nil if unparented.
	Parent() *Element
	// dup deep-copies the token, binding the copy to parent.
	dup(parent *Element) Token
	// setParent rebinds the token to a new parent element.
	setParent(parent *Element)
	// writeTo serializes the token using the given write settings.
	writeTo(w *bufio.Writer, s *WriteSettings)
}
||||
|
||||
// A Document is a container holding a complete XML hierarchy. Its embedded
// element contains zero or more children, one of which is usually the root
// element. The embedded element may include other children such as
// processing instructions or BOM CharData tokens.
type Document struct {
	Element
	ReadSettings  ReadSettings
	WriteSettings WriteSettings
}

// An Element represents an XML element, its attributes, and its child tokens.
type Element struct {
	Space, Tag string   // namespace and tag
	Attr       []Attr   // key-value attribute pairs
	Child      []Token  // child tokens (elements, comments, etc.)
	parent     *Element // parent element
}

// An Attr represents a key-value attribute of an XML element.
type Attr struct {
	Space, Key string // The attribute's namespace and key
	Value      string // The attribute value string
}
||||
|
||||
// CharData represents character data within XML.
type CharData struct {
	Data       string
	parent     *Element
	whitespace bool // true when Data was inserted purely for indentation
}

// A Comment represents an XML comment.
type Comment struct {
	Data   string
	parent *Element
}

// A Directive represents an XML directive.
type Directive struct {
	Data   string
	parent *Element
}

// A ProcInst represents an XML processing instruction.
type ProcInst struct {
	Target string
	Inst   string
	parent *Element
}
||||
|
||||
// NewDocument creates an XML document without a root element.
|
||||
func NewDocument() *Document { |
||||
return &Document{ |
||||
Element{Child: make([]Token, 0)}, |
||||
newReadSettings(), |
||||
newWriteSettings(), |
||||
} |
||||
} |
||||
|
||||
// Copy returns a recursive, deep copy of the document.
|
||||
func (d *Document) Copy() *Document { |
||||
return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings} |
||||
} |
||||
|
||||
// Root returns the root element of the document, or nil if there is no root
|
||||
// element.
|
||||
func (d *Document) Root() *Element { |
||||
for _, t := range d.Child { |
||||
if c, ok := t.(*Element); ok { |
||||
return c |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// SetRoot replaces the document's root element with e. If the document
// already has a root when this function is called, then the document's
// original root is unbound first. If the element e is bound to another
// document (or to another element within a document), then it is unbound
// first.
func (d *Document) SetRoot(e *Element) {
	// Detach e from any current parent before adopting it.
	if e.parent != nil {
		e.parent.RemoveChild(e)
	}
	e.setParent(&d.Element)

	// Replace the first existing child element in place so non-element
	// document tokens (e.g. a leading ProcInst) keep their positions.
	for i, t := range d.Child {
		if _, ok := t.(*Element); ok {
			t.setParent(nil)
			d.Child[i] = e
			return
		}
	}
	// No existing root: append e as a new child token.
	d.Child = append(d.Child, e)
}
||||
|
||||
// ReadFrom reads XML from the reader r into the document d. It returns the
// number of bytes read and any error encountered.
func (d *Document) ReadFrom(r io.Reader) (n int64, err error) {
	return d.Element.readFrom(r, d.ReadSettings.CharsetReader)
}

// ReadFromFile reads XML from the file named filename into the document d.
func (d *Document) ReadFromFile(filename string) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = d.ReadFrom(f)
	return err
}

// ReadFromBytes reads XML from the byte slice b into the document d.
func (d *Document) ReadFromBytes(b []byte) error {
	_, err := d.ReadFrom(bytes.NewReader(b))
	return err
}

// ReadFromString reads XML from the string s into the document d.
func (d *Document) ReadFromString(s string) error {
	_, err := d.ReadFrom(strings.NewReader(s))
	return err
}
||||
|
||||
// WriteTo serializes an XML document into the writer w. It
// returns the number of bytes written and any error encountered.
func (d *Document) WriteTo(w io.Writer) (n int64, err error) {
	cw := newCountWriter(w)
	b := bufio.NewWriter(cw)
	for _, c := range d.Child {
		c.writeTo(b, &d.WriteSettings)
	}
	// Flush runs first (left-to-right evaluation), so cw.bytes reflects the
	// fully drained buffer.
	err, n = b.Flush(), cw.bytes
	return
}

// WriteToFile serializes an XML document into the file named
// filename.
func (d *Document) WriteToFile(filename string) error {
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = d.WriteTo(f)
	return err
}
||||
|
||||
// WriteToBytes serializes the XML document into a slice of
|
||||
// bytes.
|
||||
func (d *Document) WriteToBytes() (b []byte, err error) { |
||||
var buf bytes.Buffer |
||||
if _, err = d.WriteTo(&buf); err != nil { |
||||
return |
||||
} |
||||
return buf.Bytes(), nil |
||||
} |
||||
|
||||
// WriteToString serializes the XML document into a string.
|
||||
func (d *Document) WriteToString() (s string, err error) { |
||||
var b []byte |
||||
if b, err = d.WriteToBytes(); err != nil { |
||||
return |
||||
} |
||||
return string(b), nil |
||||
} |
||||
|
||||
// indentFunc returns the indentation string to insert at the given depth.
type indentFunc func(depth int) string

// Indent modifies the document's element tree by inserting CharData entities
// containing carriage returns and indentation. The amount of indentation per
// depth level is given as spaces. Pass etree.NoIndent for spaces if you want
// no indentation at all.
func (d *Document) Indent(spaces int) {
	var indent indentFunc
	switch {
	case spaces < 0:
		// NoIndent (or any negative value): insert empty strings, which
		// effectively strips indentation.
		indent = func(depth int) string { return "" }
	default:
		indent = func(depth int) string { return crIndent(depth*spaces, crsp) }
	}
	d.Element.indent(0, indent)
}

// IndentTabs modifies the document's element tree by inserting CharData
// entities containing carriage returns and tabs for indentation. One tab is
// used per indentation level.
func (d *Document) IndentTabs() {
	indent := func(depth int) string { return crIndent(depth, crtab) }
	d.Element.indent(0, indent)
}
||||
|
||||
// NewElement creates an unparented element with the specified tag. The tag
// may be prefixed by a namespace and a colon.
func NewElement(tag string) *Element {
	space, stag := spaceDecompose(tag)
	return newElement(space, stag, nil)
}

// newElement is a helper function that creates an element and binds it to
// a parent element if possible.
func newElement(space, tag string, parent *Element) *Element {
	e := &Element{
		Space:  space,
		Tag:    tag,
		Attr:   make([]Attr, 0),
		Child:  make([]Token, 0),
		parent: parent,
	}
	if parent != nil {
		parent.addChild(e)
	}
	return e
}
||||
|
||||
// Copy creates a recursive, deep copy of the element and all its attributes
|
||||
// and children. The returned element has no parent but can be parented to a
|
||||
// another element using AddElement, or to a document using SetRoot.
|
||||
func (e *Element) Copy() *Element { |
||||
var parent *Element |
||||
return e.dup(parent).(*Element) |
||||
} |
||||
|
||||
// Text returns the characters immediately following the element's
|
||||
// opening tag.
|
||||
func (e *Element) Text() string { |
||||
if len(e.Child) == 0 { |
||||
return "" |
||||
} |
||||
if cd, ok := e.Child[0].(*CharData); ok { |
||||
return cd.Data |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// SetText replaces an element's subsidiary CharData text with a new string.
func (e *Element) SetText(text string) {
	// If the first child is already character data, update it in place.
	if len(e.Child) > 0 {
		if cd, ok := e.Child[0].(*CharData); ok {
			cd.Data = text
			return
		}
	}
	// Otherwise newCharData appends the new token as the last child; shift
	// the existing children right and rotate it into the first position.
	cd := newCharData(text, false, e)
	copy(e.Child[1:], e.Child[0:])
	e.Child[0] = cd
}

// CreateElement creates an element with the specified tag and adds it as the
// last child element of the element e. The tag may be prefixed by a namespace
// and a colon.
func (e *Element) CreateElement(tag string) *Element {
	space, stag := spaceDecompose(tag)
	return newElement(space, stag, e)
}

// AddChild adds the token t as the last child of element e. If token t was
// already the child of another element, it is first removed from its current
// parent element.
func (e *Element) AddChild(t Token) {
	if t.Parent() != nil {
		t.Parent().RemoveChild(t)
	}
	t.setParent(e)
	e.addChild(t)
}
||||
|
||||
// InsertChild inserts the token t before e's existing child token ex. If ex
// is nil (or if ex is not a child of e), then t is added to the end of e's
// child token list. If token t was already the child of another element, it
// is first removed from its current parent element.
func (e *Element) InsertChild(ex Token, t Token) {
	if t.Parent() != nil {
		t.Parent().RemoveChild(t)
	}
	t.setParent(e)

	for i, c := range e.Child {
		if c == ex {
			// Grow by one, shift the tail right, and drop t into the gap.
			e.Child = append(e.Child, nil)
			copy(e.Child[i+1:], e.Child[i:])
			e.Child[i] = t
			return
		}
	}
	// ex not found (or nil): append t at the end.
	e.addChild(t)
}

// RemoveChild attempts to remove the token t from element e's list of
// children. If the token t is a child of e, then it is returned. Otherwise,
// nil is returned.
func (e *Element) RemoveChild(t Token) Token {
	for i, c := range e.Child {
		if c == t {
			e.Child = append(e.Child[:i], e.Child[i+1:]...)
			c.setParent(nil)
			return t
		}
	}
	return nil
}
||||
|
||||
// readFrom reads XML from the reader ri and stores the result as new
// children of element e. It returns the number of bytes read and any error
// encountered.
func (e *Element) readFrom(ri io.Reader, charsetReader func(charset string, input io.Reader) (io.Reader, error)) (n int64, err error) {
	r := newCountReader(ri)
	dec := xml.NewDecoder(r)
	dec.CharsetReader = charsetReader
	// The stack tracks the currently open element; e is the root context.
	var stack stack
	stack.push(e)
	for {
		t, err := dec.RawToken()
		switch {
		case err == io.EOF:
			return r.bytes, nil
		case err != nil:
			return r.bytes, err
		case stack.empty():
			// A token arrived after the root element closed: malformed XML.
			return r.bytes, ErrXML
		}

		top := stack.peek().(*Element)

		switch t := t.(type) {
		case xml.StartElement:
			// Open a new element under the current top and make it current.
			e := newElement(t.Name.Space, t.Name.Local, top)
			for _, a := range t.Attr {
				e.createAttr(a.Name.Space, a.Name.Local, a.Value)
			}
			stack.push(e)
		case xml.EndElement:
			stack.pop()
		case xml.CharData:
			data := string(t)
			newCharData(data, isWhitespace(data), top)
		case xml.Comment:
			newComment(string(t), top)
		case xml.Directive:
			newDirective(string(t), top)
		case xml.ProcInst:
			newProcInst(t.Target, string(t.Inst), top)
		}
	}
}
||||
|
||||
// SelectAttr finds an element attribute matching the requested key and
// returns it if found. The key may be prefixed by a namespace and a colon.
func (e *Element) SelectAttr(key string) *Attr {
	space, skey := spaceDecompose(key)
	for i, a := range e.Attr {
		if spaceMatch(space, a.Space) && skey == a.Key {
			// Return a pointer into the slice so callers can mutate in place.
			return &e.Attr[i]
		}
	}
	return nil
}

// SelectAttrValue finds an element attribute matching the requested key and
// returns its value if found. The key may be prefixed by a namespace and a
// colon. If the key is not found, the dflt value is returned instead.
func (e *Element) SelectAttrValue(key, dflt string) string {
	space, skey := spaceDecompose(key)
	for _, a := range e.Attr {
		if spaceMatch(space, a.Space) && skey == a.Key {
			return a.Value
		}
	}
	return dflt
}
||||
|
||||
// ChildElements returns all elements that are children of element e.
func (e *Element) ChildElements() []*Element {
	var elements []*Element
	for _, t := range e.Child {
		if c, ok := t.(*Element); ok {
			elements = append(elements, c)
		}
	}
	return elements
}

// SelectElement returns the first child element with the given tag. The tag
// may be prefixed by a namespace and a colon.
func (e *Element) SelectElement(tag string) *Element {
	space, stag := spaceDecompose(tag)
	for _, t := range e.Child {
		if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
			return c
		}
	}
	return nil
}

// SelectElements returns a slice of all child elements with the given tag.
// The tag may be prefixed by a namespace and a colon.
func (e *Element) SelectElements(tag string) []*Element {
	space, stag := spaceDecompose(tag)
	var elements []*Element
	for _, t := range e.Child {
		if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
			elements = append(elements, c)
		}
	}
	return elements
}
||||
|
||||
// FindElement returns the first element matched by the XPath-like path
// string. Panics if an invalid path string is supplied.
func (e *Element) FindElement(path string) *Element {
	return e.FindElementPath(MustCompilePath(path))
}

// FindElementPath returns the first element matched by the compiled Path
// object, or nil if nothing matches.
func (e *Element) FindElementPath(path Path) *Element {
	p := newPather()
	elements := p.traverse(e, path)
	switch {
	case len(elements) > 0:
		return elements[0]
	default:
		return nil
	}
}

// FindElements returns a slice of elements matched by the XPath-like path
// string. Panics if an invalid path string is supplied.
func (e *Element) FindElements(path string) []*Element {
	return e.FindElementsPath(MustCompilePath(path))
}

// FindElementsPath returns a slice of elements matched by the Path object.
func (e *Element) FindElementsPath(path Path) []*Element {
	p := newPather()
	return p.traverse(e, path)
}
||||
|
||||
// indent recursively inserts proper indentation between an
// XML element's child tokens.
func (e *Element) indent(depth int, indent indentFunc) {
	// Remove any indentation from a previous pass before re-indenting.
	e.stripIndent()
	n := len(e.Child)
	if n == 0 {
		return
	}

	// Rebuild the child list, interleaving indentation CharData tokens.
	oldChild := e.Child
	e.Child = make([]Token, 0, n*2+1)
	isCharData, firstNonCharData := false, true
	for _, c := range oldChild {

		// Insert CR+indent before child if it's not character data.
		// Exceptions: when it's the first non-character-data child, or when
		// the child is at root depth.
		_, isCharData = c.(*CharData)
		if !isCharData {
			if !firstNonCharData || depth > 0 {
				newCharData(indent(depth), true, e)
			}
			firstNonCharData = false
		}

		e.addChild(c)

		// Recursively process child elements.
		if ce, ok := c.(*Element); ok {
			ce.indent(depth+1, indent)
		}
	}

	// Insert CR+indent before the last child.
	if !isCharData {
		if !firstNonCharData || depth > 0 {
			newCharData(indent(depth-1), true, e)
		}
	}
}
||||
|
||||
// stripIndent removes any previously inserted indentation.
func (e *Element) stripIndent() {
	// Count the number of non-indent child tokens.
	n := len(e.Child)
	for _, c := range e.Child {
		if cd, ok := c.(*CharData); ok && cd.whitespace {
			n--
		}
	}
	if n == len(e.Child) {
		// No indentation tokens present; nothing to do.
		return
	}

	// Strip out indent CharData, preserving the order of other tokens.
	newChild := make([]Token, n)
	j := 0
	for _, c := range e.Child {
		if cd, ok := c.(*CharData); ok && cd.whitespace {
			continue
		}
		newChild[j] = c
		j++
	}
	e.Child = newChild
}
||||
|
||||
// dup duplicates the element.
|
||||
func (e *Element) dup(parent *Element) Token { |
||||
ne := &Element{ |
||||
Space: e.Space, |
||||
Tag: e.Tag, |
||||
Attr: make([]Attr, len(e.Attr)), |
||||
Child: make([]Token, len(e.Child)), |
||||
parent: parent, |
||||
} |
||||
for i, t := range e.Child { |
||||
ne.Child[i] = t.dup(ne) |
||||
} |
||||
for i, a := range e.Attr { |
||||
ne.Attr[i] = a |
||||
} |
||||
return ne |
||||
} |
||||
|
||||
// Parent returns the element token's parent element, or nil if it has no
// parent.
func (e *Element) Parent() *Element {
	return e.parent
}

// setParent replaces the element token's parent.
func (e *Element) setParent(parent *Element) {
	e.parent = parent
}
||||
|
||||
// writeTo serializes the element to the writer w.
func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) {
	w.WriteByte('<')
	if e.Space != "" {
		w.WriteString(e.Space)
		w.WriteByte(':')
	}
	w.WriteString(e.Tag)
	for _, a := range e.Attr {
		w.WriteByte(' ')
		a.writeTo(w, s)
	}
	if len(e.Child) > 0 {
		// Element has children: <tag>...children...</tag>.
		w.WriteString(">")
		for _, c := range e.Child {
			c.writeTo(w, s)
		}
		w.Write([]byte{'<', '/'})
		if e.Space != "" {
			w.WriteString(e.Space)
			w.WriteByte(':')
		}
		w.WriteString(e.Tag)
		w.WriteByte('>')
	} else {
		if s.CanonicalEndTags {
			// Canonical output uses an explicit end tag, never <tag/>.
			w.Write([]byte{'>', '<', '/'})
			if e.Space != "" {
				w.WriteString(e.Space)
				w.WriteByte(':')
			}
			w.WriteString(e.Tag)
			w.WriteByte('>')
		} else {
			w.Write([]byte{'/', '>'})
		}
	}
}

// addChild adds a child token to the element e.
func (e *Element) addChild(t Token) {
	e.Child = append(e.Child, t)
}
||||
|
||||
// CreateAttr creates an attribute and adds it to element e. The key may be
// prefixed by a namespace and a colon. If an attribute with the key already
// exists, its value is replaced.
func (e *Element) CreateAttr(key, value string) *Attr {
	space, skey := spaceDecompose(key)
	return e.createAttr(space, skey, value)
}

// createAttr is a helper function that creates attributes.
func (e *Element) createAttr(space, key, value string) *Attr {
	// Overwrite an existing attribute with the same namespace and key.
	for i, a := range e.Attr {
		if space == a.Space && key == a.Key {
			e.Attr[i].Value = value
			return &e.Attr[i]
		}
	}
	a := Attr{space, key, value}
	e.Attr = append(e.Attr, a)
	return &e.Attr[len(e.Attr)-1]
}

// RemoveAttr removes and returns the first attribute of the element whose key
// matches the given key. The key may be prefixed by a namespace and a colon.
// If an equal attribute does not exist, nil is returned.
func (e *Element) RemoveAttr(key string) *Attr {
	space, skey := spaceDecompose(key)
	for i, a := range e.Attr {
		if space == a.Space && skey == a.Key {
			e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...)
			// Return the loop copy; the slice entry itself was removed.
			return &a
		}
	}
	return nil
}
||||
|
||||
// xmlReplacerNormal escapes the five XML special characters as entity
// references. It is used for both text and attribute serialization when
// canonical output is not requested.
var xmlReplacerNormal = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	"'", "&apos;",
	`"`, "&quot;",
)

// xmlReplacerCanonicalText escapes character data per the XML
// canonicalization (C14N) rules: only &, <, > and carriage returns are
// replaced, the latter with a numeric character reference.
var xmlReplacerCanonicalText = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	"\r", "&#xD;",
)

// xmlReplacerCanonicalAttrVal escapes attribute values per the XML
// canonicalization (C14N) rules: &, < and " plus tab, newline, and
// carriage return are replaced with (numeric) character references.
var xmlReplacerCanonicalAttrVal = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	`"`, "&quot;",
	"\t", "&#x9;",
	"\n", "&#xA;",
	"\r", "&#xD;",
)
||||
|
||||
// writeTo serializes the attribute to the writer as key="value", escaping
// the value according to the write settings.
func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) {
	if a.Space != "" {
		w.WriteString(a.Space)
		w.WriteByte(':')
	}
	w.WriteString(a.Key)
	w.WriteString(`="`)
	var r *strings.Replacer
	if s.CanonicalAttrVal {
		r = xmlReplacerCanonicalAttrVal
	} else {
		r = xmlReplacerNormal
	}
	w.WriteString(r.Replace(a.Value))
	w.WriteByte('"')
}
||||
|
||||
// NewCharData creates a parentless XML character data entity.
func NewCharData(data string) *CharData {
	return newCharData(data, false, nil)
}

// newCharData creates an XML character data entity and binds it to a parent
// element. If parent is nil, the CharData token remains unbound. The
// whitespace flag marks tokens inserted purely for indentation.
func newCharData(data string, whitespace bool, parent *Element) *CharData {
	c := &CharData{
		Data:       data,
		whitespace: whitespace,
		parent:     parent,
	}
	if parent != nil {
		parent.addChild(c)
	}
	return c
}

// CreateCharData creates an XML character data entity and adds it as a child
// of element e.
func (e *Element) CreateCharData(data string) *CharData {
	return newCharData(data, false, e)
}
||||
|
||||
// dup duplicates the character data.
func (c *CharData) dup(parent *Element) Token {
	return &CharData{
		Data:       c.Data,
		whitespace: c.whitespace,
		parent:     parent,
	}
}

// Parent returns the character data token's parent element, or nil if it has
// no parent.
func (c *CharData) Parent() *Element {
	return c.parent
}

// setParent replaces the character data token's parent.
func (c *CharData) setParent(parent *Element) {
	c.parent = parent
}

// writeTo serializes the character data entity to the writer, escaping
// special characters according to the write settings.
func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) {
	var r *strings.Replacer
	if s.CanonicalText {
		r = xmlReplacerCanonicalText
	} else {
		r = xmlReplacerNormal
	}
	w.WriteString(r.Replace(c.Data))
}
||||
|
||||
// NewComment creates a parentless XML comment.
func NewComment(comment string) *Comment {
	return newComment(comment, nil)
}

// newComment creates an XML comment and binds it to a parent element. If
// parent is nil, the Comment remains unbound.
func newComment(comment string, parent *Element) *Comment {
	c := &Comment{
		Data:   comment,
		parent: parent,
	}
	if parent != nil {
		parent.addChild(c)
	}
	return c
}

// CreateComment creates an XML comment and adds it as a child of element e.
func (e *Element) CreateComment(comment string) *Comment {
	return newComment(comment, e)
}

// dup duplicates the comment.
func (c *Comment) dup(parent *Element) Token {
	return &Comment{
		Data:   c.Data,
		parent: parent,
	}
}

// Parent returns comment token's parent element, or nil if it has no parent.
func (c *Comment) Parent() *Element {
	return c.parent
}

// setParent replaces the comment token's parent.
func (c *Comment) setParent(parent *Element) {
	c.parent = parent
}

// writeTo serializes the comment to the writer.
func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) {
	w.WriteString("<!--")
	w.WriteString(c.Data)
	w.WriteString("-->")
}
||||
|
||||
// NewDirective creates a parentless XML directive.
func NewDirective(data string) *Directive {
	return newDirective(data, nil)
}

// newDirective creates an XML directive and binds it to a parent element. If
// parent is nil, the Directive remains unbound.
func newDirective(data string, parent *Element) *Directive {
	d := &Directive{
		Data:   data,
		parent: parent,
	}
	if parent != nil {
		parent.addChild(d)
	}
	return d
}

// CreateDirective creates an XML directive and adds it as the last child of
// element e.
func (e *Element) CreateDirective(data string) *Directive {
	return newDirective(data, e)
}

// dup duplicates the directive.
func (d *Directive) dup(parent *Element) Token {
	return &Directive{
		Data:   d.Data,
		parent: parent,
	}
}

// Parent returns directive token's parent element, or nil if it has no
// parent.
func (d *Directive) Parent() *Element {
	return d.parent
}

// setParent replaces the directive token's parent.
func (d *Directive) setParent(parent *Element) {
	d.parent = parent
}

// writeTo serializes the XML directive to the writer as <!data>.
func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) {
	w.WriteString("<!")
	w.WriteString(d.Data)
	w.WriteString(">")
}
||||
|
||||
// NewProcInst creates a parentless XML processing instruction.
|
||||
func NewProcInst(target, inst string) *ProcInst { |
||||
return newProcInst(target, inst, nil) |
||||
} |
||||
|
||||
// newProcInst creates an XML processing instruction and binds it to a parent
|
||||
// element. If parent is nil, the ProcInst remains unbound.
|
||||
func newProcInst(target, inst string, parent *Element) *ProcInst { |
||||
p := &ProcInst{ |
||||
Target: target, |
||||
Inst: inst, |
||||
parent: parent, |
||||
} |
||||
if parent != nil { |
||||
parent.addChild(p) |
||||
} |
||||
return p |
||||
} |
||||
|
||||
// CreateProcInst creates a processing instruction and adds it as a child of
|
||||
// element e.
|
||||
func (e *Element) CreateProcInst(target, inst string) *ProcInst { |
||||
return newProcInst(target, inst, e) |
||||
} |
||||
|
||||
// dup duplicates the procinst.
|
||||
func (p *ProcInst) dup(parent *Element) Token { |
||||
return &ProcInst{ |
||||
Target: p.Target, |
||||
Inst: p.Inst, |
||||
parent: parent, |
||||
} |
||||
} |
||||
|
||||
// Parent returns processing instruction token's parent element, or nil if it
|
||||
// has no parent.
|
||||
func (p *ProcInst) Parent() *Element { |
||||
return p.parent |
||||
} |
||||
|
||||
// setParent replaces the processing instruction token's parent.
|
||||
func (p *ProcInst) setParent(parent *Element) { |
||||
p.parent = parent |
||||
} |
||||
|
||||
// writeTo serializes the processing instruction to the writer.
|
||||
func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) { |
||||
w.WriteString("<?") |
||||
w.WriteString(p.Target) |
||||
if p.Inst != "" { |
||||
w.WriteByte(' ') |
||||
w.WriteString(p.Inst) |
||||
} |
||||
w.WriteString("?>") |
||||
} |
||||
@ -0,0 +1,188 @@
|
||||
// Copyright 2015 Brett Vickers.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package etree |
||||
|
||||
import ( |
||||
"io" |
||||
"strings" |
||||
) |
||||
|
||||
// stack is a simple LIFO stack of arbitrary values.
type stack struct {
	data []interface{}
}

// empty reports whether the stack holds no values.
func (s *stack) empty() bool {
	return len(s.data) == 0
}

// push places value on top of the stack.
func (s *stack) push(value interface{}) {
	s.data = append(s.data, value)
}

// pop removes and returns the value on top of the stack.
func (s *stack) pop() interface{} {
	last := len(s.data) - 1
	value := s.data[last]
	s.data[last] = nil // drop the reference so it can be collected
	s.data = s.data[:last]
	return value
}

// peek returns the value on top of the stack without removing it.
func (s *stack) peek() interface{} {
	return s.data[len(s.data)-1]
}
||||
|
||||
// A fifo is a simple first-in-first-out queue implemented as a ring buffer.
type fifo struct {
	data       []interface{}
	head, tail int // head: next slot to remove; tail: next slot to fill
}

// add appends value to the back of the queue, growing the buffer if needed.
func (f *fifo) add(value interface{}) {
	// Keep at least one slot free so head == tail always means "empty".
	if f.len()+1 >= len(f.data) {
		f.grow()
	}
	f.data[f.tail] = value
	if f.tail++; f.tail == len(f.data) {
		f.tail = 0 // wrap around
	}
}

// remove pops and returns the value at the front of the queue.
func (f *fifo) remove() interface{} {
	value := f.data[f.head]
	f.data[f.head] = nil // release the reference
	if f.head++; f.head == len(f.data) {
		f.head = 0 // wrap around
	}
	return value
}

// len returns the number of values currently queued.
func (f *fifo) len() int {
	if f.tail >= f.head {
		return f.tail - f.head
	}
	// Wrapped: count the tail-end segment plus the front segment.
	return len(f.data) - f.head + f.tail
}

// grow doubles the ring buffer's capacity (minimum 4) and rebases the
// queued values so head restarts at index 0.
func (f *fifo) grow() {
	c := len(f.data) * 2
	if c == 0 {
		c = 4
	}
	buf, count := make([]interface{}, c), f.len()
	if f.tail >= f.head {
		copy(buf[0:count], f.data[f.head:f.tail])
	} else {
		// Wrapped: copy the segment after head, then the one before tail.
		hindex := len(f.data) - f.head
		copy(buf[0:hindex], f.data[f.head:])
		copy(buf[hindex:count], f.data[:f.tail])
	}
	f.data, f.head, f.tail = buf, 0, count
}
||||
|
||||
// countReader implements a proxy reader that counts the number of
|
||||
// bytes read from its encapsulated reader.
|
||||
type countReader struct { |
||||
r io.Reader |
||||
bytes int64 |
||||
} |
||||
|
||||
func newCountReader(r io.Reader) *countReader { |
||||
return &countReader{r: r} |
||||
} |
||||
|
||||
func (cr *countReader) Read(p []byte) (n int, err error) { |
||||
b, err := cr.r.Read(p) |
||||
cr.bytes += int64(b) |
||||
return b, err |
||||
} |
||||
|
||||
// countWriter implements a proxy writer that counts the number of
|
||||
// bytes written by its encapsulated writer.
|
||||
type countWriter struct { |
||||
w io.Writer |
||||
bytes int64 |
||||
} |
||||
|
||||
func newCountWriter(w io.Writer) *countWriter { |
||||
return &countWriter{w: w} |
||||
} |
||||
|
||||
func (cw *countWriter) Write(p []byte) (n int, err error) { |
||||
b, err := cw.w.Write(p) |
||||
cw.bytes += int64(b) |
||||
return b, err |
||||
} |
||||
|
||||
// isWhitespace reports whether s consists only of XML whitespace
// bytes (space, tab, newline, carriage return). The empty string
// is considered whitespace.
func isWhitespace(s string) bool {
	for _, c := range []byte(s) {
		switch c {
		case ' ', '\t', '\n', '\r':
			// whitespace; keep scanning
		default:
			return false
		}
	}
	return true
}
||||
|
||||
// spaceMatch reports whether namespace a matches namespace b: an
// empty a matches any b; otherwise the two must be equal.
func spaceMatch(a, b string) bool {
	return a == "" || a == b
}
||||
|
||||
// spaceDecompose splits a "namespace:tag" identifier at the first ':'
// and returns the two halves. If there is no colon, the namespace is
// returned empty and the whole input becomes the key.
func spaceDecompose(str string) (space, key string) {
	if colon := strings.IndexByte(str, ':'); colon >= 0 {
		return str[:colon], str[colon+1:]
	}
	return "", str
}
||||
|
||||
// Strings used by crIndent: a carriage return followed by padding
// characters (spaces or tabs) from which indentation prefixes are
// sliced.
const (
	crsp  = "\n "
	crtab = "\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
)
||||
|
||||
// crIndent returns a carriage return followed by n copies of the
// first non-CR character in source (i.e. source[1]). A negative n
// yields the bare carriage return. If n exceeds the padding available
// in source, the indent character is repeated to make up the rest.
func crIndent(n int, source string) string {
	if n < 0 {
		return source[:1]
	}
	if n < len(source) {
		return source[:n+1]
	}
	return source + strings.Repeat(source[1:2], n-len(source)+1)
}
||||
|
||||
// nextIndex returns the index of the next occurrence of sep in s at
// or after offset. It returns -1 if sep is not found.
func nextIndex(s, sep string, offset int) int {
	if i := strings.Index(s[offset:], sep); i >= 0 {
		return offset + i
	}
	return -1
}
||||
|
||||
// isInteger reports whether s is a base-10 integer: an optional
// leading '-' followed by one or more ASCII digits.
//
// Fix: the empty string and a bare "-" are now rejected. Previously
// both were accepted, and the subsequent strconv.Atoi in parseFilter
// failed silently, degrading the filter to position 0 instead of
// treating the text as a child-tag filter.
func isInteger(s string) bool {
	start := 0
	if len(s) > 0 && s[0] == '-' {
		start = 1
	}
	if start == len(s) {
		// No digits: empty string or a lone "-".
		return false
	}
	for i := start; i < len(s); i++ {
		if s[i] < '0' || s[i] > '9' {
			return false
		}
	}
	return true
}
||||
@ -0,0 +1,470 @@
|
||||
// Copyright 2015 Brett Vickers.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package etree |
||||
|
||||
import ( |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
/*
A Path is an object that represents an optimized version of an
XPath-like search string. Although path strings are XPath-like,
only the following limited syntax is supported:

    .               Selects the current element
    ..              Selects the parent of the current element
    *               Selects all child elements
    //              Selects all descendants of the current element
    tag             Selects all child elements with the given tag
    [#]             Selects the element of the given index (1-based,
                      negative starts from the end)
    [@attrib]       Selects all elements with the given attribute
    [@attrib='val'] Selects all elements with the given attribute set to val
    [tag]           Selects all elements with a child element named tag
    [tag='val']     Selects all elements with a child element named tag
                      and text equal to val

Examples:

Select the title elements of all descendant book elements having a
'category' attribute of 'WEB':
    //book[@category='WEB']/title

Select the first book element with a title child containing the text
'Great Expectations':
    .//book[title='Great Expectations'][1]

Starting from the current element, select all children of book elements
with an attribute 'language' set to 'english':
    ./book/*[@language='english']

Select all descendant book elements whose title element has an attribute
'language' set to 'french':
    //book/title[@language='french']/..
*/
type Path struct {
	segments []segment // compiled path segments, applied in order during traversal
}
||||
|
||||
// ErrPath is returned by path functions when an invalid etree path is
// provided.
type ErrPath string

// Error returns the string describing a path error, prefixed with the
// package name.
func (err ErrPath) Error() string {
	msg := string(err)
	return "etree: " + msg
}
||||
|
||||
// CompilePath creates an optimized version of an XPath-like string that
|
||||
// can be used to query elements in an element tree.
|
||||
func CompilePath(path string) (Path, error) { |
||||
var comp compiler |
||||
segments := comp.parsePath(path) |
||||
if comp.err != ErrPath("") { |
||||
return Path{nil}, comp.err |
||||
} |
||||
return Path{segments}, nil |
||||
} |
||||
|
||||
// MustCompilePath creates an optimized version of an XPath-like string that
|
||||
// can be used to query elements in an element tree. Panics if an error
|
||||
// occurs. Use this function to create Paths when you know the path is
|
||||
// valid (i.e., if it's hard-coded).
|
||||
func MustCompilePath(path string) Path { |
||||
p, err := CompilePath(path) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
return p |
||||
} |
||||
|
||||
// A segment is a portion of a path between "/" characters.
// It contains one selector and zero or more [filters].
type segment struct {
	sel     selector
	filters []filter
}

// apply runs the segment's selector against e to populate the
// pather's candidate list, then applies each filter in order to pare
// the candidates down.
func (seg *segment) apply(e *Element, p *pather) {
	seg.sel.apply(e, p)
	for _, f := range seg.filters {
		f.apply(p)
	}
}

// A selector selects XML elements for consideration by the path
// traversal, appending them to the pather's candidate list.
type selector interface {
	apply(e *Element, p *pather)
}

// A filter pares down a list of candidate XML elements based on a
// path filter in [brackets].
type filter interface {
	apply(p *pather)
}
||||
|
||||
// A pather is helper object that traverses an element tree using
// a Path object. It collects and deduplicates all elements matching
// the path query.
type pather struct {
	queue      fifo              // pending (element, remaining segments) work items
	results    []*Element        // matched elements, in discovery order
	inResults  map[*Element]bool // set used to deduplicate results
	candidates []*Element        // elements selected by the current segment
	scratch    []*Element        // used by filters
}

// A node represents an element and the remaining path segments that
// should be applied against it by the pather.
type node struct {
	e        *Element
	segments []segment
}

// newPather returns a pather with all of its collections initialized
// and empty.
func newPather() *pather {
	return &pather{
		results:    make([]*Element, 0),
		inResults:  make(map[*Element]bool),
		candidates: make([]*Element, 0),
		scratch:    make([]*Element, 0),
	}
}
||||
|
||||
// traverse follows the path from the element e, collecting
// and then returning all elements that match the path's selectors
// and filters. The walk is driven breadth-first by the pather's
// work queue.
func (p *pather) traverse(e *Element, path Path) []*Element {
	for p.queue.add(node{e, path.segments}); p.queue.len() > 0; {
		p.eval(p.queue.remove().(node))
	}
	return p.results
}

// eval evaluates the current path node by applying the next segment's
// selector and filters against the node's element. When no segments
// remain afterwards, the surviving candidates are recorded as results
// (deduplicated via inResults); otherwise each candidate is queued
// together with the remaining segments.
func (p *pather) eval(n node) {
	p.candidates = p.candidates[0:0] // reuse backing array across nodes
	seg, remain := n.segments[0], n.segments[1:]
	seg.apply(n.e, p)

	if len(remain) == 0 {
		for _, c := range p.candidates {
			if in := p.inResults[c]; !in {
				p.inResults[c] = true
				p.results = append(p.results, c)
			}
		}
	} else {
		for _, c := range p.candidates {
			p.queue.add(node{c, remain})
		}
	}
}
||||
|
||||
// A compiler generates a compiled path from a path string. The err
// field records the first error encountered; it stays the empty
// ErrPath on success.
type compiler struct {
	err ErrPath
}

// parsePath parses an XPath-like string describing a path
// through an element tree and returns a slice of segment
// descriptors. On error it sets c.err and returns whatever was
// parsed so far (possibly nil).
func (c *compiler) parsePath(path string) []segment {
	// If path starts or ends with //, fix it: a leading // is
	// anchored to the current element, and a trailing // selects
	// all children of each descendant.
	if strings.HasPrefix(path, "//") {
		path = "." + path
	}
	if strings.HasSuffix(path, "//") {
		path = path + "*"
	}

	// Paths cannot be absolute
	if strings.HasPrefix(path, "/") {
		c.err = ErrPath("paths cannot be absolute.")
		return nil
	}

	// Split path into segment objects, stopping at the first error.
	var segments []segment
	for _, s := range splitPath(path) {
		segments = append(segments, c.parseSegment(s))
		if c.err != ErrPath("") {
			break
		}
	}
	return segments
}
||||
|
||||
// splitPath splits path on '/' separators, ignoring any '/' that
// appears inside a single-quoted filter value. The result always
// contains at least one (possibly empty) piece.
func splitPath(path string) []string {
	pieces := make([]string, 0)
	start, quoted := 0, false
	for i := 0; i < len(path); i++ {
		switch {
		case path[i] == '\'':
			quoted = !quoted
		case path[i] == '/' && !quoted:
			pieces = append(pieces, path[start:i])
			start = i + 1
		}
	}
	return append(pieces, path[start:])
}
||||
|
||||
// parseSegment parses a path segment between / characters.
|
||||
func (c *compiler) parseSegment(path string) segment { |
||||
pieces := strings.Split(path, "[") |
||||
seg := segment{ |
||||
sel: c.parseSelector(pieces[0]), |
||||
filters: make([]filter, 0), |
||||
} |
||||
for i := 1; i < len(pieces); i++ { |
||||
fpath := pieces[i] |
||||
if fpath[len(fpath)-1] != ']' { |
||||
c.err = ErrPath("path has invalid filter [brackets].") |
||||
break |
||||
} |
||||
seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) |
||||
} |
||||
return seg |
||||
} |
||||
|
||||
// parseSelector parses a selector at the start of a path segment.
|
||||
func (c *compiler) parseSelector(path string) selector { |
||||
switch path { |
||||
case ".": |
||||
return new(selectSelf) |
||||
case "..": |
||||
return new(selectParent) |
||||
case "*": |
||||
return new(selectChildren) |
||||
case "": |
||||
return new(selectDescendants) |
||||
default: |
||||
return newSelectChildrenByTag(path) |
||||
} |
||||
} |
||||
|
||||
// parseFilter parses a path filter contained within [brackets].
// Supported forms: [@attr], [@attr='val'], [N], [tag], [tag='val'].
// A malformed filter sets c.err and returns nil.
func (c *compiler) parseFilter(path string) filter {
	if len(path) == 0 {
		c.err = ErrPath("path contains an empty filter expression.")
		return nil
	}

	// Filter contains [@attr='val'] or [tag='val']?
	eqindex := strings.Index(path, "='")
	if eqindex >= 0 {
		// The closing quote must be the final character of the filter
		// (nextIndex returns -1 if there is no closing quote at all).
		rindex := nextIndex(path, "'", eqindex+2)
		if rindex != len(path)-1 {
			c.err = ErrPath("path has mismatched filter quotes.")
			return nil
		}
		switch {
		case path[0] == '@':
			return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex])
		default:
			return newFilterChildText(path[:eqindex], path[eqindex+2:rindex])
		}
	}

	// Filter contains [@attr], [N] or [tag]
	switch {
	case path[0] == '@':
		return newFilterAttr(path[1:])
	case isInteger(path):
		pos, _ := strconv.Atoi(path)
		switch {
		case pos > 0:
			// Positive indexes are 1-based in the path syntax;
			// convert to 0-based.
			return newFilterPos(pos - 1)
		default:
			// Zero and negative indexes pass through; negative
			// values count back from the end of the list.
			return newFilterPos(pos)
		}
	default:
		return newFilterChild(path)
	}
}
||||
|
||||
// selectSelf selects the current element into the candidate list.
type selectSelf struct{}

// apply appends e itself to the candidates.
func (s *selectSelf) apply(e *Element, p *pather) {
	p.candidates = append(p.candidates, e)
}

// selectParent selects the element's parent into the candidate list.
type selectParent struct{}

// apply appends e's parent, if it has one.
func (s *selectParent) apply(e *Element, p *pather) {
	if e.parent != nil {
		p.candidates = append(p.candidates, e.parent)
	}
}

// selectChildren selects the element's child elements into the
// candidate list.
type selectChildren struct{}

// apply appends each child token of e that is an *Element, skipping
// child tokens of any other kind.
func (s *selectChildren) apply(e *Element, p *pather) {
	for _, c := range e.Child {
		if c, ok := c.(*Element); ok {
			p.candidates = append(p.candidates, c)
		}
	}
}
||||
|
||||
// selectDescendants selects all descendant child elements
// of the element into the candidate list. Note that the element
// itself is also included: it is the first node removed from the
// queue below.
type selectDescendants struct{}

// apply performs a breadth-first walk from e, appending every element
// visited (including e) to the candidates.
func (s *selectDescendants) apply(e *Element, p *pather) {
	var queue fifo
	for queue.add(e); queue.len() > 0; {
		e := queue.remove().(*Element)
		p.candidates = append(p.candidates, e)
		for _, c := range e.Child {
			if c, ok := c.(*Element); ok {
				queue.add(c)
			}
		}
	}
}
||||
|
||||
// selectChildrenByTag selects into the candidate list all child
// elements of the element having the specified namespace and tag.
type selectChildrenByTag struct {
	space, tag string
}

// newSelectChildrenByTag creates a selector from a (possibly
// namespaced) "space:tag" path component.
func newSelectChildrenByTag(path string) *selectChildrenByTag {
	s, l := spaceDecompose(path)
	return &selectChildrenByTag{s, l}
}

// apply appends each child element of e whose namespace matches
// (an empty selector namespace matches any) and whose tag is equal.
func (s *selectChildrenByTag) apply(e *Element, p *pather) {
	for _, c := range e.Child {
		if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag {
			p.candidates = append(p.candidates, c)
		}
	}
}
||||
|
||||
// filterPos filters the candidate list, keeping only the candidate at
// the specified index. A non-negative index counts from the start of
// the list; a negative index counts back from the end.
type filterPos struct {
	index int
}

// newFilterPos creates a filterPos that keeps the candidate at pos.
func newFilterPos(pos int) *filterPos {
	return &filterPos{pos}
}

// apply reduces the candidate list to at most one element. The
// survivor (if any) is collected into scratch; scratch and candidates
// are then swapped so candidates holds the filtered list and the old
// backing array is reused as the next scratch.
func (f *filterPos) apply(p *pather) {
	if f.index >= 0 {
		if f.index < len(p.candidates) {
			p.scratch = append(p.scratch, p.candidates[f.index])
		}
	} else {
		// Negative index: -1 selects the last candidate, and so on.
		if -f.index <= len(p.candidates) {
			p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index])
		}
	}
	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
||||
|
||||
// filterAttr filters the candidate list for elements having
// the specified attribute.
type filterAttr struct {
	space, key string
}

// newFilterAttr creates a filterAttr from a (possibly namespaced)
// attribute name.
func newFilterAttr(str string) *filterAttr {
	s, l := spaceDecompose(str)
	return &filterAttr{s, l}
}

// apply keeps each candidate that carries at least one matching
// attribute (break ensures one append per candidate), then swaps the
// filtered list back into candidates.
func (f *filterAttr) apply(p *pather) {
	for _, c := range p.candidates {
		for _, a := range c.Attr {
			if spaceMatch(f.space, a.Space) && f.key == a.Key {
				p.scratch = append(p.scratch, c)
				break
			}
		}
	}
	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
||||
|
||||
// filterAttrVal filters the candidate list for elements having
// the specified attribute with the specified value.
type filterAttrVal struct {
	space, key, val string
}

// newFilterAttrVal creates a filterAttrVal from a (possibly
// namespaced) attribute name and a required value.
func newFilterAttrVal(str, value string) *filterAttrVal {
	s, l := spaceDecompose(str)
	return &filterAttrVal{s, l, value}
}

// apply keeps each candidate that carries at least one attribute
// matching both name and value (break ensures one append per
// candidate), then swaps the filtered list back into candidates.
func (f *filterAttrVal) apply(p *pather) {
	for _, c := range p.candidates {
		for _, a := range c.Attr {
			if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value {
				p.scratch = append(p.scratch, c)
				break
			}
		}
	}
	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
||||
|
||||
// filterChild filters the candidate list for elements having
|
||||
// a child element with the specified tag.
|
||||
type filterChild struct { |
||||
space, tag string |
||||
} |
||||
|
||||
func newFilterChild(str string) *filterChild { |
||||
s, l := spaceDecompose(str) |
||||
return &filterChild{s, l} |
||||
} |
||||
|
||||
func (f *filterChild) apply(p *pather) { |
||||
for _, c := range p.candidates { |
||||
for _, cc := range c.Child { |
||||
if cc, ok := cc.(*Element); ok && |
||||
spaceMatch(f.space, cc.Space) && |
||||
f.tag == cc.Tag { |
||||
p.scratch = append(p.scratch, c) |
||||
} |
||||
} |
||||
} |
||||
p.candidates, p.scratch = p.scratch, p.candidates[0:0] |
||||
} |
||||
|
||||
// filterChildText filters the candidate list for elements having
|
||||
// a child element with the specified tag and text.
|
||||
type filterChildText struct { |
||||
space, tag, text string |
||||
} |
||||
|
||||
func newFilterChildText(str, text string) *filterChildText { |
||||
s, l := spaceDecompose(str) |
||||
return &filterChildText{s, l, text} |
||||
} |
||||
|
||||
func (f *filterChildText) apply(p *pather) { |
||||
for _, c := range p.candidates { |
||||
for _, cc := range c.Child { |
||||
if cc, ok := cc.(*Element); ok && |
||||
spaceMatch(f.space, cc.Space) && |
||||
f.tag == cc.Tag && |
||||
f.text == cc.Text() { |
||||
p.scratch = append(p.scratch, c) |
||||
} |
||||
} |
||||
} |
||||
p.candidates, p.scratch = p.scratch, p.candidates[0:0] |
||||
} |
||||
@ -0,0 +1,201 @@
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
APPENDIX: How to apply the Apache License to your work. |
||||
|
||||
To apply the Apache License to your work, attach the following |
||||
boilerplate notice, with the fields enclosed by brackets "{}" |
||||
replaced with your own identifying information. (Don't include |
||||
the brackets!) The text should be enclosed in the appropriate |
||||
comment syntax for the file format. We also recommend that a |
||||
file or class name and description of purpose be included on the |
||||
same "printed page" as the copyright notice for easier |
||||
identification within third-party archives. |
||||
|
||||
Copyright {yyyy} {name of copyright owner} |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
@ -0,0 +1,179 @@
|
||||
package clockwork |
||||
|
||||
import ( |
||||
"sync" |
||||
"time" |
||||
) |
||||
|
||||
// Clock provides an interface that packages can use instead of directly
// using the time module, so that chronology-related behavior can be tested.
type Clock interface {
	// After waits for the duration to elapse and then delivers the
	// current time on the returned channel.
	After(d time.Duration) <-chan time.Time
	// Sleep blocks until the duration has elapsed.
	Sleep(d time.Duration)
	// Now returns the current time.
	Now() time.Time
	// Since returns the time elapsed since t.
	Since(t time.Time) time.Duration
}
||||
|
||||
// FakeClock provides an interface for a clock which can be
// manually advanced through time.
type FakeClock interface {
	Clock
	// Advance advances the FakeClock to a new point in time, ensuring
	// any existing sleepers are notified appropriately before returning.
	Advance(d time.Duration)
	// BlockUntil will block until the FakeClock has the given number of
	// sleepers (callers of Sleep or After).
	BlockUntil(n int)
}
||||
|
||||
// NewRealClock returns a Clock which simply delegates calls to the actual time
|
||||
// package; it should be used by packages in production.
|
||||
func NewRealClock() Clock { |
||||
return &realClock{} |
||||
} |
||||
|
||||
// NewFakeClock returns a FakeClock implementation which can be
|
||||
// manually advanced through time for testing. The initial time of the
|
||||
// FakeClock will be an arbitrary non-zero time.
|
||||
func NewFakeClock() FakeClock { |
||||
// use a fixture that does not fulfill Time.IsZero()
|
||||
return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) |
||||
} |
||||
|
||||
// NewFakeClockAt returns a FakeClock initialised at the given time.Time.
|
||||
func NewFakeClockAt(t time.Time) FakeClock { |
||||
return &fakeClock{ |
||||
time: t, |
||||
} |
||||
} |
||||
|
||||
type realClock struct{} |
||||
|
||||
func (rc *realClock) After(d time.Duration) <-chan time.Time { |
||||
return time.After(d) |
||||
} |
||||
|
||||
func (rc *realClock) Sleep(d time.Duration) { |
||||
time.Sleep(d) |
||||
} |
||||
|
||||
func (rc *realClock) Now() time.Time { |
||||
return time.Now() |
||||
} |
||||
|
||||
func (rc *realClock) Since(t time.Time) time.Duration { |
||||
return rc.Now().Sub(t) |
||||
} |
||||
|
||||
// fakeClock is the manually-advanced FakeClock implementation.
type fakeClock struct {
	sleepers []*sleeper // pending callers of After/Sleep not yet due
	blockers []*blocker // pending callers of BlockUntil
	time     time.Time  // the clock's current notion of "now"

	l sync.RWMutex // guards the fields above
}

// sleeper represents a caller of After or Sleep: done receives the
// clock's time once it reaches until.
type sleeper struct {
	until time.Time
	done  chan time.Time
}

// blocker represents a caller of BlockUntil: ch is closed once the
// clock has count sleepers.
type blocker struct {
	count int
	ch    chan struct{}
}
||||
|
||||
// After mimics time.After; it waits for the given duration to elapse on the
|
||||
// fakeClock, then sends the current time on the returned channel.
|
||||
func (fc *fakeClock) After(d time.Duration) <-chan time.Time { |
||||
fc.l.Lock() |
||||
defer fc.l.Unlock() |
||||
now := fc.time |
||||
done := make(chan time.Time, 1) |
||||
if d.Nanoseconds() == 0 { |
||||
// special case - trigger immediately
|
||||
done <- now |
||||
} else { |
||||
// otherwise, add to the set of sleepers
|
||||
s := &sleeper{ |
||||
until: now.Add(d), |
||||
done: done, |
||||
} |
||||
fc.sleepers = append(fc.sleepers, s) |
||||
// and notify any blockers
|
||||
fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) |
||||
} |
||||
return done |
||||
} |
||||
|
||||
// notifyBlockers notifies all the blockers waiting until the
|
||||
// given number of sleepers are waiting on the fakeClock. It
|
||||
// returns an updated slice of blockers (i.e. those still waiting)
|
||||
func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { |
||||
for _, b := range blockers { |
||||
if b.count == count { |
||||
close(b.ch) |
||||
} else { |
||||
newBlockers = append(newBlockers, b) |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
// Sleep blocks until the given duration has passed on the fakeClock
|
||||
func (fc *fakeClock) Sleep(d time.Duration) { |
||||
<-fc.After(d) |
||||
} |
||||
|
||||
// Time returns the current time of the fakeClock
|
||||
func (fc *fakeClock) Now() time.Time { |
||||
fc.l.RLock() |
||||
t := fc.time |
||||
fc.l.RUnlock() |
||||
return t |
||||
} |
||||
|
||||
// Since returns the duration that has passed since the given time on the fakeClock
|
||||
func (fc *fakeClock) Since(t time.Time) time.Duration { |
||||
return fc.Now().Sub(t) |
||||
} |
||||
|
||||
// Advance advances fakeClock to a new point in time, ensuring channels from any
// previous invocations of After are notified appropriately before returning
func (fc *fakeClock) Advance(d time.Duration) {
	fc.l.Lock()
	defer fc.l.Unlock()
	end := fc.time.Add(d)
	// Wake every sleeper whose deadline falls inside the advanced window;
	// keep the remainder for a later Advance.
	var newSleepers []*sleeper
	for _, s := range fc.sleepers {
		if end.Sub(s.until) >= 0 {
			// done has a buffer of 1 (created in After), so this send
			// cannot block even if the waiter hasn't received yet.
			s.done <- end
		} else {
			newSleepers = append(newSleepers, s)
		}
	}
	fc.sleepers = newSleepers
	// The sleeper count changed; release any BlockUntil callers waiting
	// for exactly the new count.
	fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
	fc.time = end
}
||||
|
||||
// BlockUntil will block until the fakeClock has the given number of sleepers
// (callers of Sleep or After)
func (fc *fakeClock) BlockUntil(n int) {
	fc.l.Lock()
	// Fast path: current number of sleepers is what we're looking for
	if len(fc.sleepers) == n {
		fc.l.Unlock()
		return
	}
	// Otherwise, set up a new blocker
	b := &blocker{
		count: n,
		ch:    make(chan struct{}),
	}
	fc.blockers = append(fc.blockers, b)
	// Unlock before waiting so After/Advance can make progress and
	// eventually close b.ch via notifyBlockers.
	fc.l.Unlock()
	<-b.ch
}
||||
@ -0,0 +1,175 @@
|
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
@ -0,0 +1,251 @@
|
||||
package dsig |
||||
|
||||
import ( |
||||
"sort" |
||||
"strings" |
||||
|
||||
"github.com/beevik/etree" |
||||
) |
||||
|
||||
// Canonicalizer is an implementation of a canonicalization algorithm.
type Canonicalizer interface {
	// Canonicalize returns the canonical serialization of the element.
	Canonicalize(el *etree.Element) ([]byte, error)
	// Algorithm identifies which canonicalization algorithm is implemented.
	Algorithm() AlgorithmID
}

// c14N10ExclusiveCanonicalizer implements exclusive canonical XML 1.0
// (see Algorithm).
type c14N10ExclusiveCanonicalizer struct {
	// InclusiveNamespaces holds prefixes to treat inclusively: their
	// declarations are kept closest to the root (see excCanonicalPrep).
	InclusiveNamespaces map[string]struct{}
}
||||
|
||||
// MakeC14N10ExclusiveCanonicalizerWithPrefixList constructs an exclusive Canonicalizer
|
||||
// from a PrefixList in NMTOKENS format (a white space separated list).
|
||||
func MakeC14N10ExclusiveCanonicalizerWithPrefixList(prefixList string) Canonicalizer { |
||||
prefixes := strings.Fields(prefixList) |
||||
prefixSet := make(map[string]struct{}, len(prefixes)) |
||||
|
||||
for _, prefix := range prefixes { |
||||
prefixSet[prefix] = struct{}{} |
||||
} |
||||
|
||||
return &c14N10ExclusiveCanonicalizer{ |
||||
InclusiveNamespaces: prefixSet, |
||||
} |
||||
} |
||||
|
||||
// Canonicalize transforms the input Element into a serialized XML document in canonical form.
func (c *c14N10ExclusiveCanonicalizer) Canonicalize(el *etree.Element) ([]byte, error) {
	// Start with an empty ancestor-namespace scope; excCanonicalPrep fills
	// it in as it recurses.
	scope := make(map[string]c14nSpace)
	return canonicalSerialize(excCanonicalPrep(el, scope, c.InclusiveNamespaces))
}

// Algorithm returns the identifier for exclusive canonical XML 1.0.
func (c *c14N10ExclusiveCanonicalizer) Algorithm() AlgorithmID {
	return CanonicalXML10ExclusiveAlgorithmId
}

// c14N11Canonicalizer implements inclusive canonical XML 1.1 (see Algorithm).
type c14N11Canonicalizer struct{}

// MakeC14N11Canonicalizer constructs an inclusive canonicalizer.
func MakeC14N11Canonicalizer() Canonicalizer {
	return &c14N11Canonicalizer{}
}

// Canonicalize transforms the input Element into a serialized XML document in canonical form.
func (c *c14N11Canonicalizer) Canonicalize(el *etree.Element) ([]byte, error) {
	scope := make(map[string]struct{})
	return canonicalSerialize(canonicalPrep(el, scope))
}

// Algorithm returns the identifier for canonical XML 1.1.
func (c *c14N11Canonicalizer) Algorithm() AlgorithmID {
	return CanonicalXML11AlgorithmId
}
||||
|
||||
// composeAttr joins an attribute's namespace prefix and local name into the
// "prefix:key" form used for sorting (just "key" when there is no prefix).
func composeAttr(space, key string) string {
	if space == "" {
		return key
	}
	return space + ":" + key
}
||||
|
||||
// attrsByKey sorts attributes into canonical order: the default "xmlns"
// declaration first, then "xmlns:prefix" declarations by prefix, then all
// other attributes lexicographically.
type attrsByKey []etree.Attr

func (a attrsByKey) Len() int {
	return len(a)
}

func (a attrsByKey) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

func (a attrsByKey) Less(i, j int) bool {
	// As I understand it: any "xmlns" attribute should come first, followed by
	// any "xmlns:prefix" attributes, presumably ordered by prefix. Lastly any
	// other attributes in lexicographical order.
	//
	// NOTE(review): when a[j] (not a[i]) is the bare "xmlns" attribute, the
	// final lexicographic comparison decides, which can order a regular
	// attribute before "xmlns". Confirm whether inputs guarantee this case
	// cannot arise before relying on strict canonical ordering.
	if a[i].Space == "" && a[i].Key == "xmlns" {
		return true
	}

	if a[i].Space == "xmlns" {
		if a[j].Space == "xmlns" {
			return a[i].Key < a[j].Key
		}
		return true
	}

	if a[j].Space == "xmlns" {
		return false
	}

	return composeAttr(a[i].Space, a[i].Key) < composeAttr(a[j].Space, a[j].Key)
}
||||
|
||||
// c14nSpace tracks one namespace declaration attribute and whether the
// declaration has already been emitted on an ancestor element.
type c14nSpace struct {
	a    etree.Attr
	used bool
}

// nsSpace is the attribute prefix that marks a namespace declaration.
const nsSpace = "xmlns"
||||
|
||||
// excCanonicalPrep accepts an *etree.Element and recursively transforms it into one
// which is ready for serialization to exclusive canonical form. Specifically this
// entails:
//
//  1. Stripping re-declarations of namespaces
//  2. Stripping unused namespaces
//  3. Sorting attributes into canonical order.
//
// NOTE(russell_h): Currently this function modifies the passed element.
func excCanonicalPrep(el *etree.Element, _nsAlreadyDeclared map[string]c14nSpace, inclusiveNamespaces map[string]struct{}) *etree.Element {
	// Copy the ancestor-declared namespace map so mutations made for this
	// subtree do not leak into sibling subtrees.
	nsAlreadyDeclared := make(map[string]c14nSpace, len(_nsAlreadyDeclared))
	for k := range _nsAlreadyDeclared {
		nsAlreadyDeclared[k] = _nsAlreadyDeclared[k]
	}

	// Track the namespaces used on the current element.
	nsUsedHere := make(map[string]struct{})

	// Make sure to track the element namespace for the case:
	// <foo:bar xmlns:foo="..."/>
	if el.Space != "" {
		nsUsedHere[el.Space] = struct{}{}
	}

	toRemove := make([]string, 0, 0)

	for _, a := range el.Attr {
		switch a.Space {
		case nsSpace:

			// For simplicity, remove all xmlns attributes, to be re-added in
			// one pass later. Otherwise we would need another map/set to track
			// xmlns attributes that were left alone.
			toRemove = append(toRemove, a.Space+":"+a.Key)
			if _, ok := nsAlreadyDeclared[a.Key]; !ok {
				// Not tracking ancestor state for this namespace yet: add it.
				nsAlreadyDeclared[a.Key] = c14nSpace{a: a, used: false}
			}

			// This algorithm accepts a set of namespaces which should be treated
			// in an inclusive fashion. Specifically that means we should keep the
			// declaration of that namespace closest to the root of the tree. We
			// can accomplish that by pretending it was used by this element.
			_, inclusive := inclusiveNamespaces[a.Key]
			if inclusive {
				nsUsedHere[a.Key] = struct{}{}
			}

		default:
			// We only track namespaces, so ignore attributes without one.
			if a.Space != "" {
				nsUsedHere[a.Space] = struct{}{}
			}
		}
	}

	// Remove all xmlns attributes so that we can add them back with
	// much-simpler logic below.
	for _, attrK := range toRemove {
		el.RemoveAttr(attrK)
	}

	// For all namespaces used on the current element, declare them if they were
	// not declared (and used) in an ancestor.
	for k := range nsUsedHere {
		spc := nsAlreadyDeclared[k]
		// If previously unused, emit the declaration here and mark it used.
		if !spc.used {
			el.Attr = append(el.Attr, spc.a)
			spc.used = true

			// Assignment here only updates the pre-existing `used` tracking value.
			nsAlreadyDeclared[k] = spc
		}
	}

	// Canonicalize all children, passing down the ancestor tracking map.
	for _, child := range el.ChildElements() {
		excCanonicalPrep(child, nsAlreadyDeclared, inclusiveNamespaces)
	}

	// Sort attributes into canonical order.
	sort.Sort(attrsByKey(el.Attr))

	return el.Copy()
}
||||
|
||||
// canonicalPrep accepts an *etree.Element and transforms it into one which is ready
|
||||
// for serialization into inclusive canonical form. Specifically this
|
||||
// entails:
|
||||
//
|
||||
// 1. Stripping re-declarations of namespaces
|
||||
// 2. Sorting attributes into canonical order
|
||||
//
|
||||
// Inclusive canonicalization does not strip unused namespaces.
|
||||
//
|
||||
// TODO(russell_h): This is very similar to excCanonicalPrep - perhaps they should
|
||||
// be unified into one parameterized function?
|
||||
func canonicalPrep(el *etree.Element, seenSoFar map[string]struct{}) *etree.Element { |
||||
_seenSoFar := make(map[string]struct{}) |
||||
for k, v := range seenSoFar { |
||||
_seenSoFar[k] = v |
||||
} |
||||
|
||||
ne := el.Copy() |
||||
sort.Sort(attrsByKey(ne.Attr)) |
||||
if len(ne.Attr) != 0 { |
||||
for _, attr := range ne.Attr { |
||||
if attr.Space != nsSpace { |
||||
continue |
||||
} |
||||
key := attr.Space + ":" + attr.Key |
||||
if _, seen := _seenSoFar[key]; seen { |
||||
ne.RemoveAttr(attr.Space + ":" + attr.Key) |
||||
} else { |
||||
_seenSoFar[key] = struct{}{} |
||||
} |
||||
} |
||||
} |
||||
|
||||
for i, token := range ne.Child { |
||||
childElement, ok := token.(*etree.Element) |
||||
if ok { |
||||
ne.Child[i] = canonicalPrep(childElement, _seenSoFar) |
||||
} |
||||
} |
||||
|
||||
return ne |
||||
} |
||||
|
||||
func canonicalSerialize(el *etree.Element) ([]byte, error) { |
||||
doc := etree.NewDocument() |
||||
doc.SetRoot(el) |
||||
|
||||
doc.WriteSettings = etree.WriteSettings{ |
||||
CanonicalAttrVal: true, |
||||
CanonicalEndTags: true, |
||||
CanonicalText: true, |
||||
} |
||||
|
||||
return doc.WriteToBytes() |
||||
} |
||||
@ -0,0 +1,55 @@
|
||||
package dsig |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/jonboulle/clockwork" |
||||
) |
||||
|
||||
// Clock wraps a clockwork.Clock (which could be real or fake) in order
// to default to a real clock when a nil *Clock is used. In other words,
// if you attempt to use a nil *Clock it will defer to the real system
// clock. This allows Clock to be easily added to structs with methods
// that currently reference the time package, without requiring every
// instantiation of that struct to be updated.
type Clock struct {
	wrapped clockwork.Clock
}

// getWrapped returns the underlying clockwork.Clock, substituting the
// real system clock when the receiver is nil.
func (c *Clock) getWrapped() clockwork.Clock {
	if c == nil {
		return clockwork.NewRealClock()
	}

	return c.wrapped
}
||||
|
||||
// After mimics time.After using the wrapped clock.
func (c *Clock) After(d time.Duration) <-chan time.Time {
	return c.getWrapped().After(d)
}

// Sleep mimics time.Sleep using the wrapped clock.
func (c *Clock) Sleep(d time.Duration) {
	c.getWrapped().Sleep(d)
}

// Now mimics time.Now using the wrapped clock.
func (c *Clock) Now() time.Time {
	return c.getWrapped().Now()
}

// NewRealClock returns a Clock backed by the real system clock.
func NewRealClock() *Clock {
	return &Clock{
		wrapped: clockwork.NewRealClock(),
	}
}

// NewFakeClock returns a Clock backed by the provided (typically fake)
// clockwork.Clock, for use in tests.
func NewFakeClock(wrapped clockwork.Clock) *Clock {
	return &Clock{
		wrapped: wrapped,
	}
}

// NewFakeClockAt returns a Clock backed by a fake clock initialised at t.
func NewFakeClockAt(t time.Time) *Clock {
	return &Clock{
		wrapped: clockwork.NewFakeClockAt(t),
	}
}
||||
@ -0,0 +1,63 @@
|
||||
package dsig |
||||
|
||||
import ( |
||||
"crypto/rand" |
||||
"crypto/rsa" |
||||
"crypto/x509" |
||||
"math/big" |
||||
"time" |
||||
) |
||||
|
||||
// X509KeyStore supplies the RSA private key and certificate bytes used
// to produce signatures.
type X509KeyStore interface {
	GetKeyPair() (privateKey *rsa.PrivateKey, cert []byte, err error)
}

// X509CertificateStore supplies the root certificates trusted during
// validation.
type X509CertificateStore interface {
	Certificates() (roots []*x509.Certificate, err error)
}

// MemoryX509CertificateStore is an in-memory X509CertificateStore.
type MemoryX509CertificateStore struct {
	Roots []*x509.Certificate
}

// Certificates returns the stored roots; it never fails.
func (mX509cs *MemoryX509CertificateStore) Certificates() ([]*x509.Certificate, error) {
	return mX509cs.Roots, nil
}

// MemoryX509KeyStore is an in-memory X509KeyStore holding one key pair.
type MemoryX509KeyStore struct {
	privateKey *rsa.PrivateKey
	cert       []byte
}

// GetKeyPair returns the stored key and certificate; it never fails.
func (ks *MemoryX509KeyStore) GetKeyPair() (*rsa.PrivateKey, []byte, error) {
	return ks.privateKey, ks.cert, nil
}
||||
|
||||
func RandomKeyStoreForTest() X509KeyStore { |
||||
key, err := rsa.GenerateKey(rand.Reader, 1024) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
now := time.Now() |
||||
|
||||
template := &x509.Certificate{ |
||||
SerialNumber: big.NewInt(0), |
||||
NotBefore: now.Add(-5 * time.Minute), |
||||
NotAfter: now.Add(365 * 24 * time.Hour), |
||||
|
||||
KeyUsage: x509.KeyUsageDigitalSignature, |
||||
ExtKeyUsage: []x509.ExtKeyUsage{}, |
||||
BasicConstraintsValid: true, |
||||
} |
||||
|
||||
cert, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
return &MemoryX509KeyStore{ |
||||
privateKey: key, |
||||
cert: cert, |
||||
} |
||||
} |
||||
@ -0,0 +1,186 @@
|
||||
package dsig |
||||
|
||||
import ( |
||||
"crypto" |
||||
"crypto/rand" |
||||
"crypto/rsa" |
||||
_ "crypto/sha1" |
||||
_ "crypto/sha256" |
||||
"encoding/base64" |
||||
"errors" |
||||
"fmt" |
||||
|
||||
"github.com/beevik/etree" |
||||
) |
||||
|
||||
// SigningContext holds the state needed to produce an XML signature:
// the digest/signature hash, the signing key store, the attribute used
// to identify signed elements, the namespace prefix for emitted
// signature elements, and the canonicalization algorithm.
type SigningContext struct {
	Hash          crypto.Hash
	KeyStore      X509KeyStore
	IdAttribute   string
	Prefix        string
	Canonicalizer Canonicalizer
}

// NewDefaultSigningContext returns a SigningContext using SHA-256, the
// default ID attribute and prefix, and inclusive C14N 1.1.
func NewDefaultSigningContext(ks X509KeyStore) *SigningContext {
	return &SigningContext{
		Hash:          crypto.SHA256,
		KeyStore:      ks,
		IdAttribute:   DefaultIdAttr,
		Prefix:        DefaultPrefix,
		Canonicalizer: MakeC14N11Canonicalizer(),
	}
}
||||
|
||||
func (ctx *SigningContext) SetSignatureMethod(algorithmID string) error { |
||||
hash, ok := signatureMethodsByIdentifier[algorithmID] |
||||
if !ok { |
||||
return fmt.Errorf("Unknown SignatureMethod: %s", algorithmID) |
||||
} |
||||
|
||||
ctx.Hash = hash |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (ctx *SigningContext) digest(el *etree.Element) ([]byte, error) { |
||||
canonical, err := ctx.Canonicalizer.Canonicalize(el) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
hash := ctx.Hash.New() |
||||
_, err = hash.Write(canonical) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return hash.Sum(nil), nil |
||||
} |
||||
|
||||
// constructSignedInfo builds the SignedInfo element for el: the
// canonicalization and signature method identifiers plus a Reference
// carrying el's digest. When enveloped is true, an enveloped-signature
// transform is included. el must carry the default ID attribute.
func (ctx *SigningContext) constructSignedInfo(el *etree.Element, enveloped bool) (*etree.Element, error) {
	digestAlgorithmIdentifier, ok := digestAlgorithmIdentifiers[ctx.Hash]
	if !ok {
		return nil, errors.New("unsupported hash mechanism")
	}

	signatureMethodIdentifier, ok := signatureMethodIdentifiers[ctx.Hash]
	if !ok {
		return nil, errors.New("unsupported signature method")
	}

	// Digest the target element before assembling SignedInfo.
	digest, err := ctx.digest(el)
	if err != nil {
		return nil, err
	}

	signedInfo := &etree.Element{
		Tag:   SignedInfoTag,
		Space: ctx.Prefix,
	}

	// /SignedInfo/CanonicalizationMethod
	canonicalizationMethod := ctx.createNamespacedElement(signedInfo, CanonicalizationMethodTag)
	canonicalizationMethod.CreateAttr(AlgorithmAttr, string(ctx.Canonicalizer.Algorithm()))

	// /SignedInfo/SignatureMethod
	signatureMethod := ctx.createNamespacedElement(signedInfo, SignatureMethodTag)
	signatureMethod.CreateAttr(AlgorithmAttr, signatureMethodIdentifier)

	// /SignedInfo/Reference
	reference := ctx.createNamespacedElement(signedInfo, ReferenceTag)

	dataId := el.SelectAttrValue(DefaultIdAttr, "")
	if dataId == "" {
		return nil, errors.New("Missing data ID")
	}

	// Reference the signed element by ID as a same-document URI.
	reference.CreateAttr(URIAttr, "#"+dataId)

	// /SignedInfo/Reference/Transforms
	transforms := ctx.createNamespacedElement(reference, TransformsTag)
	if enveloped {
		envelopedTransform := ctx.createNamespacedElement(transforms, TransformTag)
		envelopedTransform.CreateAttr(AlgorithmAttr, EnvelopedSignatureAltorithmId.String())
	}
	canonicalizationAlgorithm := ctx.createNamespacedElement(transforms, TransformTag)
	canonicalizationAlgorithm.CreateAttr(AlgorithmAttr, string(ctx.Canonicalizer.Algorithm()))

	// /SignedInfo/Reference/DigestMethod
	digestMethod := ctx.createNamespacedElement(reference, DigestMethodTag)
	digestMethod.CreateAttr(AlgorithmAttr, digestAlgorithmIdentifier)

	// /SignedInfo/Reference/DigestValue
	digestValue := ctx.createNamespacedElement(reference, DigestValueTag)
	digestValue.SetText(base64.StdEncoding.EncodeToString(digest))

	return signedInfo, nil
}
||||
|
||||
// constructSignature builds the complete Signature element for el:
// SignedInfo, SignatureValue (an RSA PKCS#1 v1.5 signature over the
// canonicalized SignedInfo), and KeyInfo carrying the signing certificate.
func (ctx *SigningContext) constructSignature(el *etree.Element, enveloped bool) (*etree.Element, error) {
	signedInfo, err := ctx.constructSignedInfo(el, enveloped)
	if err != nil {
		return nil, err
	}

	sig := &etree.Element{
		Tag:   SignatureTag,
		Space: ctx.Prefix,
	}

	// Declare the signature namespace on the Signature element, bound to
	// the context prefix when one is configured.
	xmlns := "xmlns"
	if ctx.Prefix != "" {
		xmlns += ":" + ctx.Prefix
	}

	sig.CreateAttr(xmlns, Namespace)

	sig.Child = append(sig.Child, signedInfo)

	// Must propagate down the attributes to the 'SignedInfo' before digesting
	for _, attr := range sig.Attr {
		signedInfo.CreateAttr(attr.Space+":"+attr.Key, attr.Value)
	}

	digest, err := ctx.digest(signedInfo)
	if err != nil {
		return nil, err
	}

	key, cert, err := ctx.KeyStore.GetKeyPair()
	if err != nil {
		return nil, err
	}

	// Sign the SignedInfo digest with the configured hash.
	rawSignature, err := rsa.SignPKCS1v15(rand.Reader, key, ctx.Hash, digest)
	if err != nil {
		return nil, err
	}

	signatureValue := ctx.createNamespacedElement(sig, SignatureValueTag)
	signatureValue.SetText(base64.StdEncoding.EncodeToString(rawSignature))

	// /Signature/KeyInfo/X509Data/X509Certificate holds the certificate
	// bytes, base64-encoded.
	keyInfo := ctx.createNamespacedElement(sig, KeyInfoTag)
	x509Data := ctx.createNamespacedElement(keyInfo, X509DataTag)
	x509Certificate := ctx.createNamespacedElement(x509Data, X509CertificateTag)
	x509Certificate.SetText(base64.StdEncoding.EncodeToString(cert))

	return sig, nil
}
||||
|
||||
func (ctx *SigningContext) createNamespacedElement(el *etree.Element, tag string) *etree.Element { |
||||
child := el.CreateElement(tag) |
||||
child.Space = ctx.Prefix |
||||
return child |
||||
} |
||||
|
||||
func (ctx *SigningContext) SignEnveloped(el *etree.Element) (*etree.Element, error) { |
||||
sig, err := ctx.constructSignature(el, true) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
ret := el.Copy() |
||||
ret.Child = append(ret.Child, sig) |
||||
|
||||
return ret, nil |
||||
} |
||||
@ -0,0 +1,34 @@
|
||||
package dsig |
||||
|
||||
import ( |
||||
"crypto/rsa" |
||||
"crypto/tls" |
||||
"fmt" |
||||
) |
||||
|
||||
// Well-known errors
var (
	ErrNonRSAKey           = fmt.Errorf("Private key was not RSA")
	ErrMissingCertificates = fmt.Errorf("No public certificates provided")
)

// TLSCertKeyStore wraps the stdlib tls.Certificate to return its contained key
// and certs.
type TLSCertKeyStore tls.Certificate

// GetKeyPair implements X509KeyStore using the underlying tls.Certificate.
func (d TLSCertKeyStore) GetKeyPair() (*rsa.PrivateKey, []byte, error) {
	pk, ok := d.PrivateKey.(*rsa.PrivateKey)
	if !ok {
		// Only RSA private keys are supported for signing here.
		return nil, nil, ErrNonRSAKey
	}

	if len(d.Certificate) < 1 {
		return nil, nil, ErrMissingCertificates
	}

	// The leaf certificate is the first entry in the chain.
	return pk, d.Certificate[0], nil
}
||||
@ -0,0 +1,397 @@
|
||||
package dsig |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/rsa" |
||||
"crypto/x509" |
||||
"encoding/base64" |
||||
"encoding/pem" |
||||
"errors" |
||||
"fmt" |
||||
"regexp" |
||||
|
||||
"github.com/beevik/etree" |
||||
) |
||||
|
||||
// uriRegexp matches same-document reference URIs of the form "#ID", where ID
// is an NCName-like XML identifier. A raw string literal avoids the doubled
// backslash escaping the original double-quoted pattern needed.
var uriRegexp = regexp.MustCompile(`^#[a-zA-Z_][\w.-]*$`)
||||
|
||||
// ValidationContext holds the state needed to validate an enveloped XML
// signature: the set of trusted certificates, the name of the ID attribute
// used to resolve signature references, and a clock for certificate
// validity checks.
type ValidationContext struct {
	// CertificateStore supplies the trusted root certificates against which
	// the certificate embedded in the signature is compared.
	CertificateStore X509CertificateStore
	// IdAttribute names the attribute referenced by signature URIs
	// (DefaultIdAttr, i.e. "ID", by default).
	IdAttribute string
	// Clock is the time source for NotBefore/NotAfter checks.
	// NOTE(review): verifyCertificate calls ctx.Clock.Now() without a nil
	// check — confirm whether a nil *Clock is safe or a constructor must set it.
	Clock *Clock
}
||||
|
||||
func NewDefaultValidationContext(certificateStore X509CertificateStore) *ValidationContext { |
||||
return &ValidationContext{ |
||||
CertificateStore: certificateStore, |
||||
IdAttribute: DefaultIdAttr, |
||||
} |
||||
} |
||||
|
||||
// TODO(russell_h): More flexible namespace support. This might barely work.
|
||||
func inNamespace(el *etree.Element, ns string) bool { |
||||
for _, attr := range el.Attr { |
||||
if attr.Value == ns { |
||||
if attr.Space == "" && attr.Key == "xmlns" { |
||||
return el.Space == "" |
||||
} else if attr.Space == "xmlns" { |
||||
return el.Space == attr.Key |
||||
} |
||||
} |
||||
} |
||||
|
||||
return false |
||||
} |
||||
|
||||
// childPath returns an etree search path selecting a direct child with the
// given tag, namespace-qualified when space is non-empty.
func childPath(space, tag string) string {
	if space == "" {
		return "./" + tag
	}
	// Early return above removes the redundant else branch (Go idiom: keep
	// the happy path left-aligned).
	return "./" + space + ":" + tag
}
||||
|
||||
// The RemoveElement method on etree.Element isn't recursive...
|
||||
func recursivelyRemoveElement(tree, el *etree.Element) bool { |
||||
if tree.RemoveChild(el) != nil { |
||||
return true |
||||
} |
||||
|
||||
for _, child := range tree.Child { |
||||
if childElement, ok := child.(*etree.Element); ok { |
||||
if recursivelyRemoveElement(childElement, el) { |
||||
return true |
||||
} |
||||
} |
||||
} |
||||
|
||||
return false |
||||
} |
||||
|
||||
// transform applies the passed set of transforms to the specified root element.
//
// The functionality of transform is currently very limited and purpose-specific:
// exactly one enveloped-signature transform and one canonicalization transform
// are expected, and the resulting Canonicalizer is returned for later digesting.
//
// NOTE(russell_h): Ideally this wouldn't mutate the root passed to it, and would
// instead return a copy. Unfortunately copying the tree makes it difficult to
// correctly locate the signature. I'm opting, for now, to simply mutate the root
// parameter.
func (ctx *ValidationContext) transform(root, sig *etree.Element, transforms []*etree.Element) (*etree.Element, Canonicalizer, error) {
	if len(transforms) != 2 {
		return nil, nil, errors.New("Expected Enveloped and C14N transforms")
	}

	var canonicalizer Canonicalizer

	for _, transform := range transforms {
		algo := transform.SelectAttr(AlgorithmAttr)
		if algo == nil {
			return nil, nil, errors.New("Missing Algorithm attribute")
		}

		switch AlgorithmID(algo.Value) {
		case EnvelopedSignatureAltorithmId:
			// Enveloped-signature transform: strip the Signature element out
			// of the tree before digesting (mutates root, see NOTE above).
			if !recursivelyRemoveElement(root, sig) {
				return nil, nil, errors.New("Error applying canonicalization transform: Signature not found")
			}

		case CanonicalXML10ExclusiveAlgorithmId:
			// Exclusive C14N 1.0 may carry an InclusiveNamespaces child whose
			// PrefixList widens the set of namespaces treated as inclusive.
			var prefixList string
			ins := transform.FindElement(childPath("", InclusiveNamespacesTag))
			if ins != nil {
				prefixListEl := ins.SelectAttr(PrefixListAttr)
				if prefixListEl != nil {
					prefixList = prefixListEl.Value
				}
			}

			canonicalizer = MakeC14N10ExclusiveCanonicalizerWithPrefixList(prefixList)

		case CanonicalXML11AlgorithmId:
			canonicalizer = MakeC14N11Canonicalizer()

		default:
			return nil, nil, errors.New("Unknown Transform Algorithm: " + algo.Value)
		}
	}

	// Both transforms being enveloped-signature would leave this nil.
	if canonicalizer == nil {
		return nil, nil, errors.New("Expected canonicalization transform")
	}

	return root, canonicalizer, nil
}
||||
|
||||
func (ctx *ValidationContext) digest(el *etree.Element, digestAlgorithmId string, canonicalizer Canonicalizer) ([]byte, error) { |
||||
data, err := canonicalizer.Canonicalize(el) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
digestAlgorithm, ok := digestAlgorithmsByIdentifier[digestAlgorithmId] |
||||
if !ok { |
||||
return nil, errors.New("Unknown digest algorithm: " + digestAlgorithmId) |
||||
} |
||||
|
||||
hash := digestAlgorithm.New() |
||||
_, err = hash.Write(data) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return hash.Sum(nil), nil |
||||
} |
||||
|
||||
// verifySignedInfo canonicalizes the SignedInfo child of signatureElement,
// hashes it with the algorithm named by signatureMethodId, and verifies the
// PKCS#1 v1.5 RSA signature sig against the certificate's public key.
// A nil return means the SignedInfo was signed by the holder of cert's key.
func (ctx *ValidationContext) verifySignedInfo(signatureElement *etree.Element, canonicalizer Canonicalizer, signatureMethodId string, cert *x509.Certificate, sig []byte) error {
	signedInfo := signatureElement.FindElement(childPath(signatureElement.Space, SignedInfoTag))
	if signedInfo == nil {
		return errors.New("Missing SignedInfo")
	}

	// Any attributes from the 'Signature' element must be pushed down into the 'SignedInfo' element before it is canonicalized
	// NOTE(review): for attributes with an empty Space this produces a key of
	// the form ":Key" — confirm this is intended for unprefixed attributes.
	for _, attr := range signatureElement.Attr {
		signedInfo.CreateAttr(attr.Space+":"+attr.Key, attr.Value)
	}

	// Canonicalize the xml
	canonical, err := canonicalizer.Canonicalize(signedInfo)
	if err != nil {
		return err
	}

	signatureAlgorithm, ok := signatureMethodsByIdentifier[signatureMethodId]
	if !ok {
		return errors.New("Unknown signature method: " + signatureMethodId)
	}

	// Hash the canonical bytes with the hash that matches the RSA signature
	// method (SHA-1/SHA-256/SHA-512).
	hash := signatureAlgorithm.New()
	_, err = hash.Write(canonical)
	if err != nil {
		return err
	}

	hashed := hash.Sum(nil)

	// Only RSA public keys are supported by this verifier.
	pubKey, ok := cert.PublicKey.(*rsa.PublicKey)
	if !ok {
		return errors.New("Invalid public key")
	}

	// Verify that the private key matching the public key from the cert was what was used to sign the 'SignedInfo' and produce the 'SignatureValue'
	err = rsa.VerifyPKCS1v15(pubKey, signatureAlgorithm, hashed[:], sig)
	if err != nil {
		return err
	}

	return nil
}
||||
|
||||
func (ctx *ValidationContext) validateSignature(el *etree.Element, cert *x509.Certificate) (*etree.Element, error) { |
||||
el = el.Copy() |
||||
|
||||
// Verify the document minus the signedInfo against the 'DigestValue'
|
||||
// Find the 'Signature' element
|
||||
sig := el.FindElement(SignatureTag) |
||||
|
||||
if sig == nil { |
||||
return nil, errors.New("Missing Signature") |
||||
} |
||||
|
||||
if !inNamespace(sig, Namespace) { |
||||
return nil, errors.New("Signature element is in the wrong namespace") |
||||
} |
||||
|
||||
// Get the 'SignedInfo' element
|
||||
signedInfo := sig.FindElement(childPath(sig.Space, SignedInfoTag)) |
||||
if signedInfo == nil { |
||||
return nil, errors.New("Missing SignedInfo") |
||||
} |
||||
|
||||
reference := signedInfo.FindElement(childPath(sig.Space, ReferenceTag)) |
||||
if reference == nil { |
||||
return nil, errors.New("Missing Reference") |
||||
} |
||||
|
||||
transforms := reference.FindElement(childPath(sig.Space, TransformsTag)) |
||||
if transforms == nil { |
||||
return nil, errors.New("Missing Transforms") |
||||
} |
||||
|
||||
uri := reference.SelectAttr("URI") |
||||
if uri == nil { |
||||
// TODO(russell_h): It is permissible to leave this out. We should be
|
||||
// able to fall back to finding the referenced element some other way.
|
||||
return nil, errors.New("Reference is missing URI attribute") |
||||
} |
||||
|
||||
if !uriRegexp.MatchString(uri.Value) { |
||||
return nil, errors.New("Invalid URI: " + uri.Value) |
||||
} |
||||
|
||||
// Get the element referenced in the 'SignedInfo'
|
||||
referencedElement := el.FindElement(fmt.Sprintf("//[@%s='%s']", ctx.IdAttribute, uri.Value[1:])) |
||||
if referencedElement == nil { |
||||
return nil, errors.New("Unable to find referenced element: " + uri.Value) |
||||
} |
||||
|
||||
// Perform all transformations listed in the 'SignedInfo'
|
||||
// Basically, this means removing the 'SignedInfo'
|
||||
transformed, canonicalizer, err := ctx.transform(referencedElement, sig, transforms.ChildElements()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
digestMethod := reference.FindElement(childPath(sig.Space, DigestMethodTag)) |
||||
if digestMethod == nil { |
||||
return nil, errors.New("Missing DigestMethod") |
||||
} |
||||
|
||||
digestValue := reference.FindElement(childPath(sig.Space, DigestValueTag)) |
||||
if digestValue == nil { |
||||
return nil, errors.New("Missing DigestValue") |
||||
} |
||||
|
||||
digestAlgorithmAttr := digestMethod.SelectAttr(AlgorithmAttr) |
||||
if digestAlgorithmAttr == nil { |
||||
return nil, errors.New("Missing DigestMethod Algorithm attribute") |
||||
} |
||||
|
||||
// Digest the transformed XML and compare it to the 'DigestValue' from the 'SignedInfo'
|
||||
digest, err := ctx.digest(transformed, digestAlgorithmAttr.Value, canonicalizer) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
decodedDigestValue, err := base64.StdEncoding.DecodeString(digestValue.Text()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if !bytes.Equal(digest, decodedDigestValue) { |
||||
return nil, errors.New("Signature could not be verified") |
||||
} |
||||
|
||||
//Verify the signed info
|
||||
signatureMethod := signedInfo.FindElement(childPath(sig.Space, SignatureMethodTag)) |
||||
if signatureMethod == nil { |
||||
return nil, errors.New("Missing SignatureMethod") |
||||
} |
||||
|
||||
signatureMethodAlgorithmAttr := signatureMethod.SelectAttr(AlgorithmAttr) |
||||
if digestAlgorithmAttr == nil { |
||||
return nil, errors.New("Missing SignatureMethod Algorithm attribute") |
||||
} |
||||
|
||||
// Decode the 'SignatureValue' so we can compare against it
|
||||
signatureValue := sig.FindElement(childPath(sig.Space, SignatureValueTag)) |
||||
if signatureValue == nil { |
||||
return nil, errors.New("Missing SignatureValue") |
||||
} |
||||
|
||||
decodedSignature, err := base64.StdEncoding.DecodeString(signatureValue.Text()) |
||||
|
||||
if err != nil { |
||||
return nil, errors.New("Could not decode signature") |
||||
} |
||||
// Actually verify the 'SignedInfo' was signed by a trusted source
|
||||
err = ctx.verifySignedInfo(sig, canonicalizer, signatureMethodAlgorithmAttr.Value, cert, decodedSignature) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return transformed, nil |
||||
} |
||||
|
||||
func contains(roots []*x509.Certificate, cert *x509.Certificate) bool { |
||||
for _, root := range roots { |
||||
if root.Equal(cert) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// verifyCertificate locates the Signature element that references the
// top-level element el (by matching the Reference URI against el's ID
// attribute), extracts the X509Certificate embedded in that signature,
// and checks that it is present in the trusted certificate store and
// currently within its validity window. The parsed certificate is returned
// for subsequent signature verification.
func (ctx *ValidationContext) verifyCertificate(el *etree.Element) (*x509.Certificate, error) {
	// NOTE(review): dereferences ctx.Clock without a nil check — confirm
	// callers always populate Clock (NewDefaultValidationContext does not).
	now := ctx.Clock.Now()
	el = el.Copy()

	idAttr := el.SelectAttr(DefaultIdAttr)
	if idAttr == nil || idAttr.Value == "" {
		return nil, errors.New("Missing ID attribute")
	}

	signatureElements := el.FindElements("//" + SignatureTag)
	var signatureElement *etree.Element

	// Find the Signature element that references the whole Response element
	for _, e := range signatureElements {
		// Work on a copy so probing one candidate doesn't disturb the tree.
		e2 := e.Copy()

		signedInfo := e2.FindElement(childPath(e2.Space, SignedInfoTag))
		if signedInfo == nil {
			return nil, errors.New("Missing SignedInfo")
		}

		referenceElement := signedInfo.FindElement(childPath(e2.Space, ReferenceTag))
		if referenceElement == nil {
			return nil, errors.New("Missing Reference Element")
		}

		uriAttr := referenceElement.SelectAttr(URIAttr)
		if uriAttr == nil || uriAttr.Value == "" {
			return nil, errors.New("Missing URI attribute")
		}

		// Strip the leading character before comparing with the ID.
		// NOTE(review): assumes the URI starts with '#'; unlike
		// validateSignature, this path does not run uriRegexp first — confirm.
		if uriAttr.Value[1:] == idAttr.Value {
			signatureElement = e
			break
		}
	}

	if signatureElement == nil {
		return nil, errors.New("Missing signature referencing the top-level element")
	}

	// Get the x509 element from the signature
	x509Element := signatureElement.FindElement("//" + childPath(signatureElement.Space, X509CertificateTag))
	if x509Element == nil {
		return nil, errors.New("Missing x509 Element")
	}

	// Re-wrap the bare base64 certificate text in PEM armor so the stdlib
	// decoder can parse it.
	x509Text := "-----BEGIN CERTIFICATE-----\n" + x509Element.Text() + "\n-----END CERTIFICATE-----"
	block, _ := pem.Decode([]byte(x509Text))
	if block == nil {
		return nil, errors.New("Failed to parse certificate PEM")
	}

	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, err
	}

	roots, err := ctx.CertificateStore.Certificates()
	if err != nil {
		return nil, err
	}

	// Verify that the certificate is one we trust
	if !contains(roots, cert) {
		return nil, errors.New("Could not verify certificate against trusted certs")
	}

	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		return nil, errors.New("Cert is not valid at this time")
	}

	return cert, nil
}
||||
|
||||
func (ctx *ValidationContext) Validate(el *etree.Element) (*etree.Element, error) { |
||||
cert, err := ctx.verifyCertificate(el) |
||||
|
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return ctx.validateSignature(el, cert) |
||||
} |
||||
@ -0,0 +1,78 @@
|
||||
package dsig |
||||
|
||||
import "crypto" |
||||
|
||||
// Namespace constants for XML digital signatures.
const (
	// DefaultPrefix is the conventional namespace prefix for XML-DSig elements.
	DefaultPrefix = "ds"
	// Namespace is the XML digital signature namespace URI.
	Namespace = "http://www.w3.org/2000/09/xmldsig#"
)
||||
|
||||
// Tags: element names used by the XML-DSig structure.
const (
	SignatureTag              = "Signature"
	SignedInfoTag             = "SignedInfo"
	CanonicalizationMethodTag = "CanonicalizationMethod"
	SignatureMethodTag        = "SignatureMethod"
	ReferenceTag              = "Reference"
	TransformsTag             = "Transforms"
	TransformTag              = "Transform"
	DigestMethodTag           = "DigestMethod"
	DigestValueTag            = "DigestValue"
	SignatureValueTag         = "SignatureValue"
	KeyInfoTag                = "KeyInfo"
	X509DataTag               = "X509Data"
	X509CertificateTag        = "X509Certificate"
	InclusiveNamespacesTag    = "InclusiveNamespaces"
)
||||
|
||||
// Attribute names used on XML-DSig elements.
const (
	AlgorithmAttr  = "Algorithm"
	URIAttr        = "URI"
	DefaultIdAttr  = "ID"
	PrefixListAttr = "PrefixList"
)
||||
|
||||
// AlgorithmID is the URI identifying an XML-DSig algorithm.
type AlgorithmID string

// String returns the algorithm identifier URI as a plain string.
func (id AlgorithmID) String() string {
	return string(id)
}
||||
|
||||
// RSA signature method identifier URIs supported by this package.
// NOTE: RSA-SHA1 is retained for interoperability; SHA-1 is considered weak.
const (
	RSASHA1SignatureMethod   = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
	RSASHA256SignatureMethod = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"
	RSASHA512SignatureMethod = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha512"
)
||||
|
||||
// Well-known transform/canonicalization algorithm identifiers.
const (
	// Supported canonicalization algorithms
	CanonicalXML10ExclusiveAlgorithmId AlgorithmID = "http://www.w3.org/2001/10/xml-exc-c14n#"
	CanonicalXML11AlgorithmId          AlgorithmID = "http://www.w3.org/2006/12/xml-c14n11"

	// EnvelopedSignatureAltorithmId identifies the enveloped-signature
	// transform. (The "Altorithm" misspelling is part of the exported name
	// and is kept for backward compatibility with existing callers.)
	EnvelopedSignatureAltorithmId AlgorithmID = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
)
||||
|
||||
// digestAlgorithmIdentifiers maps supported hash functions to their XML-DSig
// digest algorithm URIs.
var digestAlgorithmIdentifiers = map[crypto.Hash]string{
	crypto.SHA1:   "http://www.w3.org/2000/09/xmldsig#sha1",
	crypto.SHA256: "http://www.w3.org/2001/04/xmlenc#sha256",
	crypto.SHA512: "http://www.w3.org/2001/04/xmlenc#sha512",
}

// Reverse lookups (URI -> hash), populated once in init below.
var digestAlgorithmsByIdentifier = map[string]crypto.Hash{}
var signatureMethodsByIdentifier = map[string]crypto.Hash{}

// init inverts the identifier maps so validation code can resolve an
// algorithm URI back to the corresponding crypto.Hash.
func init() {
	for hash, id := range digestAlgorithmIdentifiers {
		digestAlgorithmsByIdentifier[id] = hash
	}
	for hash, id := range signatureMethodIdentifiers {
		signatureMethodsByIdentifier[id] = hash
	}
}

// signatureMethodIdentifiers maps hash functions to their RSA signature
// method URIs. (Declared after init; package-level initialization order in
// Go makes this safe.)
var signatureMethodIdentifiers = map[crypto.Hash]string{
	crypto.SHA1:   RSASHA1SignatureMethod,
	crypto.SHA256: RSASHA256SignatureMethod,
	crypto.SHA512: RSASHA512SignatureMethod,
}
||||
Loading…
Reference in new issue