Browse Source
Bumps [github.com/jackc/pgx/v5](https://github.com/jackc/pgx) from 5.6.0 to 5.7.1. - [Changelog](https://github.com/jackc/pgx/blob/master/CHANGELOG.md) - [Commits](https://github.com/jackc/pgx/compare/v5.6.0...v5.7.1) --- updated-dependencies: - dependency-name: github.com/jackc/pgx/v5 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>pull/3307/head
29 changed files with 749 additions and 160 deletions
@ -1,9 +0,0 @@
|
||||
language: go |
||||
|
||||
go: |
||||
- 1.x |
||||
- tip |
||||
|
||||
matrix: |
||||
allow_failures: |
||||
- go: tip |
||||
@ -0,0 +1,262 @@
|
||||
package pgx |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"regexp" |
||||
"strconv" |
||||
"strings" |
||||
|
||||
"github.com/jackc/pgx/v5/pgtype" |
||||
) |
||||
|
||||
/*
buildLoadDerivedTypesSQL generates the correct query for retrieving type information.

	pgVersion: the major version of the PostgreSQL server
	typeNames: the names of the types to load. If nil, load all types.

The returned SQL uses a recursive CTE so that every type a named type depends
on (array elements, domain base types, composite fields, range subtypes) is
also returned, ordered children-first so dependents can be registered before
their parents.
*/
func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
	// pg_range gained rngmultitypid (multirange support) in PostgreSQL 14.
	supportsMultirange := (pgVersion >= 14)
	var typeNamesClause string

	if typeNames == nil {
		// This should not occur; this will not return any types
		typeNamesClause = "= ''"
	} else {
		// Bind the caller-supplied names as $1.
		typeNamesClause = "= ANY($1)"
	}
	parts := make([]string, 0, 10)

	// Each of the type names provided might be found in pg_class or pg_type.
	// Additionally, it may or may not include a schema portion.
	parts = append(parts, `
WITH RECURSIVE
-- find the OIDs in pg_class which match one of the provided type names
selected_classes(oid,reltype) AS (
-- this query uses the namespace search path, so will match type names without a schema prefix
SELECT pg_class.oid, pg_class.reltype
FROM pg_catalog.pg_class
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
AND relname `, typeNamesClause, `
UNION ALL
-- this query will only match type names which include the schema prefix
SELECT pg_class.oid, pg_class.reltype
FROM pg_class
INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
WHERE nspname || '.' || relname `, typeNamesClause, `
),
selected_types(oid) AS (
-- collect the OIDs from pg_types which correspond to the selected classes
SELECT reltype AS oid
FROM selected_classes
UNION ALL
-- as well as any other type names which match our criteria
SELECT pg_type.oid
FROM pg_type
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
WHERE typname `, typeNamesClause, `
OR nspname || '.' || typname `, typeNamesClause, `
),
-- this builds a parent/child mapping of objects, allowing us to know
-- all the child (ie: dependent) types that a parent (type) requires
-- As can be seen, there are 3 ways this can occur (the last of which
-- is due to being a composite class, where the composite fields are children)
pc(parent, child) AS (
SELECT parent.oid, parent.typelem
FROM pg_type parent
WHERE parent.typtype = 'b' AND parent.typelem != 0
UNION ALL
SELECT parent.oid, parent.typbasetype
FROM pg_type parent
WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
UNION ALL
SELECT pg_type.oid, atttypid
FROM pg_attribute
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
WHERE NOT attisdropped
AND attnum > 0
),
-- Now construct a recursive query which includes a 'depth' element.
-- This is used to ensure that the "youngest" children are registered before
-- their parents.
relationships(parent, child, depth) AS (
SELECT DISTINCT 0::OID, selected_types.oid, 0
FROM selected_types
UNION ALL
SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
FROM selected_classes c
inner join pg_type ON (c.reltype = pg_type.oid)
inner join pg_attribute on (c.oid = pg_attribute.attrelid)
UNION ALL
SELECT pc.parent, pc.child, relationships.depth + 1
FROM pc
INNER JOIN relationships ON (pc.parent = relationships.child)
),
-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
composite AS (
SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
FROM pg_attribute
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
WHERE NOT attisdropped
AND attnum > 0
GROUP BY pg_type.oid
)
-- Bring together this information, showing all the information which might possibly be required
-- to complete the registration, applying filters to only show the items which relate to the selected
-- types/classes.
SELECT typname,
pg_namespace.nspname,
typtype,
typbasetype,
typelem,
pg_type.oid,`)
	// The multirange column only exists on servers that support it; older
	// servers get a constant 0 so the row shape stays identical.
	if supportsMultirange {
		parts = append(parts, `
COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
	} else {
		parts = append(parts, `
0 AS rngtypid,`)
	}
	parts = append(parts, `
COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
attnames, atttypids
FROM relationships
INNER JOIN pg_type ON (pg_type.oid = relationships.child)
LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
	if supportsMultirange {
		parts = append(parts, `
LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
	}

	parts = append(parts, `
LEFT OUTER JOIN composite USING (oid)
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
WHERE NOT (typtype = 'b' AND typelem = 0)`)
	parts = append(parts, `
GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
	if supportsMultirange {
		parts = append(parts, `
multirange.rngtypid,`)
	}
	// ORDER BY MAX(depth) desc ensures the deepest (most dependent-upon)
	// types come first, so LoadTypes can register children before parents.
	parts = append(parts, `
attnames, atttypids
ORDER BY MAX(depth) desc, typname;`)
	return strings.Join(parts, "")
}
||||
|
||||
// derivedTypeInfo is the scan target for one row returned by the query built
// in buildLoadDerivedTypesSQL. Field names mirror the pg_type / pg_range /
// pg_attribute columns they are scanned from.
type derivedTypeInfo struct {
	// OIDs describing the type and its dependencies: the type itself, its
	// domain base type, array element, range subtype, and (for multiranges)
	// the underlying range type. Zero means "not applicable" for this row.
	Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
	// Type name, the single-letter pg_type.typtype code ('b', 'c', 'd', 'e',
	// 'r', 'm'), and the schema (namespace) name.
	TypeName, Typtype, NspName string
	// Composite field names and their type OIDs, parallel slices ordered by
	// attnum (NULL for non-composite rows).
	Attnames  []string
	Atttypids []uint32
}
||||
|
||||
// LoadTypes performs a single (complex) query, returning all the required
|
||||
// information to register the named types, as well as any other types directly
|
||||
// or indirectly required to complete the registration.
|
||||
// The result of this call can be passed into RegisterTypes to complete the process.
|
||||
func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) { |
||||
m := c.TypeMap() |
||||
if typeNames == nil || len(typeNames) == 0 { |
||||
return nil, fmt.Errorf("No type names were supplied.") |
||||
} |
||||
|
||||
// Disregard server version errors. This will result in
|
||||
// the SQL not support recent structures such as multirange
|
||||
serverVersion, _ := serverVersion(c) |
||||
sql := buildLoadDerivedTypesSQL(serverVersion, typeNames) |
||||
var rows Rows |
||||
var err error |
||||
if typeNames == nil { |
||||
rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol) |
||||
} else { |
||||
rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames) |
||||
} |
||||
if err != nil { |
||||
return nil, fmt.Errorf("While generating load types query: %w", err) |
||||
} |
||||
defer rows.Close() |
||||
result := make([]*pgtype.Type, 0, 100) |
||||
for rows.Next() { |
||||
ti := derivedTypeInfo{} |
||||
err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("While scanning type information: %w", err) |
||||
} |
||||
var type_ *pgtype.Type |
||||
switch ti.Typtype { |
||||
case "b": // array
|
||||
dt, ok := m.TypeForOID(ti.Typelem) |
||||
if !ok { |
||||
return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName) |
||||
} |
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}} |
||||
case "c": // composite
|
||||
var fields []pgtype.CompositeCodecField |
||||
for i, fieldName := range ti.Attnames { |
||||
dt, ok := m.TypeForOID(ti.Atttypids[i]) |
||||
if !ok { |
||||
return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i]) |
||||
} |
||||
fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt}) |
||||
} |
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}} |
||||
case "d": // domain
|
||||
dt, ok := m.TypeForOID(ti.Typbasetype) |
||||
if !ok { |
||||
return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName) |
||||
} |
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec} |
||||
case "e": // enum
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}} |
||||
case "r": // range
|
||||
dt, ok := m.TypeForOID(ti.Rngsubtype) |
||||
if !ok { |
||||
return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName) |
||||
} |
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}} |
||||
case "m": // multirange
|
||||
dt, ok := m.TypeForOID(ti.Rngtypid) |
||||
if !ok { |
||||
return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName) |
||||
} |
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}} |
||||
default: |
||||
return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName) |
||||
} |
||||
if type_ != nil { |
||||
m.RegisterType(type_) |
||||
if ti.NspName != "" { |
||||
nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec} |
||||
m.RegisterType(nspType) |
||||
result = append(result, nspType) |
||||
} |
||||
result = append(result, type_) |
||||
} |
||||
} |
||||
return result, nil |
||||
} |
||||
|
||||
// serverVersion returns the postgresql server version.
|
||||
func serverVersion(c *Conn) (int64, error) { |
||||
serverVersionStr := c.PgConn().ParameterStatus("server_version") |
||||
serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr) |
||||
// if not PostgreSQL do nothing
|
||||
if serverVersionStr == "" { |
||||
return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr) |
||||
} |
||||
|
||||
version, err := strconv.ParseInt(serverVersionStr, 10, 64) |
||||
if err != nil { |
||||
return 0, fmt.Errorf("postgres version parsing failed: %w", err) |
||||
} |
||||
return version, nil |
||||
} |
||||
@ -0,0 +1,198 @@
|
||||
package pgtype |
||||
|
||||
import ( |
||||
"database/sql" |
||||
"database/sql/driver" |
||||
"encoding/xml" |
||||
"fmt" |
||||
"reflect" |
||||
) |
||||
|
||||
// XMLCodec is a pgtype codec for the PostgreSQL xml type. The Marshal and
// Unmarshal fields supply the functions used to convert arbitrary Go values
// to and from XML bytes (presumably encoding/xml's Marshal/Unmarshal in the
// default configuration — confirm at the registration site).
type XMLCodec struct {
	Marshal   func(v any) ([]byte, error)
	Unmarshal func(data []byte, v any) error
}
||||
|
||||
func (*XMLCodec) FormatSupported(format int16) bool { |
||||
return format == TextFormatCode || format == BinaryFormatCode |
||||
} |
||||
|
||||
// PreferredFormat returns the wire format this codec prefers when either is
// possible; xml values are handled as text.
func (*XMLCodec) PreferredFormat() int16 {
	return TextFormatCode
}
||||
|
||||
// PlanEncode returns an EncodePlan for encoding value as a PostgreSQL xml
// value. The wire representation is the same for text and binary formats, so
// the plan is chosen purely by the Go type of value. NOTE: the order of the
// switch cases below is load-bearing — see the inline issue links.
func (c *XMLCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
	switch value.(type) {
	case string:
		return encodePlanXMLCodecEitherFormatString{}
	case []byte:
		return encodePlanXMLCodecEitherFormatByteSlice{}

	// Cannot rely on driver.Valuer being handled later because anything can be marshalled.
	//
	// https://github.com/jackc/pgx/issues/1430
	//
	// Check for driver.Valuer must come before xml.Marshaler so that it is guaranteed to be used
	// when both are implemented https://github.com/jackc/pgx/issues/1805
	case driver.Valuer:
		return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}

	// Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
	// marshalled.
	//
	// https://github.com/jackc/pgx/issues/1681
	case xml.Marshaler:
		return &encodePlanXMLCodecEitherFormatMarshal{
			marshal: c.Marshal,
		}
	}

	// Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
	// appropriate wrappers here.
	for _, f := range []TryWrapEncodePlanFunc{
		TryWrapDerefPointerEncodePlan,
		TryWrapFindUnderlyingTypeEncodePlan,
	} {
		if wrapperPlan, nextValue, ok := f(value); ok {
			if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
				wrapperPlan.SetNext(nextPlan)
				return wrapperPlan
			}
		}
	}

	// Final fallback: marshal the value with the codec's Marshal function.
	return &encodePlanXMLCodecEitherFormatMarshal{
		marshal: c.Marshal,
	}
}
||||
|
||||
// encodePlanXMLCodecEitherFormatString encodes a Go string verbatim; the
// caller-supplied string is assumed to already be the xml text.
type encodePlanXMLCodecEitherFormatString struct{}

// Encode appends the string value to buf unchanged. It never fails.
func (encodePlanXMLCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
	s := value.(string)
	return append(buf, s...), nil
}
||||
|
||||
// encodePlanXMLCodecEitherFormatByteSlice encodes a []byte verbatim; a nil
// slice encodes as SQL NULL.
type encodePlanXMLCodecEitherFormatByteSlice struct{}

// Encode appends the byte-slice value to buf unchanged. A nil slice yields a
// nil buffer (NULL); it never fails.
func (encodePlanXMLCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
	b := value.([]byte)
	if b == nil {
		// nil slice represents SQL NULL.
		return nil, nil
	}
	return append(buf, b...), nil
}
||||
|
||||
// encodePlanXMLCodecEitherFormatMarshal encodes an arbitrary value by running
// it through the codec's marshal function and appending the result.
type encodePlanXMLCodecEitherFormatMarshal struct {
	marshal func(v any) ([]byte, error)
}

// Encode marshals value to xml bytes and appends them to buf. Any marshal
// error is returned unchanged.
func (e *encodePlanXMLCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
	out, err := e.marshal(value)
	if err != nil {
		return nil, err
	}
	return append(buf, out...), nil
}
||||
|
||||
// PlanScan returns a ScanPlan for scanning a PostgreSQL xml value into
// target. NOTE: the order of cases is load-bearing — sql.Scanner must be
// intercepted here because the unmarshal fallback would otherwise take
// precedence (see inline issue links).
func (c *XMLCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
	switch target.(type) {
	case *string:
		return scanPlanAnyToString{}

	case **string:
		// This is to fix **string scanning. It seems wrong to special case **string, but it's not clear what a better
		// solution would be.
		//
		// https://github.com/jackc/pgx/issues/1470 -- **string
		// https://github.com/jackc/pgx/issues/1691 -- ** anything else

		if wrapperPlan, nextDst, ok := TryPointerPointerScanPlan(target); ok {
			if nextPlan := m.planScan(oid, format, nextDst); nextPlan != nil {
				if _, failed := nextPlan.(*scanPlanFail); !failed {
					wrapperPlan.SetNext(nextPlan)
					return wrapperPlan
				}
			}
		}
		// No early return: a failed pointer-pointer plan falls through to
		// the unmarshal fallback below.

	case *[]byte:
		return scanPlanXMLToByteSlice{}
	case BytesScanner:
		return scanPlanBinaryBytesToBytesScanner{}

	// Cannot rely on sql.Scanner being handled later because scanPlanXMLToXMLUnmarshal will take precedence.
	//
	// https://github.com/jackc/pgx/issues/1418
	case sql.Scanner:
		return &scanPlanSQLScanner{formatCode: format}
	}

	// Fallback: attempt to unmarshal the raw xml bytes directly into target.
	return &scanPlanXMLToXMLUnmarshal{
		unmarshal: c.Unmarshal,
	}
}
||||
|
||||
// scanPlanXMLToByteSlice copies the raw xml bytes into a *[]byte destination.
type scanPlanXMLToByteSlice struct{}

// Scan copies src into the *[]byte pointed to by dst. A NULL (nil src) sets
// the destination slice to nil. The bytes are copied so the destination does
// not alias the source buffer.
func (scanPlanXMLToByteSlice) Scan(src []byte, dst any) error {
	p := dst.(*[]byte)
	if src == nil {
		*p = nil
		return nil
	}

	out := make([]byte, len(src))
	copy(out, src)
	*p = out
	return nil
}
||||
|
||||
// scanPlanXMLToXMLUnmarshal scans raw xml bytes into an arbitrary destination
// via the codec's unmarshal function.
type scanPlanXMLToXMLUnmarshal struct {
	unmarshal func(data []byte, v any) error
}

// Scan unmarshals src into dst. A NULL (nil src) zeroes the destination when
// it is a pointer to a nilable type or a struct; otherwise NULL is not
// representable and an error is returned. For non-NULL input the destination
// is zeroed first so fields from a previous scan do not survive.
func (s *scanPlanXMLToXMLUnmarshal) Scan(src []byte, dst any) error {
	if src == nil {
		if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
			elem := v.Elem()
			switch elem.Kind() {
			case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Struct:
				elem.Set(reflect.Zero(elem.Type()))
				return nil
			}
		}

		return fmt.Errorf("cannot scan NULL into %T", dst)
	}

	// Reset the destination before unmarshalling.
	target := reflect.ValueOf(dst).Elem()
	target.Set(reflect.Zero(target.Type()))

	return s.unmarshal(src, dst)
}
||||
|
||||
func (c *XMLCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { |
||||
if src == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
dstBuf := make([]byte, len(src)) |
||||
copy(dstBuf, src) |
||||
return dstBuf, nil |
||||
} |
||||
|
||||
func (c *XMLCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { |
||||
if src == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
var dst any |
||||
err := c.Unmarshal(src, &dst) |
||||
return dst, err |
||||
} |
||||
@ -0,0 +1,16 @@
|
||||
package puddle |
||||
|
||||
import "time" |
||||
|
||||
// nanotime returns the time in nanoseconds since process start.
//
// This approach, described at
// https://github.com/golang/go/issues/61765#issuecomment-1672090302,
// is fast, monotonic, and portable, and avoids the previous
// dependence on runtime.nanotime using the (unsafe) linkname hack.
// In particular, time.Since does less work than time.Now.
func nanotime() int64 {
	// time.Duration is an int64 nanosecond count, so this conversion is
	// exactly Duration.Nanoseconds().
	return int64(time.Since(globalStart))
}

// globalStart is captured once at package initialization and anchors every
// nanotime reading.
var globalStart = time.Now()
||||
@ -1,13 +0,0 @@
|
||||
//go:build purego || appengine || js

// This file contains the safe implementation of nanotime using time.Now().

package puddle

import (
	"time"
)

// nanotime returns the current time in nanoseconds since the Unix epoch.
//
// NOTE(review): this reads the wall clock via UnixNano, so unlike the
// runtime-linked variant it may jump if the system clock is adjusted —
// acceptable here only as the fallback for build modes that forbid linkname.
func nanotime() int64 {
	return time.Now().UnixNano()
}
||||
@ -1,12 +0,0 @@
|
||||
//go:build !purego && !appengine && !js

// This file contains the implementation of nanotime using runtime.nanotime.

package puddle

import "unsafe"

// Importing unsafe is required for the go:linkname directive below to be
// accepted by the toolchain; this dummy reference keeps the import alive.
var _ = unsafe.Sizeof(0)

// nanotime reports monotonic time in nanoseconds. The body is provided by the
// linker: the directive binds this declaration to the runtime's private
// runtime.nanotime function (an unsupported, unsafe technique).
//
//go:linkname nanotime runtime.nanotime
func nanotime() int64
||||
Loading…
Reference in new issue