4 changes: 2 additions & 2 deletions VERSION
@@ -1,2 +1,2 @@
go1.23.11
time 2025-07-02T21:47:15Z
go1.23.12-kb
time 2025-08-01T19:18:26Z
13 changes: 12 additions & 1 deletion src/cmd/cgo/out.go
@@ -943,6 +943,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(gotype, "struct {\n")
off := int64(0)
npad := 0
// the alignment is at least 1 (the alignment of char)
maxAlign := int64(1)
argField := func(typ ast.Expr, namePat string, args ...interface{}) {
name := fmt.Sprintf(namePat, args...)
t := p.cgoType(typ)
@@ -957,6 +959,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
noSourceConf.Fprint(gotype, fset, typ)
fmt.Fprintf(gotype, "\n")
off += t.Size
// keep track of the maximum alignment among all fields
// so that we can align the struct correctly
if t.Align > maxAlign {
maxAlign = t.Align
}
}
if fn.Recv != nil {
argField(fn.Recv.List[0].Type, "recv")
@@ -1039,7 +1046,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
// string.h for memset, and is also robust to C++
// types with constructors. Both GCC and LLVM optimize
// this into just zeroing _cgo_a.
fmt.Fprintf(fgcc, "\ttypedef %s %v _cgo_argtype;\n", ctype, p.packedAttribute())
//
// The struct should be aligned to the maximum alignment
// of any of its fields. This is to avoid alignment issues.
fmt.Fprintf(fgcc, "\ttypedef %s %v __attribute__((aligned(%d))) _cgo_argtype;\n", ctype, p.packedAttribute(), maxAlign)
fmt.Fprintf(fgcc, "\tstatic _cgo_argtype _cgo_zero;\n")
fmt.Fprintf(fgcc, "\t_cgo_argtype _cgo_a = _cgo_zero;\n")
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
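For context, a minimal sketch of the bookkeeping this hunk adds (the field type and maxAlignOf helper are invented for illustration and are not cgo's own code): as each exported argument or result is appended to the packed argument struct, the largest field alignment seen is recorded, and that value is what feeds the new __attribute__((aligned(%d))) on the generated _cgo_argtype typedef.

package main

import "fmt"

// field models the size and alignment cgo computes for each exported
// argument or result (illustrative only).
type field struct {
	size  int64
	align int64
}

// maxAlignOf mirrors the patch's bookkeeping: the alignment starts at 1
// (the alignment of char) and grows to the largest field alignment seen.
func maxAlignOf(fields []field) int64 {
	maxAlign := int64(1)
	for _, f := range fields {
		if f.align > maxAlign {
			maxAlign = f.align
		}
	}
	return maxAlign
}

func main() {
	// e.g. an exported function taking (int8, int64, int32)
	fields := []field{{1, 1}, {8, 8}, {4, 4}}
	fmt.Printf("__attribute__((aligned(%d)))\n", maxAlignOf(fields)) // aligned(8)
}

Without the explicit attribute, the packed struct has alignment 1, so the C-side _cgo_a local could presumably land at a less-aligned address than the corresponding Go struct layout expects; the added attribute rules that out.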
2 changes: 0 additions & 2 deletions src/database/sql/convert.go
@@ -335,7 +335,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
if rows == nil {
return errors.New("invalid context to convert cursor rows, missing parent *Rows")
}
rows.closemu.Lock()
*d = Rows{
dc: rows.dc,
releaseConn: func(error) {},
@@ -351,7 +350,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
parentCancel()
}
}
rows.closemu.Unlock()
return nil
}
}
47 changes: 22 additions & 25 deletions src/database/sql/fakedb_test.go
@@ -5,6 +5,7 @@
package sql

import (
"bytes"
"context"
"database/sql/driver"
"errors"
@@ -15,7 +16,6 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
@@ -91,8 +91,6 @@ func (cc *fakeDriverCtx) OpenConnector(name string) (driver.Connector, error) {
type fakeDB struct {
name string

useRawBytes atomic.Bool

mu sync.Mutex
tables map[string]*table
badConn bool
@@ -684,8 +682,6 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm
switch cmd {
case "WIPE":
// Nothing
case "USE_RAWBYTES":
c.db.useRawBytes.Store(true)
case "SELECT":
stmt, err = c.prepareSelect(stmt, parts)
case "CREATE":
@@ -789,9 +785,6 @@ func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (d
case "WIPE":
db.wipe()
return driver.ResultNoRows, nil
case "USE_RAWBYTES":
s.c.db.useRawBytes.Store(true)
return driver.ResultNoRows, nil
case "CREATE":
if err := db.createTable(s.table, s.colName, s.colType); err != nil {
return nil, err
@@ -1076,10 +1069,9 @@ type rowsCursor struct {
errPos int
err error

// a clone of slices to give out to clients, indexed by the
// original slice's first byte address. we clone them
// just so we're able to corrupt them on close.
bytesClone map[*byte][]byte
// Data returned to clients.
// We clone and stash it here so it can be invalidated by Close and Next.
driverOwnedMemory [][]byte

// Every operation writes to line to enable the race detector
// check for data races.
@@ -1096,9 +1088,19 @@ func (rc *rowsCursor) touchMem() {
rc.line++
}

func (rc *rowsCursor) invalidateDriverOwnedMemory() {
for _, buf := range rc.driverOwnedMemory {
for i := range buf {
buf[i] = 'x'
}
}
rc.driverOwnedMemory = nil
}

func (rc *rowsCursor) Close() error {
rc.touchMem()
rc.parentMem.touchMem()
rc.invalidateDriverOwnedMemory()
rc.closed = true
return rc.closeErr
}
@@ -1129,27 +1131,22 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
if rc.posRow >= len(rc.rows[rc.posSet]) {
return io.EOF // per interface spec
}
// Corrupt any previously returned bytes.
rc.invalidateDriverOwnedMemory()
for i, v := range rc.rows[rc.posSet][rc.posRow].cols {
// TODO(bradfitz): convert to subset types? naah, I
// think the subset types should only be input to
// driver, but the sql package should be able to handle
// a wider range of types coming out of drivers. all
// for ease of drivers, and to prevent drivers from
// messing up conversions or doing them differently.
dest[i] = v

if bs, ok := v.([]byte); ok && !rc.db.useRawBytes.Load() {
if rc.bytesClone == nil {
rc.bytesClone = make(map[*byte][]byte)
}
clone, ok := rc.bytesClone[&bs[0]]
if !ok {
clone = make([]byte, len(bs))
copy(clone, bs)
rc.bytesClone[&bs[0]] = clone
}
dest[i] = clone
if bs, ok := v.([]byte); ok {
// Clone []bytes and stash for later invalidation.
bs = bytes.Clone(bs)
rc.driverOwnedMemory = append(rc.driverOwnedMemory, bs)
v = bs
}
dest[i] = v
}
return nil
}
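The reworked rowsCursor exercises the contract that sql.RawBytes callers must respect: the bytes alias driver-owned memory and are only valid until the next Next, Scan, or Close, which is why the fake driver now deliberately corrupts previously returned buffers. A hedged usage sketch of that contract follows; the printNames helper, the people table, and the query text are assumptions for illustration, not part of the patch.

// Package rawbytesdemo sketches the RawBytes lifetime rule that the fake
// driver above now enforces by overwriting returned buffers.
package rawbytesdemo

import (
	"database/sql"
	"fmt"
)

// printNames assumes db was opened with some registered driver and that a
// "people" table with a "name" column exists (both are assumptions here).
func printNames(db *sql.DB) error {
	rows, err := db.Query("SELECT name FROM people")
	if err != nil {
		return err
	}
	defer rows.Close()

	var raw sql.RawBytes
	for rows.Next() {
		if err := rows.Scan(&raw); err != nil {
			return err
		}
		// raw aliases driver-owned memory: it is only valid until the next
		// call to Next, Scan, or Close, so copy it before moving on.
		name := string(raw)
		fmt.Println(name)
	}
	return rows.Err()
}

Converting raw to a string copies the bytes, so the value survives the following Next or Close call that invalidates the underlying buffer.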
26 changes: 12 additions & 14 deletions src/database/sql/sql.go
@@ -3360,38 +3360,36 @@ func (rs *Rows) Scan(dest ...any) error {
// without calling Next.
return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)")
}

rs.closemu.RLock()
rs.raw = rs.raw[:0]
err := rs.scanLocked(dest...)
if err == nil && scanArgsContainRawBytes(dest) {
rs.closemuScanHold = true
} else {
rs.closemu.RUnlock()
}
return err
}

func (rs *Rows) scanLocked(dest ...any) error {
if rs.lasterr != nil && rs.lasterr != io.EOF {
rs.closemu.RUnlock()
return rs.lasterr
}
if rs.closed {
err := rs.lasterrOrErrLocked(errRowsClosed)
rs.closemu.RUnlock()
return err
}

if scanArgsContainRawBytes(dest) {
rs.closemuScanHold = true
rs.raw = rs.raw[:0]
} else {
rs.closemu.RUnlock()
return rs.lasterrOrErrLocked(errRowsClosed)
}

if rs.lastcols == nil {
rs.closemuRUnlockIfHeldByScan()
return errors.New("sql: Scan called without calling Next")
}
if len(dest) != len(rs.lastcols) {
rs.closemuRUnlockIfHeldByScan()
return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
}

for i, sv := range rs.lastcols {
err := convertAssignRows(dest[i], sv, rs)
if err != nil {
rs.closemuRUnlockIfHeldByScan()
return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err)
}
}
64 changes: 56 additions & 8 deletions src/database/sql/sql_test.go
@@ -5,6 +5,7 @@
package sql

import (
"bytes"
"context"
"database/sql/driver"
"errors"
@@ -4446,10 +4447,6 @@ func testContextCancelDuringRawBytesScan(t *testing.T, mode string) {
db := newTestDB(t, "people")
defer closeDB(t, db)

if _, err := db.Exec("USE_RAWBYTES"); err != nil {
t.Fatal(err)
}

// cancel used to call close asynchronously.
// This test checks that it waits so as not to interfere with RawBytes.
ctx, cancel := context.WithCancel(context.Background())
@@ -4541,6 +4538,61 @@ func TestContextCancelBetweenNextAndErr(t *testing.T) {
}
}

type testScanner struct {
scanf func(src any) error
}

func (ts testScanner) Scan(src any) error { return ts.scanf(src) }

func TestContextCancelDuringScan(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

scanStart := make(chan any)
scanEnd := make(chan error)
scanner := &testScanner{
scanf: func(src any) error {
scanStart <- src
return <-scanEnd
},
}

// Start a query, and pause it mid-scan.
want := []byte("Alice")
r, err := db.QueryContext(ctx, "SELECT|people|name|name=?", string(want))
if err != nil {
t.Fatal(err)
}
if !r.Next() {
t.Fatalf("r.Next() = false, want true")
}
go func() {
r.Scan(scanner)
}()
got := <-scanStart
defer close(scanEnd)
gotBytes, ok := got.([]byte)
if !ok {
t.Fatalf("r.Scan returned %T, want []byte", got)
}
if !bytes.Equal(gotBytes, want) {
t.Fatalf("before cancel: r.Scan returned %q, want %q", gotBytes, want)
}

// Cancel the query.
// Sleep to give it a chance to finish canceling.
cancel()
time.Sleep(10 * time.Millisecond)

// Cancelling the query should not have changed the result.
if !bytes.Equal(gotBytes, want) {
t.Fatalf("after cancel: r.Scan result is now %q, want %q", gotBytes, want)
}
}

func TestNilErrorAfterClose(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
@@ -4574,10 +4626,6 @@ func TestRawBytesReuse(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)

if _, err := db.Exec("USE_RAWBYTES"); err != nil {
t.Fatal(err)
}

var raw RawBytes

// The RawBytes in this query aliases driver-owned memory.