diff --git a/VERSION b/VERSION
new file mode 100644
index 00000000000000..e6644419e6a038
--- /dev/null
+++ b/VERSION
@@ -0,0 +1,2 @@
+go1.23.11
+time 2025-07-02T21:47:15Z
diff --git a/codereview.cfg b/codereview.cfg
index 77a74f108eae36..3cf4bb2dd376de 100644
--- a/codereview.cfg
+++ b/codereview.cfg
@@ -1 +1,2 @@
-branch: master
+branch: release-branch.go1.23
+parent-branch: master
diff --git a/doc/godebug.md b/doc/godebug.md
index b3a43664c42cd4..9d3c810cdc227d 100644
--- a/doc/godebug.md
+++ b/doc/godebug.md
@@ -209,6 +209,10 @@ when serving an error.
 This behavior is controlled by the
 [`httpservecontentkeepheaders` setting](/pkg/net/http#ServeContent).
 Using `httpservecontentkeepheaders=1` restores the pre-Go 1.23 behavior.
 
+Go 1.23.11 disabled build information stamping when multiple VCS are detected, due
+to concerns around VCS injection attacks. This behavior can be re-enabled with the
+setting `allowmultiplevcs=1`.
+
 ### Go 1.22
 
 Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index 6c23e59adf19eb..be93c4a24bb566 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -2579,6 +2579,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
 		if dt.BitSize > 0 {
 			fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype)
 		}
+
+		if t.Align = t.Size; t.Align >= c.ptrSize {
+			t.Align = c.ptrSize
+		}
+
 		switch t.Size {
 		default:
 			fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype)
@@ -2595,9 +2600,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
 				Len: c.intExpr(t.Size),
 				Elt: c.uint8,
 			}
-		}
-		if t.Align = t.Size; t.Align >= c.ptrSize {
-			t.Align = c.ptrSize
+			// t.Align is the alignment of the Go type.
+			t.Align = 1
 		}
 
 	case *dwarf.PtrType:
@@ -2826,6 +2830,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
 		if dt.BitSize > 0 {
 			fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype)
 		}
+
+		if t.Align = t.Size; t.Align >= c.ptrSize {
+			t.Align = c.ptrSize
+		}
+
 		switch t.Size {
 		default:
 			fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype)
@@ -2842,9 +2851,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
 				Len: c.intExpr(t.Size),
 				Elt: c.uint8,
 			}
-		}
-		if t.Align = t.Size; t.Align >= c.ptrSize {
-			t.Align = c.ptrSize
+			// t.Align is the alignment of the Go type.
+			t.Align = 1
 		}
 
 	case *dwarf.VoidType:
@@ -3110,10 +3118,11 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
 		}
 
 		// Round off up to talign, assumed to be a power of 2.
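+		// (off + talign - 1) &^ (talign - 1) rounds off up to the next
+		// multiple of talign. origOff below remembers the offset from
+		// before this rounding, so any padding inserted covers the whole
+		// gap since the previous field, not just the post-rounding part.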
+		origOff := off
 		off = (off + talign - 1) &^ (talign - 1)
 		if f.ByteOffset > off {
-			fld, sizes = c.pad(fld, sizes, f.ByteOffset-off)
+			fld, sizes = c.pad(fld, sizes, f.ByteOffset-origOff)
 			off = f.ByteOffset
 		}
 		if f.ByteOffset < off {
diff --git a/src/cmd/cgo/internal/test/cgo_test.go b/src/cmd/cgo/internal/test/cgo_test.go
index 5e02888b3dddd9..5393552e07a4d1 100644
--- a/src/cmd/cgo/internal/test/cgo_test.go
+++ b/src/cmd/cgo/internal/test/cgo_test.go
@@ -70,6 +70,7 @@ func Test31891(t *testing.T) { test31891(t) }
 func Test42018(t *testing.T) { test42018(t) }
 func Test45451(t *testing.T) { test45451(t) }
 func Test49633(t *testing.T) { test49633(t) }
+func Test69086(t *testing.T) { test69086(t) }
 func TestAlign(t *testing.T) { testAlign(t) }
 func TestAtol(t *testing.T) { testAtol(t) }
 func TestBlocking(t *testing.T) { testBlocking(t) }
diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go
index 374689631d77ab..362be79a737bee 100644
--- a/src/cmd/cgo/internal/test/test.go
+++ b/src/cmd/cgo/internal/test/test.go
@@ -940,6 +940,19 @@ typedef struct {
 } issue67517struct;
 static void issue67517(issue67517struct* p) {}
 
+// Issue 69086.
+// GCC added the __int128 type in GCC 4.6, released in 2011.
+typedef struct {
+	int a;
+#ifdef __SIZEOF_INT128__
+	unsigned __int128 b;
+#else
+	uint64_t b;
+#endif
+	unsigned char c;
+} issue69086struct;
+static int issue690861(issue69086struct* p) { p->b = 1234; return p->c; }
+static int issue690862(unsigned long ul1, unsigned long ul2, unsigned int u, issue69086struct s) { return (int)(s.b); }
 */
 import "C"
 
@@ -2349,3 +2362,24 @@ func issue67517() {
 		b: nil,
 	})
 }
+
+// Issue 69086.
+func test69086(t *testing.T) {
+	var s C.issue69086struct
+
+	typ := reflect.TypeOf(s)
+	for i := 0; i < typ.NumField(); i++ {
+		f := typ.Field(i)
+		t.Logf("field %d: name %s size %d align %d offset %d", i, f.Name, f.Type.Size(), f.Type.Align(), f.Offset)
+	}
+
+	s.c = 1
+	got := C.issue690861(&s)
+	if got != 1 {
+		t.Errorf("field: got %d, want 1", got)
+	}
+	got = C.issue690862(1, 2, 3, s)
+	if got != 1234 {
+		t.Errorf("call: got %d, want 1234", got)
+	}
+}
diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go
index a8eebead25dc9f..c263b82d5768f4 100644
--- a/src/cmd/cgo/internal/testcarchive/carchive_test.go
+++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go
@@ -33,7 +33,7 @@ import (
 	"unicode"
 )
 
-var globalSkip = func(t *testing.T) {}
+var globalSkip = func(t testing.TB) {}
 
 // Program to run.
 var bin []string
@@ -59,12 +59,12 @@ func TestMain(m *testing.M) {
 
 func testMain(m *testing.M) int {
 	if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
-		globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
+		globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
 		return m.Run()
 	}
 	if runtime.GOOS == "linux" {
 		if _, err := os.Stat("/etc/alpine-release"); err == nil {
-			globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
+			globalSkip = func(t testing.TB) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
 			return m.Run()
 		}
 	}
@@ -1291,8 +1291,8 @@ func TestPreemption(t *testing.T) {
 	}
 }
 
-// Issue 59294. Test calling Go function from C after using some
-// stack space.
+// Issues 59294 and 68285. Test calling a Go function from C with
+// various amounts of stack space in use.
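+// The C driver (main9.c) alternates shallow and deep C stacks around each
+// call into Go, and exercises both plain C callers and callers with a Go
+// frame (panic/recover) on the stack.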
 func TestDeepStack(t *testing.T) {
 	globalSkip(t)
 	testenv.MustHaveGoBuild(t)
@@ -1350,6 +1350,53 @@ func TestDeepStack(t *testing.T) {
 	}
 }
 
+func BenchmarkCgoCallbackMainThread(b *testing.B) {
+	// Benchmark for calling into Go from the C main thread.
+	// See issue #68587.
+	//
+	// It uses a subprocess, which is a C binary that calls
+	// Go on the main thread b.N times. There is some overhead
+	// for launching the subprocess. It is probably fine when
+	// b.N is large.
+
+	globalSkip(b)
+	testenv.MustHaveGoBuild(b)
+	testenv.MustHaveCGO(b)
+	testenv.MustHaveBuildMode(b, "c-archive")
+
+	if !testWork {
+		defer func() {
+			os.Remove("testp10" + exeSuffix)
+			os.Remove("libgo10.a")
+			os.Remove("libgo10.h")
+		}()
+	}
+
+	cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo10.a", "./libgo10")
+	out, err := cmd.CombinedOutput()
+	b.Logf("%v\n%s", cmd.Args, out)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	ccArgs := append(cc, "-o", "testp10"+exeSuffix, "main10.c", "libgo10.a")
+	out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+	b.Logf("%v\n%s", ccArgs, out)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	argv := cmdToRun("./testp10")
+	argv = append(argv, fmt.Sprint(b.N))
+	cmd = exec.Command(argv[0], argv[1:]...)
+
+	b.ResetTimer()
+	err = cmd.Run()
+	if err != nil {
+		b.Fatal(err)
+	}
+}
+
 func TestSharedObject(t *testing.T) {
 	// Test that we can put a Go c-archive into a C shared object.
 	globalSkip(t)
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go
new file mode 100644
index 00000000000000..803a0fa5f1cb35
--- /dev/null
+++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "C"
+
+//export GoF
+func GoF() {}
+
+func main() {}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
index acb08d90ecd5bf..3528bef654ddb3 100644
--- a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
+++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
@@ -6,9 +6,29 @@ package main
 
 import "runtime"
 
+// extern void callGoWithVariousStack(int);
 import "C"
 
 func main() {}
 
 //export GoF
-func GoF() { runtime.GC() }
+func GoF(p int32) {
+	runtime.GC()
+	if p != 0 {
+		panic("panic")
+	}
+}
+
+//export callGoWithVariousStackAndGoFrame
+func callGoWithVariousStackAndGoFrame(p int32) {
+	if p != 0 {
+		defer func() {
+			e := recover()
+			if e == nil {
+				panic("did not panic")
+			}
+			runtime.GC()
+		}()
+	}
+	C.callGoWithVariousStack(C.int(p))
+}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main10.c b/src/cmd/cgo/internal/testcarchive/testdata/main10.c
new file mode 100644
index 00000000000000..53c3c83a99e35c
--- /dev/null
+++ b/src/cmd/cgo/internal/testcarchive/testdata/main10.c
@@ -0,0 +1,22 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
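+
+// C driver for BenchmarkCgoCallbackMainThread: calls the exported Go
+// function GoF (from libgo10) argv[1] times on the C main thread.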
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "libgo10.h"
+
+int main(int argc, char **argv) {
+	int n, i;
+
+	if (argc != 2) {
+		perror("wrong arg");
+		return 2;
+	}
+	n = atoi(argv[1]);
+	for (i = 0; i < n; i++)
+		GoF();
+
+	return 0;
+}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main9.c b/src/cmd/cgo/internal/testcarchive/testdata/main9.c
index 95ad4dea49fb1a..e641d8a8027a5f 100644
--- a/src/cmd/cgo/internal/testcarchive/testdata/main9.c
+++ b/src/cmd/cgo/internal/testcarchive/testdata/main9.c
@@ -6,19 +6,27 @@ void use(int *x) { (*x)++; }
 
-void callGoFWithDeepStack() {
+void callGoFWithDeepStack(int p) {
 	int x[10000];
 
 	use(&x[0]);
 	use(&x[9999]);
 
-	GoF();
+	GoF(p);
 
 	use(&x[0]);
 	use(&x[9999]);
 }
 
+void callGoWithVariousStack(int p) {
+	GoF(0);                  // call GoF without using much stack
+	callGoFWithDeepStack(p); // call GoF with a deep stack
+	GoF(0);                  // again on a shallow stack
+}
+
 int main() {
-	GoF();                  // call GoF without using much stack
-	callGoFWithDeepStack(); // call GoF with a deep stack
+	callGoWithVariousStack(0);
+
+	callGoWithVariousStackAndGoFrame(0); // normal execution
+	callGoWithVariousStackAndGoFrame(1); // panic and recover
 }
diff --git a/src/cmd/cgo/internal/testsanitizers/asan_test.go b/src/cmd/cgo/internal/testsanitizers/asan_test.go
index 7db356244a85a8..20f6ef2d242f1b 100644
--- a/src/cmd/cgo/internal/testsanitizers/asan_test.go
+++ b/src/cmd/cgo/internal/testsanitizers/asan_test.go
@@ -65,6 +65,8 @@ func TestASAN(t *testing.T) {
 		{src: "asan_global3_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global3_fail.go:13"},
 		{src: "asan_global4_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global4_fail.go:21"},
 		{src: "asan_global5.go"},
+		{src: "asan_global_asm"},
+		{src: "asan_global_asm2_fail", memoryAccessError: "global-buffer-overflow", errorLocation: "main.go:17"},
 		{src: "arena_fail.go", memoryAccessError: "use-after-poison", errorLocation: "arena_fail.go:26", experiments: []string{"arenas"}},
 	}
 	for _, tc := range cases {
diff --git a/src/cmd/cgo/internal/testsanitizers/cc_test.go b/src/cmd/cgo/internal/testsanitizers/cc_test.go
index e650de835ab42e..c493597ded898a 100644
--- a/src/cmd/cgo/internal/testsanitizers/cc_test.go
+++ b/src/cmd/cgo/internal/testsanitizers/cc_test.go
@@ -536,7 +536,7 @@ func (c *config) checkRuntime() (skip bool, err error) {
 
 // srcPath returns the path to the given file relative to this test's source tree.
 func srcPath(path string) string {
-	return filepath.Join("testdata", path)
+	return "./testdata/" + path
 }
 
 // A tempDir manages a temporary directory within a test.
diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s
new file mode 100644
index 00000000000000..b4b9766f57ac60
--- /dev/null
+++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s
@@ -0,0 +1,8 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+DATA ·x(SB)/8, $123
+GLOBL ·x(SB), NOPTR, $8
diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go
new file mode 100644
index 00000000000000..2ae54486f34c2b
--- /dev/null
+++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +var x uint64 + +func main() { + println(x) +} diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s new file mode 100644 index 00000000000000..b4b9766f57ac60 --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s @@ -0,0 +1,8 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +DATA ·x(SB)/8, $123 +GLOBL ·x(SB), NOPTR, $8 diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go new file mode 100644 index 00000000000000..2d02a1b5426179 --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go @@ -0,0 +1,20 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "unsafe" + +var x uint64 + +func main() { + bar(&x) +} + +func bar(a *uint64) { + p := (*uint64)(unsafe.Add(unsafe.Pointer(a), 1*unsafe.Sizeof(uint64(1)))) + if *p == 10 { // BOOM + println("its value is 10") + } +} diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go index 2675a16a241fe3..32f5a771a34a66 100644 --- a/src/cmd/compile/internal/escape/solve.go +++ b/src/cmd/compile/internal/escape/solve.go @@ -318,9 +318,10 @@ func containsClosure(f, c *ir.Func) bool { return false } - // Closures within function Foo are named like "Foo.funcN..." - // TODO(mdempsky): Better way to recognize this. - fn := f.Sym().Name - cn := c.Sym().Name - return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.' + for p := c.ClosureParent; p != nil; p = p.ClosureParent { + if p == f { + return true + } + } + return false } diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go index 7fe4445dad7638..ffeddea0c9d588 100644 --- a/src/cmd/compile/internal/importer/gcimporter_test.go +++ b/src/cmd/compile/internal/importer/gcimporter_test.go @@ -582,6 +582,23 @@ func TestIssue25596(t *testing.T) { compileAndImportPkg(t, "issue25596") } +func TestIssue70394(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + pkg := compileAndImportPkg(t, "alias") + obj := lookupObj(t, pkg.Scope(), "A") + + typ := obj.Type() + if _, ok := typ.(*types2.Alias); !ok { + t.Fatalf("type of %s is %s, wanted an alias", obj, typ) + } +} + func importPkg(t *testing.T, path, srcDir string) *types2.Package { pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil) if err != nil { diff --git a/src/cmd/compile/internal/importer/testdata/alias.go b/src/cmd/compile/internal/importer/testdata/alias.go new file mode 100644 index 00000000000000..51492fc943ea0c --- /dev/null +++ b/src/cmd/compile/internal/importer/testdata/alias.go @@ -0,0 +1,7 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
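+
+// This package provides the exported alias A for TestIssue70394, which
+// checks that the importer records A as a types2.Alias rather than
+// expanding it to int32.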
+
+package testdata
+
+type A = int32
diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go
index d3c7d4516f7ee8..9d267e6db411c0 100644
--- a/src/cmd/compile/internal/importer/ureader.go
+++ b/src/cmd/compile/internal/importer/ureader.go
@@ -9,6 +9,7 @@ import (
 	"cmd/compile/internal/syntax"
 	"cmd/compile/internal/types2"
 	"cmd/internal/src"
+	"internal/buildcfg"
 	"internal/pkgbits"
 )
 
@@ -28,11 +29,9 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input
 
 	pr := pkgReader{
 		PkgDecoder: input,
 
-		ctxt:    ctxt,
-		imports: imports,
-		// Currently, the compiler panics when using Alias types.
-		// TODO(gri) set to true once this is fixed (issue #66873)
-		enableAlias: false,
+		ctxt:        ctxt,
+		imports:     imports,
+		enableAlias: true,
 
 		posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
 		pkgs:     make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),
@@ -411,6 +410,14 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) {
 			panic("weird")
 
 		case pkgbits.ObjAlias:
+			if buildcfg.Experiment.AliasTypeParams && len(r.dict.bounds) > 0 {
+				// Temporary work-around for issue #68526: rather than panicking
+				// with a non-descriptive index-out-of-bounds panic when trying
+				// to access a missing type parameter, instead panic with a more
+				// descriptive error. Only needed for Go 1.23; Go 1.24 will have
+				// the correct implementation.
+				panic("importing generic type aliases is not supported in Go 1.23 (see issue #68526)")
+			}
 			pos := r.pos()
 			typ := r.typ()
 			return newAliasTypeName(pr.enableAlias, pos, objPkg, objName, typ)
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index d0c8ee359befff..4fa9055b4b2c0b 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -51,6 +51,8 @@ import (
 // the generated ODCLFUNC, but there is no
 // pointer from the Func back to the OMETHVALUE.
 type Func struct {
+	// If you add or remove a field, don't forget to update sizeof_test.go.
+
 	miniNode
 	Body Nodes
 
@@ -76,6 +78,9 @@ type Func struct {
 	// Populated during walk.
 	Closures []*Func
 
+	// ClosureParent is the parent function of a closure, if any.
+	ClosureParent *Func
+
 	// Parents records the parent scope of each scope within a
 	// function. The root scope (0) has no parent, so the i'th
 	// scope's parent is stored at Parents[i-1].
@@ -512,6 +517,7 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, fn.Nname.Defn = fn pkg.Funcs = append(pkg.Funcs, fn) + fn.ClosureParent = outerfn return fn } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 68d2865595b716..6331cceb4a59b4 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 176, 296}, + {Func{}, 180, 304}, {Name{}, 96, 168}, } diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go index 97ab254395332a..e488c3cf377cae 100644 --- a/src/cmd/compile/internal/rangefunc/rangefunc_test.go +++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go @@ -2099,3 +2099,27 @@ func TestTwoLevelReturnCheck(t *testing.T) { t.Errorf("Expected y=3, got y=%d\n", y) } } + +func Bug70035(s1, s2, s3 []string) string { + var c1 string + for v1 := range slices.Values(s1) { + var c2 string + for v2 := range slices.Values(s2) { + var c3 string + for v3 := range slices.Values(s3) { + c3 = c3 + v3 + } + c2 = c2 + v2 + c3 + } + c1 = c1 + v1 + c2 + } + return c1 +} + +func Test70035(t *testing.T) { + got := Bug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"}) + want := "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC" + if got != want { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 606171947bbd75..67344d42ebb7e8 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -885,8 +885,8 @@ func init() { inputs: []regMask{buildReg("DI")}, clobbers: buildReg("DI"), }, - faultOnNilArg0: true, - unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts + //faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point + unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts }, // arg0 = address of memory to zero @@ -923,10 +923,10 @@ func init() { inputs: []regMask{buildReg("DI"), buildReg("SI")}, clobbers: buildReg("DI SI X0"), // uses X0 as a temporary }, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts + clobberFlags: true, + //faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point + //faultOnNilArg1: true, + unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts }, // arg0 = destination pointer diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go index fa18b674cc005c..55bec6bfa8696a 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go @@ -536,8 +536,8 @@ func init() { inputs: []regMask{buildReg("R20")}, clobbers: buildReg("R16 R17 R20 R30"), }, - faultOnNilArg0: true, - unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts + //faultOnNilArg0: true, // Note: removed for 73748. 
TODO: reenable at some point + unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts }, // large zeroing @@ -577,9 +577,9 @@ func init() { inputs: []regMask{buildReg("R21"), buildReg("R20")}, clobbers: buildReg("R16 R17 R20 R21 R26 R30"), }, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts + //faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point + //faultOnNilArg1: true, + unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts }, // large move diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 62472cc94eb8a5..f99425d26f81bc 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -41,11 +41,12 @@ type Func struct { ABISelf *abi.ABIConfig // ABI for function being compiled ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions. - scheduled bool // Values in Blocks are in final order - laidout bool // Blocks are ordered - NoSplit bool // true if function is marked as nosplit. Used by schedule check pass. - dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName) - IsPgoHot bool + scheduled bool // Values in Blocks are in final order + laidout bool // Blocks are ordered + NoSplit bool // true if function is marked as nosplit. Used by schedule check pass. + dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName) + IsPgoHot bool + HasDeferRangeFunc bool // if true, needs a deferreturn so deferrangefunc can use it for recover() return PC // when register allocation is done, maps value ids to locations RegAlloc []Location diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 847d62c0a5910a..ae5fe2f1ca41ed 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -13649,11 +13649,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ {0, 128}, // DI @@ -13723,13 +13722,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ {0, 128}, // DI @@ -22619,11 +22616,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ {0, 1048576}, // R20 @@ -22645,12 +22641,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ {0, 2097152}, // R21 diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index 1caccb7c18d3c4..71acefbf8ae526 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ 
b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -252,6 +252,7 @@ func writebarrier(f *Func) {
 	var start, end int
 	var nonPtrStores int
 	values := b.Values
+	hasMove := false
 FindSeq:
 	for i := len(values) - 1; i >= 0; i-- {
 		w := values[i]
@@ -263,6 +264,9 @@ func writebarrier(f *Func) {
 				end = i + 1
 			}
 			nonPtrStores = 0
+			if w.Op == OpMoveWB {
+				hasMove = true
+			}
 		case OpVarDef, OpVarLive:
 			continue
 		case OpStore:
@@ -273,6 +277,17 @@ func writebarrier(f *Func) {
 			if nonPtrStores > 2 {
 				break FindSeq
 			}
+			if hasMove {
+				// We need to ensure that this store happens
+				// before we issue a wbMove, as the wbMove might
+				// use the result of this store as its source.
+				// Even though this store is not write-barrier
+				// eligible, it might nevertheless be the store
+				// of a pointer to the stack, which is then the
+				// source of the move.
+				// See issue 71228.
+				break FindSeq
+			}
 		default:
 			if last == nil {
 				continue
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 26d236dcacafb4..85e3e8ac9fe459 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -5390,6 +5390,9 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExt
 				callABI = s.f.ABI1
 			}
 		}
+		if fn := n.Fun.Sym().Name; n.Fun.Sym().Pkg == ir.Pkgs.Runtime && fn == "deferrangefunc" {
+			s.f.HasDeferRangeFunc = true
+		}
 		break
 	}
 	closure = s.expr(fn)
@@ -7513,10 +7516,13 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
 		// nop (which will never execute) after the call.
 		Arch.Ginsnop(s.pp)
 	}
-	if openDeferInfo != nil {
+	if openDeferInfo != nil || f.HasDeferRangeFunc {
 		// When doing open-coded defers, generate a disconnected call to
 		// deferreturn and a return. This will be used during panic
 		// recovery to unwind the stack and return back to the runtime.
+		//
+		// deferrangefunc needs to be sure that at least one of these exists;
+		// if all returns are dead-code eliminated, there might not be.
 		s.pp.NextLive = s.livenessMap.DeferReturn
 		p := s.pp.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
index 5148d5db034142..07f35b1854acaf 100644
--- a/src/cmd/compile/internal/types2/alias.go
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -134,10 +134,10 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
 // newAliasInstance creates a new alias instance for the given origin and type
 // arguments, recording pos as the position of its synthetic object (for error
 // reporting).
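+// The expanding parameter is forwarded to check.subst so that substitution
+// of the alias RHS knows which Named type, if any, is currently being
+// expanded (nil otherwise).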
-func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias { +func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, expanding *Named, ctxt *Context) *Alias { assert(len(targs) > 0) obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) - rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt) + rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), expanding, ctxt) res := check.newAlias(obj, rhs) res.orig = orig res.tparams = orig.tparams diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 5126ac51116cd9..a6b105ace5cc33 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2898,22 +2898,48 @@ func TestFileVersions(t *testing.T) { fileVersion string wantVersion string }{ - {"", "", ""}, // no versions specified - {"go1.19", "", "go1.19"}, // module version specified - {"", "go1.20", ""}, // file upgrade ignored - {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted - {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "", "go1"}, // no file version specified + {"go1", "goo1.22", "go1"}, // invalid file version specified + {"go1", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.19", "", "go1.19"}, // no file version specified + {"go1.19", "goo1.22", "go1.19"}, // invalid file version specified + {"go1.19", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.19", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.19", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.20", "", "go1.20"}, // no file version specified + {"go1.20", "goo1.22", "go1.20"}, // invalid file version specified + {"go1.20", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.20", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.21", "", "go1.21"}, // no file version specified + {"go1.21", "goo1.22", "go1.21"}, // invalid file version specified + {"go1.21", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.21", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.22", "", "go1.22"}, // no file version specified + {"go1.22", "goo1.22", "go1.22"}, // invalid file version specified + {"go1.22", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.22", "go1.22", "go1.22"}, // file version specified above 1.21 // versions containing release numbers // 
(file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified - {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored - {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted - {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.20.1", "go1.19.1", "go1.20.1"}, // invalid file version + {"go1.20.1", "go1.21.1", "go1.20.1"}, // invalid file version + {"go1.21.1", "go1.19.1", "go1.21.1"}, // invalid file version + {"go1.21.1", "go1.21.1", "go1.21.1"}, // invalid file version + {"go1.22.1", "go1.19.1", "go1.22.1"}, // invalid file version + {"go1.22.1", "go1.21.1", "go1.22.1"}, // invalid file version } { var src string if test.fileVersion != "" { diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 91ad474e9df315..ada421ba939ed4 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -327,7 +327,6 @@ func (check *Checker) initFiles(files []*syntax.File) { check.errorf(files[0], TooNew, "package requires newer Go version %v (application built with %v)", check.version, go_current) } - downgradeOk := check.version.cmp(go1_21) >= 0 // determine Go version for each file for _, file := range check.files { @@ -336,33 +335,18 @@ func (check *Checker) initFiles(files []*syntax.File) { // unlike file versions which are Go language versions only, if valid.) v := check.conf.GoVersion - fileVersion := asGoVersion(file.GoVersion) - if fileVersion.isValid() { - // use the file version, if applicable - // (file versions are either the empty string or of the form go1.dd) - if pkgVersionOk { - cmp := fileVersion.cmp(check.version) - // Go 1.21 introduced the feature of setting the go.mod - // go line to an early version of Go and allowing //go:build lines - // to “upgrade” (cmp > 0) the Go version in a given file. - // We can do that backwards compatibly. - // - // Go 1.21 also introduced the feature of allowing //go:build lines - // to “downgrade” (cmp < 0) the Go version in a given file. - // That can't be done compatibly in general, since before the - // build lines were ignored and code got the module's Go version. - // To work around this, downgrades are only allowed when the - // module's Go version is Go 1.21 or later. - // - // If there is no valid check.version, then we don't really know what - // Go version to apply. - // Legacy tools may do this, and they historically have accepted everything. - // Preserve that behavior by ignoring //go:build constraints entirely in that - // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { - v = file.GoVersion - } - } + // If the file specifies a version, use max(fileVersion, go1.21). + if fileVersion := asGoVersion(file.GoVersion); fileVersion.isValid() { + // Go 1.21 introduced the feature of allowing //go:build lines + // to sometimes set the Go version in a given file. Versions Go 1.21 and later + // can be set backwards compatibly as that was the first version + // files with go1.21 or later build tags could be built with. 
+ // + // Set the version to max(fileVersion, go1.21): That will allow a + // downgrade to a version before go1.22, where the for loop semantics + // change was made, while being backwards compatible with versions of + // go before the new //go:build semantics were introduced. + v = string(versionMax(fileVersion, go1_21)) // Report a specific error for each tagged file that's too new. // (Normally the build system will have filtered files by version, @@ -377,6 +361,13 @@ func (check *Checker) initFiles(files []*syntax.File) { } } +func versionMax(a, b goVersion) goVersion { + if a.cmp(b) > 0 { + return a + } + return b +} + // A bailout panic is used for early termination. type bailout struct{} diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go index 72227ab12256dd..308d1f550ad4fa 100644 --- a/src/cmd/compile/internal/types2/instantiate.go +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/syntax" "errors" "fmt" + "internal/buildcfg" . "internal/types/errors" ) @@ -126,8 +127,9 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily case *Alias: - // TODO(gri) is this correct? - assert(expanding == nil) // Alias instances cannot be reached from Named types + if !buildcfg.Experiment.AliasTypeParams { + assert(expanding == nil) // Alias instances cannot be reached from Named types + } tparams := orig.TypeParams() // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) @@ -138,7 +140,7 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e return orig // nothing to do (minor optimization) } - return check.newAliasInstance(pos, orig, targs, ctxt) + return check.newAliasInstance(pos, orig, targs, expanding, ctxt) case *Signature: assert(expanding == nil) // function instances cannot be reached from Named types diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go index 20e3f52facd9de..b339def7354e28 100644 --- a/src/cmd/compile/internal/types2/issues_test.go +++ b/src/cmd/compile/internal/types2/issues_test.go @@ -1121,3 +1121,23 @@ func f(x int) { t.Errorf("got: %s want: %s", got, want) } } + +func TestIssue68877(t *testing.T) { + const src = ` +package p + +type ( + S struct{} + A = S + T A +)` + + conf := Config{EnableAlias: true} + pkg := mustTypecheck(src, &conf, nil) + T := pkg.Scope().Lookup("T").(*TypeName) + got := T.String() // this must not panic (was issue) + const want = "type p.T struct{}" + if got != want { + t.Errorf("got %s, want %s", got, want) + } +} diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go index 1859b27aa4edfb..02b5ecf1669ea5 100644 --- a/src/cmd/compile/internal/types2/named.go +++ b/src/cmd/compile/internal/types2/named.go @@ -282,7 +282,7 @@ func (t *Named) cleanup() { if t.TypeArgs().Len() == 0 { panic("nil underlying") } - case *Named: + case *Named, *Alias: t.under() // t.under may add entries to check.cleaners } t.check = nil diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go index 650ae846a61e85..7c4cd732501e43 100644 --- a/src/cmd/compile/internal/types2/subst.go +++ b/src/cmd/compile/internal/types2/subst.go @@ -115,7 +115,7 @@ func (subst *subster) typ(typ Type) Type { // that has a type argument for it. 
targs, updated := subst.typeList(t.TypeArgs().list()) if updated { - return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt) + return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.expanding, subst.ctxt) } case *Array: diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index 0457502e393942..a2d9e42c615ca4 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -131,8 +131,8 @@ func (s *_TypeSet) underIs(f func(Type) bool) bool { } for _, t := range s.terms { assert(t.typ != nil) - // x == under(x) for ~x terms - u := t.typ + // Unalias(x) == under(x) for ~x terms + u := Unalias(t.typ) if !t.tilde { u = under(u) } diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 49f02012d3103a..484fef03d10a16 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -9,7 +9,7 @@ require ( golang.org/x/mod v0.19.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.22.0 - golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 + golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 golang.org/x/term v0.20.0 golang.org/x/tools v0.22.1-0.20240618181713-f2d2ebe43e72 ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index ee671f95122344..919dbd2dc74c74 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -16,8 +16,8 @@ golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 h1:+bltxAtk8YFEQ61B/lcYQM8e+7XjLwSDbpspVaVYkz8= -golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701/go.mod h1:amNmu/SBSm2GAF3X+9U2C0epLocdh+r5Z+7oMYO5cLM= +golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 h1:Lj8KbuZmoFUbI6pQ28G3Diz/5bRYD2UY5vfAmhrLZWo= +golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147/go.mod h1:amNmu/SBSm2GAF3X+9U2C0epLocdh+r5Z+7oMYO5cLM= golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 7c402b419ea0ca..d45b29659942e6 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -2409,7 +2409,6 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) { var repoDir string var vcsCmd *vcs.Cmd var err error - const allowNesting = true wantVCS := false switch cfg.BuildBuildvcs { @@ -2429,7 +2428,7 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) { // (so the bootstrap toolchain packages don't even appear to be in GOROOT). goto omitVCS } - repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "", allowNesting) + repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "") if err != nil && !errors.Is(err, os.ErrNotExist) { setVCSError(err) return @@ -2452,10 +2451,11 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) { } if repoDir != "" && vcsCmd.Status != nil { // Check that the current directory, package, and module are in the same - // repository. vcs.FromDir allows nested Git repositories, but nesting - // is not allowed for other VCS tools. The current directory may be outside - // p.Module.Dir when a workspace is used. 
- pkgRepoDir, _, err := vcs.FromDir(p.Dir, "", allowNesting) + // repository. vcs.FromDir disallows nested VCS and multiple VCS in the + // same repository, unless the GODEBUG allowmultiplevcs is set. The + // current directory may be outside p.Module.Dir when a workspace is + // used. + pkgRepoDir, _, err := vcs.FromDir(p.Dir, "") if err != nil { setVCSError(err) return @@ -2467,7 +2467,7 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) { } goto omitVCS } - modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "", allowNesting) + modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "") if err != nil { setVCSError(err) return diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go index bab4c5ebbedba4..9996be7af7e424 100644 --- a/src/cmd/go/internal/modfetch/codehost/git.go +++ b/src/cmd/go/internal/modfetch/codehost/git.go @@ -662,7 +662,21 @@ func (r *gitRepo) statLocal(ctx context.Context, version, rev string) (*RevInfo, } } } - sort.Strings(info.Tags) + + // Git 2.47.1 does not send the tags during shallow clone anymore + // (perhaps the exact version that changed behavior is an earlier one), + // so we have to also add tags from the refs list we fetched with ls-remote. + if refs, err := r.loadRefs(ctx); err == nil { + for ref, h := range refs { + if h == hash { + if tag, found := strings.CutPrefix(ref, "refs/tags/"); found { + info.Tags = append(info.Tags, tag) + } + } + } + } + slices.Sort(info.Tags) + info.Tags = slices.Compact(info.Tags) // Used hash as info.Version above. // Use caller's suggested version if it appears in the tag list diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go index 19a6a5ef6b0b7c..f2f362b57c2973 100644 --- a/src/cmd/go/internal/vcs/vcs.go +++ b/src/cmd/go/internal/vcs/vcs.go @@ -8,6 +8,7 @@ import ( "bytes" "errors" "fmt" + "internal/godebug" "internal/lazyregexp" "internal/singleflight" "io/fs" @@ -831,11 +832,13 @@ type vcsPath struct { schemelessRepo bool // if true, the repo pattern lacks a scheme } +var allowmultiplevcs = godebug.New("allowmultiplevcs") + // FromDir inspects dir and its parents to determine the // version control system and code repository to use. // If no repository is found, FromDir returns an error // equivalent to os.ErrNotExist. -func FromDir(dir, srcRoot string, allowNesting bool) (repoDir string, vcsCmd *Cmd, err error) { +func FromDir(dir, srcRoot string) (repoDir string, vcsCmd *Cmd, err error) { // Clean and double-check that dir is in (a subdirectory of) srcRoot. dir = filepath.Clean(dir) if srcRoot != "" { @@ -849,21 +852,28 @@ func FromDir(dir, srcRoot string, allowNesting bool) (repoDir string, vcsCmd *Cm for len(dir) > len(srcRoot) { for _, vcs := range vcsList { if isVCSRoot(dir, vcs.RootNames) { - // Record first VCS we find. - // If allowNesting is false (as it is in GOPATH), keep looking for - // repositories in parent directories and report an error if one is - // found to mitigate VCS injection attacks. if vcsCmd == nil { + // Record first VCS we find. vcsCmd = vcs repoDir = dir - if allowNesting { + if allowmultiplevcs.Value() == "1" { + allowmultiplevcs.IncNonDefault() return repoDir, vcsCmd, nil } + // If allowmultiplevcs is not set, keep looking for + // repositories in current and parent directories and report + // an error if one is found to mitigate VCS injection + // attacks. + continue + } + if vcsCmd == vcsGit && vcs == vcsGit { + // Nested Git is allowed, as this is how things like + // submodules work. 
Git explicitly protects
+				// itself against injection.
+				continue
+			}
-			// Otherwise, we have one VCS inside a different VCS.
-			return "", nil, fmt.Errorf("directory %q uses %s, but parent %q uses %s",
-				repoDir, vcsCmd.Cmd, dir, vcs.Cmd)
+			return "", nil, fmt.Errorf("multiple VCS detected: %s in %q, and %s in %q",
+				vcsCmd.Cmd, repoDir, vcs.Cmd, dir)
 		}
 	}
diff --git a/src/cmd/go/internal/vcs/vcs_test.go b/src/cmd/go/internal/vcs/vcs_test.go
index 2ce85ea210967f..06e63c29528f2f 100644
--- a/src/cmd/go/internal/vcs/vcs_test.go
+++ b/src/cmd/go/internal/vcs/vcs_test.go
@@ -239,7 +239,7 @@ func TestFromDir(t *testing.T) {
 		}
 
 		wantRepoDir := filepath.Dir(dir)
-		gotRepoDir, gotVCS, err := FromDir(dir, tempDir, false)
+		gotRepoDir, gotVCS, err := FromDir(dir, tempDir)
 		if err != nil {
 			t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err)
 			continue
diff --git a/src/cmd/go/testdata/script/test_multivcs.txt b/src/cmd/go/testdata/script/test_multivcs.txt
new file mode 100644
index 00000000000000..538cbf700b4585
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_multivcs.txt
@@ -0,0 +1,54 @@
+# To avoid VCS injection attacks, we should not accept multiple different VCS metadata
+# folders within a single module (either in the same directory, or nested in different
+# directories).
+#
+# This check can be disabled by setting the allowmultiplevcs GODEBUG.
+
+[short] skip
+[!git] skip
+
+cd samedir
+
+exec git init .
+
+# Without explicitly requesting buildvcs, the go command should silently continue
+# without determining the correct VCS.
+go test -c -o $devnull .
+
+# If buildvcs is explicitly requested, we expect the go command to fail
+! go test -buildvcs -c -o $devnull .
+stderr '^error obtaining VCS status: multiple VCS detected:'
+
+env GODEBUG=allowmultiplevcs=1
+go test -buildvcs -c -o $devnull .
+
+env GODEBUG=
+cd ../nested
+exec git init .
+# cd a
+go test -c -o $devnull ./a
+! go test -buildvcs -c -o $devnull ./a
+stderr '^error obtaining VCS status: multiple VCS detected:'
+# allowmultiplevcs doesn't disable the check that the current directory, package, and
+# module are in the same repository.
+env GODEBUG=allowmultiplevcs=1
+! go test -buildvcs -c -o $devnull ./a
+stderr '^error obtaining VCS status: main package is in repository'
+
+-- samedir/go.mod --
+module example
+
+go 1.18
+-- samedir/example.go --
+package main
+-- samedir/.bzr/test --
+hello
+
+-- nested/go.mod --
+module example
+
+go 1.18
+-- nested/a/example.go --
+package main
+-- nested/a/.bzr/test --
+hello
diff --git a/src/cmd/go/testdata/script/version_buildvcs_nested.txt b/src/cmd/go/testdata/script/version_buildvcs_nested.txt
index 6dab8474b59d44..22cd71c454b712 100644
--- a/src/cmd/go/testdata/script/version_buildvcs_nested.txt
+++ b/src/cmd/go/testdata/script/version_buildvcs_nested.txt
@@ -9,25 +9,35 @@ cd root
 go mod init example.com/root
 exec git init
 
-# Nesting repositories in parent directories are ignored, as the current
-# directory main package, and containing main module are in the same repository.
-# This is an error in GOPATH mode (to prevent VCS injection), but for modules,
-# we assume users have control over repositories they've checked out.
+
+# Nested repositories in parent directories are an error, to prevent VCS injection.
+# This can be disabled with the allowmultiplevcs GODEBUG.
 mkdir hgsub
 cd hgsub
 exec hg init
 cp ../../main.go main.go
 ! go build
+stderr '^error obtaining VCS status: multiple VCS detected: hg in ".*hgsub", and git in ".*root"$'
+stderr '^\tUse -buildvcs=false to disable VCS stamping.$'
+env GODEBUG=allowmultiplevcs=1
+! go build
 stderr '^error obtaining VCS status: main module is in repository ".*root" but current directory is in repository ".*hgsub"$'
 stderr '^\tUse -buildvcs=false to disable VCS stamping.$'
 go build -buildvcs=false
+env GODEBUG=
 go mod init example.com/root/hgsub
+! go build
+stderr '^error obtaining VCS status: multiple VCS detected: hg in ".*hgsub", and git in ".*root"$'
+stderr '^\tUse -buildvcs=false to disable VCS stamping.$'
+env GODEBUG=allowmultiplevcs=1
 go build
+env GODEBUG=
 cd ..
 
 # It's an error to build a package from a nested Git repository if the package
 # is in a separate repository from the current directory or from the module
-# root directory.
+# root directory. Otherwise nested Git repositories are allowed, as this is
+# how Git implements submodules (and protects against Git-based VCS injection).
 mkdir gitsub
 cd gitsub
 exec git init
diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go
index c34ede53fe0fb5..b507d98dc7e941 100644
--- a/src/cmd/internal/objabi/pkgspecial.go
+++ b/src/cmd/internal/objabi/pkgspecial.go
@@ -43,6 +43,9 @@ type PkgSpecial struct {
 }
 
 var runtimePkgs = []string{
+	// TODO(panjf2000): consider syncing the list inside
+	// isAsyncSafePoint in preempt.go based on this list?
+
 	"runtime",
 
 	"internal/runtime/atomic",
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go
index 0d8455d92e336b..bc484dedf6ed54 100644
--- a/src/cmd/link/internal/ld/elf.go
+++ b/src/cmd/link/internal/ld/elf.go
@@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int {
 	return int(sh.Size)
 }
 
-func addbuildinfo(val string) {
+func addbuildinfo(ctxt *Link) {
+	val := *flagHostBuildid
 	if val == "gobuildid" {
 		buildID := *flagBuildid
 		if buildID == "" {
 			Exitf("-B gobuildid requires a Go build ID supplied via -buildid")
 		}
 
+		if ctxt.IsDarwin() {
+			buildinfo = uuidFromGoBuildId(buildID)
+			return
+		}
+
 		hashedBuildID := notsha256.Sum256([]byte(buildID))
 		buildinfo = hashedBuildID[:20]
 
@@ -821,11 +827,13 @@ func elfwritefreebsdsig(out *OutBuf) int {
 	if !strings.HasPrefix(val, "0x") {
 		Exitf("-B argument must start with 0x: %s", val)
 	}
-
 	ov := val
 	val = val[2:]
 
-	const maxLen = 32
+	maxLen := 32
+	if ctxt.IsDarwin() {
+		maxLen = 16
+	}
 	if hex.DecodedLen(len(val)) > maxLen {
 		Exitf("-B option too long (max %d digits): %s", maxLen, ov)
 	}
diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go
index 34624c25a9f333..c5a85f0e75e7cf 100644
--- a/src/cmd/link/internal/ld/macho.go
+++ b/src/cmd/link/internal/ld/macho.go
@@ -297,6 +297,8 @@ func getMachoHdr() *MachoHdr {
 	return &machohdr
 }
 
+// Create a new Mach-O load command. ndata is the number of 32-bit words for
+// the data (not including the load command header).
 func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad {
 	if arch.PtrSize == 8 && (ndata&1 != 0) {
 		ndata++
@@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) {
 		}
 	}
 
+	if ctxt.IsInternal() && len(buildinfo) > 0 {
+		ml := newMachoLoad(ctxt.Arch, LC_UUID, 4)
+		// Mach-O UUID is 16 bytes.
+		if len(buildinfo) < 16 {
+			buildinfo = append(buildinfo, make([]byte, 16)...)
+		}
+		// By default, buildinfo is already in UUIDv3 format
+		// (see uuidFromGoBuildId).
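+		// Split the 16-byte UUID into four 32-bit words in the
+		// target's byte order.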
+ ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo) + ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:]) + ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:]) + ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:]) + } + if ctxt.IsInternal() && ctxt.NeedCodeSign() { ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2) ml.data[0] = uint32(codesigOff) diff --git a/src/cmd/link/internal/ld/macho_update_uuid.go b/src/cmd/link/internal/ld/macho_update_uuid.go index de27e655d59bf4..40e0c11ed19d6e 100644 --- a/src/cmd/link/internal/ld/macho_update_uuid.go +++ b/src/cmd/link/internal/ld/macho_update_uuid.go @@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte { // to use this UUID flavor than any of the others. This is similar // to how other linkers handle this (for example this code in lld: // https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524). - rv[6] &= 0xcf + rv[6] &= 0x0f rv[6] |= 0x30 rv[8] &= 0x3f rv[8] |= 0xc0 diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 56e865d8a53287..12bc896c66c3d7 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -95,6 +95,7 @@ var ( flagN = flag.Bool("n", false, "no-op (deprecated)") FlagS = flag.Bool("s", false, "disable symbol table") flag8 bool // use 64-bit addresses in symbol table + flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID") flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") @@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) { flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible") - objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo) objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) objabi.AddVersionFlag() // -V objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) @@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) { *flagBuildid = "go-openbsd" } + if *flagHostBuildid != "" { + addbuildinfo(ctxt) + } + // enable benchmarking var bench *benchmark.Metrics if len(*benchmarkFlag) != 0 { diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index f448a3ee7cb241..ca1736fd2e559e 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -251,6 +251,12 @@ type Loader struct { // CgoExports records cgo-exported symbols by SymName. CgoExports map[string]Sym + // sizeFixups records symbols that we need to fix up the size + // after loading. It is very rarely needed, only for a DATA symbol + // and a BSS symbol with the same name, and the BSS symbol has + // larger size. 
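+	// The fixups are applied in LoadSyms, after all object files
+	// have been preloaded.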
+	sizeFixups []symAndSize
+
 	flags uint32
 
 	strictDupMsgs int // number of strict-dup warning/errors, when FlagStrictDups is enabled
@@ -430,16 +436,16 @@ func (st *loadState) addSym(name string, ver int, r *oReader, li uint32, kind in
 		return i
 	}
 	// symbol already exists
+	// Fix for issue #47185 -- given two dupok or BSS symbols with
+	// different sizes, favor the symbol with the larger size. See also
+	// issues #46653 and #72032.
+	oldsz := l.SymSize(oldi)
+	sz := int64(r.Sym(li).Siz())
 	if osym.Dupok() {
 		if l.flags&FlagStrictDups != 0 {
 			l.checkdup(name, r, li, oldi)
 		}
-		// Fix for issue #47185 -- given two dupok symbols with
-		// different sizes, favor symbol with larger size. See
-		// also issue #46653.
-		szdup := l.SymSize(oldi)
-		sz := int64(r.Sym(li).Siz())
-		if szdup < sz {
+		if oldsz < sz {
 			// new symbol overwrites old symbol.
 			l.objSyms[oldi] = objSym{r.objidx, li}
 		}
@@ -450,20 +456,65 @@ func (st *loadState) addSym(name string, ver int, r *oReader, li uint32, kind in
 	if oldsym.Dupok() {
 		return oldi
 	}
-	overwrite := r.DataSize(li) != 0
-	if overwrite {
+	// If one is a DATA symbol (i.e. has content, DataSize != 0,
+	// including RODATA) and the other is BSS, the one with content wins.
+	// If both are BSS, the one with the larger size wins.
+	//
+	// As a special case, we allow a TEXT symbol to overwrite a BSS symbol
+	// even if the BSS symbol has a larger size. This is because there is
+	// code like below to take the address of a function
+	//
+	//	//go:linkname fn
+	//	var fn uintptr
+	//	var fnAddr = uintptr(unsafe.Pointer(&fn))
+	//
+	// TODO: maybe limit this case to just pointer-sized variables?
+	//
+	// In summary, the cases and the final results are
+	//
+	//	new sym       old sym       result
+	//	-------------------------------------------------------
+	//	TEXT          BSS           new wins
+	//	DATA          DATA          ERROR
+	//	DATA lg/eq    BSS  sm/eq    new wins
+	//	DATA small    BSS  large    merge: new with larger size
+	//	BSS  large    DATA small    merge: old with larger size
+	//	BSS  large    BSS  small    new wins
+	//	BSS  sm/eq    D/B  lg/eq    old wins
+	//	BSS           TEXT          old wins
+	oldtyp := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())]
+	newtyp := sym.AbiSymKindToSymKind[objabi.SymKind(osym.Type())]
+	newIsText := newtyp == sym.STEXT
+	oldHasContent := oldr.DataSize(oldli) != 0
+	newHasContent := r.DataSize(li) != 0
+	oldIsBSS := oldtyp.IsData() && !oldHasContent
+	newIsBSS := newtyp.IsData() && !newHasContent
+	switch {
+	case newIsText && oldIsBSS,
+		newHasContent && oldIsBSS,
+		newIsBSS && oldIsBSS && sz > oldsz:
 		// new symbol overwrites old symbol.
-		oldtyp := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())]
-		if !(oldtyp.IsData() && oldr.DataSize(oldli) == 0) {
-			log.Fatalf("duplicated definition of symbol %s, from %s and %s", name, r.unit.Lib.Pkg, oldr.unit.Lib.Pkg)
-		}
 		l.objSyms[oldi] = objSym{r.objidx, li}
-	} else {
-		// old symbol overwrites new symbol.
-		typ := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())]
-		if !typ.IsData() { // only allow overwriting data symbol
-			log.Fatalf("duplicated definition of symbol %s, from %s and %s", name, r.unit.Lib.Pkg, oldr.unit.Lib.Pkg)
+		if oldsz > sz {
+			// If the BSS symbol has a larger size, expand the data
+			// symbol's size so access from the BSS side cannot overrun.
+			// It is hard to modify the symbol size until all Go objects
+			// (potentially read-only) are loaded, so we record it in
+			// a fixup table and apply it later. This is very rare.
+ // One case is a global variable with a Go declaration and an + // assembly definition, which typically have the same size, + // but in ASAN mode the Go declaration has a larger size due + // to the inserted red zone. + l.sizeFixups = append(l.sizeFixups, symAndSize{oldi, uint32(oldsz)}) } + case newIsBSS: + // old wins; just ignore the new symbol. + if sz > oldsz { + // See the comment above for sizeFixups. + l.sizeFixups = append(l.sizeFixups, symAndSize{oldi, uint32(sz)}) + } + default: + log.Fatalf("duplicated definition of symbol %s, from %s (type %s size %d) and %s (type %s size %d)", name, r.unit.Lib.Pkg, newtyp, sz, oldr.unit.Lib.Pkg, oldtyp, oldsz) } return oldi } @@ -2256,6 +2307,10 @@ func (l *Loader) LoadSyms(arch *sys.Arch) { st.preloadSyms(r, hashedDef) st.preloadSyms(r, nonPkgDef) } + for _, sf := range l.sizeFixups { + pp := l.cloneToExternal(sf.sym) + pp.size = int64(sf.size) + } for _, vr := range st.linknameVarRefs { l.checkLinkname(vr.pkg, vr.name, vr.sym) } @@ -2413,7 +2468,7 @@ func topLevelSym(sname string, skind sym.SymKind) bool { // a symbol originally discovered as part of an object file, it's // easier to do this if we make the updates to an external symbol // payload. -func (l *Loader) cloneToExternal(symIdx Sym) { +func (l *Loader) cloneToExternal(symIdx Sym) *extSymPayload { if l.IsExternal(symIdx) { panic("sym is already external, no need for clone") } @@ -2465,6 +2520,8 @@ func (l *Loader) cloneToExternal(symIdx Sym) { // Some attributes were encoded in the object file. Copy them over. l.SetAttrDuplicateOK(symIdx, r.Sym(li).Dupok()) l.SetAttrShared(symIdx, r.Shared()) + + return pp } // Copy the payload of symbol src to dst. Both src and dst must be external diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 5fed6619c79dcf..08cdf1750b66bd 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -19,6 +19,7 @@ import ( "strings" "testing" + "cmd/internal/objfile" "cmd/internal/sys" ) @@ -1431,6 +1432,9 @@ func TestCheckLinkname(t *testing.T) { {"ok.go", true}, // push linkname is ok {"push.go", true}, + // using a linknamed variable to reference an assembly + // function in the same package is ok + {"textvar", true}, // pull linkname of blocked symbol is not ok {"coro.go", false}, {"coro_var.go", false}, @@ -1446,7 +1450,7 @@ func TestCheckLinkname(t *testing.T) { test := test t.Run(test.src, func(t *testing.T) { t.Parallel() - src := filepath.Join("testdata", "linkname", test.src) + src := "./testdata/linkname/" + test.src exe := filepath.Join(tmpdir, test.src+".exe") cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, src) out, err := cmd.CombinedOutput() @@ -1459,3 +1463,53 @@ func TestCheckLinkname(t *testing.T) { }) } } + +func TestLinknameBSS(t *testing.T) { + // Test that the linker chooses the right symbol as the definition + // for linknamed variables. See issue #72032. + testenv.MustHaveGoBuild(t) + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join("testdata", "linkname", "sched.go") + exe := filepath.Join(tmpdir, "sched.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed unexpectedly: %v:\n%s", err, out) + } + + // Check the symbol size.
+ f, err := objfile.Open(exe) + if err != nil { + t.Fatalf("failed to open executable: %v", err) + } + defer f.Close() + syms, err := f.Symbols() + if err != nil { + t.Fatalf("failed to get symbols: %v", err) + } + found := false + for _, s := range syms { + if s.Name == "runtime.sched" || s.Name == "_runtime.sched" { + found = true + if s.Size < 100 { + // As of Go 1.25 (Mar 2025), runtime.sched is 6848 bytes on + // darwin/arm64. It should always be larger than 100 bytes on + // all platforms. + t.Errorf("runtime.sched symbol size too small: want > 100, got %d", s.Size) + } + } + } + if !found { + t.Errorf("runtime.sched symbol not found") + } + + // Executable should run. + cmd = testenv.Command(t, exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run: %v\n%s", err, out) + } +} diff --git a/src/cmd/link/testdata/linkname/sched.go b/src/cmd/link/testdata/linkname/sched.go new file mode 100644 index 00000000000000..7a9d66f495098b --- /dev/null +++ b/src/cmd/link/testdata/linkname/sched.go @@ -0,0 +1,19 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import _ "unsafe" + +type schedt struct{} + +//go:linkname sched runtime.sched +var sched schedt + +func main() { + select { + default: + println("hello") + } +} diff --git a/src/cmd/link/testdata/linkname/textvar/asm.s b/src/cmd/link/testdata/linkname/textvar/asm.s new file mode 100644 index 00000000000000..332dcdb4e79b62 --- /dev/null +++ b/src/cmd/link/testdata/linkname/textvar/asm.s @@ -0,0 +1,6 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT ·asmfunc(SB),0,$0-0 + RET diff --git a/src/cmd/link/testdata/linkname/textvar/main.go b/src/cmd/link/testdata/linkname/textvar/main.go new file mode 100644 index 00000000000000..b38995e706ad78 --- /dev/null +++ b/src/cmd/link/testdata/linkname/textvar/main.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Using a linknamed variable to reference an assembly +// function in the same package is ok. + +package main + +import _ "unsafe" + +func main() { + println(&asmfunc) +} + +//go:linkname asmfunc +var asmfunc uintptr diff --git a/src/cmd/trace/gstate.go b/src/cmd/trace/gstate.go index 638d492670a6e7..4b380db9f53cd7 100644 --- a/src/cmd/trace/gstate.go +++ b/src/cmd/trace/gstate.go @@ -257,6 +257,10 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { if gs.lastStopStack != trace.NoStack { stk = ctx.Stack(viewerFrames(gs.lastStopStack)) } + var endStk int + if stack != trace.NoStack { + endStk = ctx.Stack(viewerFrames(stack)) + } // Check invariants. if gs.startRunningTime == 0 { panic("silently broken trace or generator invariant (startRunningTime != 0) not held") @@ -270,6 +274,7 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { Dur: ts.Sub(gs.startRunningTime), Resource: uint64(gs.executing), Stack: stk, + EndStack: endStk, }) // Flush completed ranges.
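The symbol-resolution table in the loader change above is dense, so here is a hedged reading aid that restates the same rules as a standalone Go program. SymInfo, resolve, and every other name below are invented for illustration; the real loader operates on object-file symbol indices and records size fixups rather than returning a merged size directly.

	package main

	import "fmt"

	// SymInfo is a hypothetical, simplified stand-in for the loader's
	// view of a symbol: whether it is TEXT, whether it has content
	// (DATA/RODATA), and its size. "BSS" here means a data-kind symbol
	// with no content.
	type SymInfo struct {
		IsText     bool
		HasContent bool
		Size       int64
	}

	func (s SymInfo) isBSS() bool { return !s.IsText && !s.HasContent }

	// resolve mirrors the summary table: TEXT beats BSS, content beats
	// BSS, and between two BSS symbols the larger wins. The "merge" rows
	// keep the larger of the two sizes, which is what the sizeFixups
	// mechanism implements.
	func resolve(newSym, oldSym SymInfo) (winner string, size int64, err error) {
		larger := func(a, b int64) int64 {
			if a > b {
				return a
			}
			return b
		}
		switch {
		case newSym.IsText && oldSym.isBSS():
			return "new", newSym.Size, nil
		case newSym.HasContent && oldSym.isBSS():
			return "new", larger(newSym.Size, oldSym.Size), nil
		case newSym.isBSS() && oldSym.isBSS() && newSym.Size > oldSym.Size:
			return "new", newSym.Size, nil
		case newSym.isBSS():
			return "old", larger(newSym.Size, oldSym.Size), nil
		default:
			return "", 0, fmt.Errorf("duplicated definition")
		}
	}

	func main() {
		// DATA(8) vs. BSS(16): the DATA symbol wins but is grown to the
		// BSS size, so access through the BSS declaration cannot overrun.
		w, sz, _ := resolve(SymInfo{HasContent: true, Size: 8}, SymInfo{Size: 16})
		fmt.Println(w, sz) // new 16
	}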
diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go b/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go index a38f371d0f51b6..e60ab7e9fdd73e 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go @@ -16,6 +16,7 @@ import ( "os" "os/exec" "path/filepath" + "sync/atomic" "golang.org/x/telemetry/internal/telemetry" ) @@ -29,12 +30,22 @@ const ( // creation flag. var needNoConsole = func(cmd *exec.Cmd) {} +var downloads int64 + +// Downloads reports, for testing purposes, the number of times [Download] has +// been called. +func Downloads() int64 { + return atomic.LoadInt64(&downloads) +} + // Download fetches the requested telemetry UploadConfig using "go mod // download". If envOverlay is provided, it is appended to the environment used // for invoking the go command. // // The second result is the canonical version of the requested configuration. func Download(version string, envOverlay []string) (*telemetry.UploadConfig, string, error) { + atomic.AddInt64(&downloads, 1) + if version == "" { version = "latest" } diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go index f475f7eec2dfce..612f7563a74c9f 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go @@ -21,12 +21,12 @@ import ( "golang.org/x/telemetry/internal/counter" ) -// Supported reports whether the runtime supports [runtime.SetCrashOutput]. +// Supported reports whether the runtime supports [runtime/debug.SetCrashOutput]. // // TODO(adonovan): eliminate once go1.23+ is assured. func Supported() bool { return setCrashOutput != nil } -var setCrashOutput func(*os.File) error // = runtime.SetCrashOutput on go1.23+ +var setCrashOutput func(*os.File) error // = runtime/debug.SetCrashOutput on go1.23+ // Parent sets up the parent side of the crashmonitor. It requires // exclusive use of a writable pipe connected to the child process's stdin. diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go b/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go index eba13b1a573560..e9c8dc207126a1 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go @@ -112,9 +112,24 @@ func newUploader(rcfg RunConfig) (*uploader, error) { logger := log.New(logWriter, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) // Fetch the upload config, if it is not provided. - config, configVersion, err := configstore.Download("latest", rcfg.Env) - if err != nil { - return nil, err + var ( + config *telemetry.UploadConfig + configVersion string + ) + + if mode, _ := dir.Mode(); mode == "on" { + // golang/go#68946: only download the upload config if it will be used. + // + // TODO(rfindley): This is a narrow change aimed at minimally fixing the + // associated bug. In the future, we should read the mode only once during + // the upload process. + config, configVersion, err = configstore.Download("latest", rcfg.Env) + if err != nil { + return nil, err + } + } else { + config = &telemetry.UploadConfig{} + configVersion = "v0.0.0-0" } // Set the start time, if it is not provided. 
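The run.go change above makes the config download conditional on the telemetry mode, and the new Downloads counter in configstore exists so tests can observe that gating. Below is a self-contained sketch of the pattern under simplified assumptions; download, newUploader, and the mode strings here are stand-ins, not the real telemetry API.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// downloads mirrors the counter added to configstore: bumped
	// atomically on every download, readable via an accessor.
	var downloads int64

	func Downloads() int64 { return atomic.LoadInt64(&downloads) }

	// download stands in for configstore.Download; only the counting
	// behavior is modeled here.
	func download(version string) string {
		atomic.AddInt64(&downloads, 1)
		if version == "" {
			version = "latest"
		}
		return version
	}

	// newUploader sketches the golang/go#68946 fix: fetch the remote
	// config only when telemetry is "on"; otherwise use an empty
	// placeholder config version.
	func newUploader(mode string) (configVersion string) {
		if mode == "on" {
			return download("latest")
		}
		return "v0.0.0-0"
	}

	func main() {
		before := Downloads()
		newUploader("local")
		fmt.Println("downloads with mode=local:", Downloads()-before) // 0
		newUploader("on")
		fmt.Println("downloads with mode=on:", Downloads()-before) // 1
	}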
diff --git a/src/cmd/vendor/golang.org/x/telemetry/start.go b/src/cmd/vendor/golang.org/x/telemetry/start.go index 4b37a5c3945cd5..69ebcc71359405 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/start.go +++ b/src/cmd/vendor/golang.org/x/telemetry/start.go @@ -206,7 +206,8 @@ func startChild(reportCrashes, upload bool, result *StartResult) { fd, err := os.Stat(telemetry.Default.DebugDir()) if err != nil { if !os.IsNotExist(err) { - log.Fatalf("failed to stat debug directory: %v", err) + log.Printf("failed to stat debug directory: %v", err) + return } } else if fd.IsDir() { // local/debug exists and is a directory. Set stderr to a log file path @@ -214,23 +215,31 @@ func startChild(reportCrashes, upload bool, result *StartResult) { childLogPath := filepath.Join(telemetry.Default.DebugDir(), "sidecar.log") childLog, err := os.OpenFile(childLogPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) if err != nil { - log.Fatalf("opening sidecar log file for child: %v", err) + log.Printf("opening sidecar log file for child: %v", err) + return } defer childLog.Close() cmd.Stderr = childLog } + var crashOutputFile *os.File if reportCrashes { pipe, err := cmd.StdinPipe() if err != nil { - log.Fatalf("StdinPipe: %v", err) + log.Printf("StdinPipe: %v", err) + return } - crashmonitor.Parent(pipe.(*os.File)) // (this conversion is safe) + crashOutputFile = pipe.(*os.File) // (this conversion is safe) } if err := cmd.Start(); err != nil { - log.Fatalf("can't start telemetry child process: %v", err) + // The child couldn't be started. Log the failure. + log.Printf("can't start telemetry child process: %v", err) + return + } + if reportCrashes { + crashmonitor.Parent(crashOutputFile) } result.wg.Add(1) go func() { diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index bf9c1341b94f73..22d40b9e4c1385 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -45,7 +45,7 @@ golang.org/x/sync/semaphore golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 +# golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 ## explicit; go 1.20 golang.org/x/telemetry golang.org/x/telemetry/counter diff --git a/src/crypto/internal/nistec/p256_asm_ppc64le.s b/src/crypto/internal/nistec/p256_asm_ppc64le.s index 1475dfb1d90e4b..b13bd512a6e354 100644 --- a/src/crypto/internal/nistec/p256_asm_ppc64le.s +++ b/src/crypto/internal/nistec/p256_asm_ppc64le.s @@ -126,14 +126,23 @@ GLOBL p256mul<>(SB), 8, $160 #define PH V31 #define CAR1 V6 + +#define SEL V8 +#define ZER V9 + // func p256NegCond(val *p256Point, cond int) TEXT ·p256NegCond(SB), NOSPLIT, $0-16 MOVD val+0(FP), P1ptr MOVD $16, R16 - MOVD cond+8(FP), R6 - CMP $0, R6 - BC 12, 2, LR // just return if cond == 0 + // Copy cond into SEL (cond is R1 + 8 (cond offset) + 32) + MOVD $40, R17 + LXVDSX (R1)(R17), SEL + // Zeroize ZER + VSPLTISB $0, ZER + // SEL controls whether to return the original value (Y1H/Y1L) + // or the negated value (T1H/T1L). 
+ VCMPEQUD SEL, ZER, SEL MOVD $p256mul<>+0x00(SB), CPOOL @@ -150,6 +159,9 @@ TEXT ·p256NegCond(SB), NOSPLIT, $0-16 VSUBUQM PL, Y1L, T1L // subtract part2 giving result VSUBEUQM PH, Y1H, CAR1, T1H // subtract part1 using carry from part2 + VSEL T1H, Y1H, SEL, T1H + VSEL T1L, Y1L, SEL, T1L + XXPERMDI T1H, T1H, $2, T1H XXPERMDI T1L, T1L, $2, T1L @@ -166,6 +178,8 @@ TEXT ·p256NegCond(SB), NOSPLIT, $0-16 #undef PL #undef PH #undef CAR1 +#undef SEL +#undef ZER #define P3ptr R3 #define P1ptr R4 diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go index 501f9c6755f9e3..3c87916bcf0bb7 100644 --- a/src/crypto/tls/handshake_client_test.go +++ b/src/crypto/tls/handshake_client_test.go @@ -852,6 +852,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } issuer, err := x509.ParseCertificate(testRSACertificateIssuer) @@ -868,6 +869,7 @@ func testResumption(t *testing.T, version uint16) { ClientSessionCache: NewLRUClientSessionCache(32), RootCAs: rootCAs, ServerName: "example.golang", + Time: testTime, } testResumeState := func(test string, didResume bool) { @@ -914,7 +916,7 @@ func testResumption(t *testing.T, version uint16) { // An old session ticket is replaced with a ticket encrypted with a fresh key. ticket = getTicket() - serverConfig.Time = func() time.Time { return time.Now().Add(24*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*time.Hour + time.Minute) } testResumeState("ResumeWithOldTicket", true) if bytes.Equal(ticket, getTicket()) { t.Fatal("old first ticket matches the fresh one") @@ -922,13 +924,13 @@ func testResumption(t *testing.T, version uint16) { // Once the session master secret is expired, a full handshake should occur. ticket = getTicket() - serverConfig.Time = func() time.Time { return time.Now().Add(24*8*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*8*time.Hour + time.Minute) } testResumeState("ResumeWithExpiredTicket", false) if bytes.Equal(ticket, getTicket()) { t.Fatal("expired first ticket matches the fresh one") } - serverConfig.Time = func() time.Time { return time.Now() } // reset the time back + serverConfig.Time = testTime // reset the time back key1 := randomKey() serverConfig.SetSessionTicketKeys([][32]byte{key1}) @@ -945,11 +947,11 @@ func testResumption(t *testing.T, version uint16) { testResumeState("KeyChangeFinish", true) // Age the session ticket a bit, but not yet expired. - serverConfig.Time = func() time.Time { return time.Now().Add(24*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*time.Hour + time.Minute) } testResumeState("OldSessionTicket", true) ticket = getTicket() // Expire the session ticket, which would force a full handshake. - serverConfig.Time = func() time.Time { return time.Now().Add(24*8*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*8*time.Hour + 2*time.Minute) } testResumeState("ExpiredSessionTicket", false) if bytes.Equal(ticket, getTicket()) { t.Fatal("new ticket wasn't provided after old ticket expired") @@ -957,7 +959,7 @@ func testResumption(t *testing.T, version uint16) { // Age the session ticket a bit at a time, but don't expire it. 
d := 0 * time.Hour - serverConfig.Time = func() time.Time { return time.Now().Add(d) } + serverConfig.Time = func() time.Time { return testTime().Add(d) } deleteTicket() testResumeState("GetFreshSessionTicket", false) for i := 0; i < 13; i++ { @@ -968,7 +970,7 @@ func testResumption(t *testing.T, version uint16) { // handshake occurs for TLS 1.2. Resumption should still occur for // TLS 1.3 since the client should be using a fresh ticket sent over // by the server. - d += 12 * time.Hour + d += 12*time.Hour + time.Minute if version == VersionTLS13 { testResumeState("ExpiredSessionTicket", true) } else { @@ -984,6 +986,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } serverConfig.SetSessionTicketKeys([][32]byte{key2}) @@ -1009,6 +1012,7 @@ func testResumption(t *testing.T, version uint16) { CurvePreferences: []CurveID{CurveP521, CurveP384, CurveP256}, MaxVersion: version, Certificates: testConfig.Certificates, + Time: testTime, } testResumeState("InitialHandshake", false) testResumeState("WithHelloRetryRequest", true) @@ -1018,6 +1022,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } } @@ -1736,6 +1741,7 @@ func testVerifyConnection(t *testing.T, version uint16) { serverConfig := &Config{ MaxVersion: version, Certificates: []Certificate{testConfig.Certificates[0]}, + Time: testTime, ClientCAs: rootCAs, NextProtos: []string{"protocol1"}, } @@ -1749,6 +1755,7 @@ func testVerifyConnection(t *testing.T, version uint16) { RootCAs: rootCAs, ServerName: "example.golang", Certificates: []Certificate{testConfig.Certificates[0]}, + Time: testTime, NextProtos: []string{"protocol1"}, } test.configureClient(clientConfig, &clientCalled) @@ -1791,8 +1798,6 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { rootCAs := x509.NewCertPool() rootCAs.AddCert(issuer) - now := func() time.Time { return time.Unix(1476984729, 0) } - sentinelErr := errors.New("TestVerifyPeerCertificate") verifyPeerCertificateCallback := func(called *bool, rawCerts [][]byte, validatedChains [][]*x509.Certificate) error { @@ -2038,7 +2043,7 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { config.ServerName = "example.golang" config.ClientAuth = RequireAndVerifyClientCert config.ClientCAs = rootCAs - config.Time = now + config.Time = testTime config.MaxVersion = version config.Certificates = make([]Certificate, 1) config.Certificates[0].Certificate = [][]byte{testRSACertificate} @@ -2055,7 +2060,7 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { config := testConfig.Clone() config.ServerName = "example.golang" config.RootCAs = rootCAs - config.Time = now + config.Time = testTime config.MaxVersion = version test.configureClient(config, &clientCalled) clientErr := Client(c, config).Handshake() @@ -2368,7 +2373,7 @@ func testGetClientCertificate(t *testing.T, version uint16) { serverConfig.RootCAs = x509.NewCertPool() serverConfig.RootCAs.AddCert(issuer) serverConfig.ClientCAs = serverConfig.RootCAs - serverConfig.Time = func() time.Time { return time.Unix(1476984729, 0) } + serverConfig.Time = testTime serverConfig.MaxVersion = version clientConfig := testConfig.Clone() @@ -2539,6 +2544,7 @@ func testResumptionKeepsOCSPAndSCT(t *testing.T, ver uint16) { 
ClientSessionCache: NewLRUClientSessionCache(32), ServerName: "example.golang", RootCAs: roots, + Time: testTime, } serverConfig := testConfig.Clone() serverConfig.MaxVersion = ver diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go index 94d3d0f6dc87bc..bbfe44bd97daa2 100644 --- a/src/crypto/tls/handshake_server_test.go +++ b/src/crypto/tls/handshake_server_test.go @@ -501,6 +501,7 @@ func testCrossVersionResume(t *testing.T, version uint16) { serverConfig := &Config{ CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } clientConfig := &Config{ CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA}, @@ -508,6 +509,7 @@ ClientSessionCache: NewLRUClientSessionCache(1), ServerName: "servername", MinVersion: VersionTLS12, + Time: testTime, } // Establish a session at TLS 1.3. diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index bc3d23d5adc24e..803aa736578f8c 100644 --- a/src/crypto/tls/handshake_test.go +++ b/src/crypto/tls/handshake_test.go @@ -491,9 +491,10 @@ func testHandshake(t *testing.T, clientConfig, serverConfig *Config) (serverStat if got := string(buf); got != sentinel { t.Errorf("read %q from TLS connection, but expected %q", got, sentinel) } - if err := cli.Close(); err != nil { - t.Errorf("failed to call cli.Close: %v", err) - } + // We discard the error because after ReadAll returns the server must + // have already closed the connection. Sending data (the closeNotify + // alert) can cause a reset, which will make Close return an error. + cli.Close() }() server := Server(s, serverConfig) err = server.Handshake() @@ -518,6 +519,11 @@ func fromHex(s string) []byte { return b } +// testTime is 2016-10-20T17:32:09.000Z, which is within the validity period of +// [testRSACertificate], [testRSACertificateIssuer], [testRSA2048Certificate], +// [testRSA2048CertificateIssuer], and [testECDSACertificate].
+var testTime = func() time.Time { return time.Unix(1476984729, 0) } + var testRSACertificate = fromHex("3082024b308201b4a003020102020900e8f09d3fe25beaa6300d06092a864886f70d01010b0500301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f74301e170d3136303130313030303030305a170d3235303130313030303030305a301a310b3009060355040a1302476f310b300906035504031302476f30819f300d06092a864886f70d010101050003818d0030818902818100db467d932e12270648bc062821ab7ec4b6a25dfe1e5245887a3647a5080d92425bc281c0be97799840fb4f6d14fd2b138bc2a52e67d8d4099ed62238b74a0b74732bc234f1d193e596d9747bf3589f6c613cc0b041d4d92b2b2423775b1c3bbd755dce2054cfa163871d1e24c4f31d1a508baab61443ed97a77562f414c852d70203010001a38193308190300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff0402300030190603551d0e041204109f91161f43433e49a6de6db680d79f60301b0603551d230414301280104813494d137e1631bba301d5acab6e7b30190603551d1104123010820e6578616d706c652e676f6c616e67300d06092a864886f70d01010b0500038181009d30cc402b5b50a061cbbae55358e1ed8328a9581aa938a495a1ac315a1a84663d43d32dd90bf297dfd320643892243a00bccf9c7db74020015faad3166109a276fd13c3cce10c5ceeb18782f16c04ed73bbb343778d0c1cf10fa1d8408361c94c722b9daedb4606064df4c1b33ec0d1bd42d4dbfe3d1360845c21d33be9fae7") var testRSACertificateIssuer = fromHex("3082021930820182a003020102020900ca5e4e811a965964300d06092a864886f70d01010b0500301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f74301e170d3136303130313030303030305a170d3235303130313030303030305a301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f7430819f300d06092a864886f70d010101050003818d0030818902818100d667b378bb22f34143b6cd2008236abefaf2852adf3ab05e01329e2c14834f5105df3f3073f99dab5442d45ee5f8f57b0111c8cb682fbb719a86944eebfffef3406206d898b8c1b1887797c9c5006547bb8f00e694b7a063f10839f269f2c34fff7a1f4b21fbcd6bfdfb13ac792d1d11f277b5c5b48600992203059f2a8f8cc50203010001a35d305b300e0603551d0f0101ff040403020204301d0603551d250416301406082b0601050507030106082b06010505070302300f0603551d130101ff040530030101ff30190603551d0e041204104813494d137e1631bba301d5acab6e7b300d06092a864886f70d01010b050003818100c1154b4bab5266221f293766ae4138899bd4c5e36b13cee670ceeaa4cbdf4f6679017e2fe649765af545749fe4249418a56bd38a04b81e261f5ce86b8d5c65413156a50d12449554748c59a30c515bc36a59d38bddf51173e899820b282e40aa78c806526fd184fb6b4cf186ec728edffa585440d2b3225325f7ab580e87dd76") diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go index fc5040635fbbf7..13c5ddced2cddb 100644 --- a/src/crypto/tls/tls_test.go +++ b/src/crypto/tls/tls_test.go @@ -1112,8 +1112,6 @@ func TestConnectionState(t *testing.T) { rootCAs := x509.NewCertPool() rootCAs.AddCert(issuer) - now := func() time.Time { return time.Unix(1476984729, 0) } - const alpnProtocol = "golang" const serverName = "example.golang" var scts = [][]byte{[]byte("dummy sct 1"), []byte("dummy sct 2")} @@ -1129,7 +1127,7 @@ func TestConnectionState(t *testing.T) { } t.Run(name, func(t *testing.T) { config := &Config{ - Time: now, + Time: testTime, Rand: zeroSource{}, Certificates: make([]Certificate, 1), MaxVersion: v, @@ -1760,7 +1758,7 @@ func testVerifyCertificates(t *testing.T, version uint16) { var serverVerifyPeerCertificates, clientVerifyPeerCertificates bool clientConfig := testConfig.Clone() - clientConfig.Time = func() time.Time { return time.Unix(1476984729, 0) } + clientConfig.Time = testTime clientConfig.MaxVersion = version clientConfig.MinVersion = version clientConfig.RootCAs = rootCAs diff --git 
a/src/crypto/x509/name_constraints_test.go b/src/crypto/x509/name_constraints_test.go index 008c7028f4e4c4..a5851845164d10 100644 --- a/src/crypto/x509/name_constraints_test.go +++ b/src/crypto/x509/name_constraints_test.go @@ -1607,6 +1607,23 @@ var nameConstraintsTests = []nameConstraintsTest{ leaf: leafSpec{sans: []string{"dns:.example.com"}}, expectedError: "cannot parse dnsName \".example.com\"", }, + // #86: URIs with IPv6 addresses with zones and ports are rejected + { + roots: []constraintsSpec{ + { + ok: []string{"uri:example.com"}, + }, + }, + intermediates: [][]constraintsSpec{ + { + {}, + }, + }, + leaf: leafSpec{ + sans: []string{"uri:http://[2006:abcd::1%25.example.com]:16/"}, + }, + expectedError: "URI with IP", + }, } func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.PrivateKey, parent *Certificate, parentKey *ecdsa.PrivateKey) (*Certificate, error) { diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 7170087287644e..bbccfce57742c0 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "net" + "net/netip" "net/url" "reflect" "runtime" @@ -434,8 +435,10 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { } } - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") || - net.ParseIP(host) != nil { + // netip.ParseAddr will reject the URI IPv6 literal form "[...]", so we + // check if _either_ the string parses as an IP, or if it is enclosed in + // square brackets. + if _, err := netip.ParseAddr(host); err == nil || (strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]")) { return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) } diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index de774a051093df..c247a9b506bfab 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -1368,8 +1368,8 @@ func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn db.waitDuration.Add(int64(time.Since(waitStart))) - // If we failed to delete it, that means something else - // grabbed it and is about to send on it. + // If we failed to delete it, that means either the DB was closed or + // something else grabbed it and is about to send on it. if !deleted { // TODO(bradfitz): rather than this best effort select, we // should probably start a goroutine to read from req. This best @@ -3594,6 +3594,7 @@ type connRequestAndIndex struct { // and clears the set. 
func (s *connRequestSet) CloseAndRemoveAll() { for _, v := range s.s { + *v.curIdx = -1 close(v.req) } s.s = nil diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index ff65e877a5af6b..110a2bae5bd247 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -4920,6 +4920,17 @@ func TestConnRequestSet(t *testing.T) { t.Error("wasn't random") } }) + t.Run("close-delete", func(t *testing.T) { + reset() + ch := make(chan connRequest) + dh := s.Add(ch) + wantLen(1) + s.CloseAndRemoveAll() + wantLen(0) + if s.Delete(dh) { + t.Error("unexpected delete after CloseAndRemoveAll") + } + }) } func BenchmarkConnRequestSet(b *testing.B) { diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go index d178b2b2fb6467..26b5f6d62b631e 100644 --- a/src/encoding/gob/decode.go +++ b/src/encoding/gob/decode.go @@ -911,8 +911,11 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg var maxIgnoreNestingDepth = 10000 // decIgnoreOpFor returns the decoding op for a field that has no destination. -func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp { - if depth > maxIgnoreNestingDepth { +func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp { + // Track how deep we've recursed trying to skip nested ignored fields. + dec.ignoreDepth++ + defer func() { dec.ignoreDepth-- }() + if dec.ignoreDepth > maxIgnoreNestingDepth { error_(errors.New("invalid nesting depth")) } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). @@ -938,7 +941,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, errorf("bad data: undefined type %s", wireId.string()) case wire.ArrayT != nil: elemId := wire.ArrayT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len) } @@ -946,15 +949,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, case wire.MapT != nil: keyId := dec.wireType[wireId].MapT.Key elemId := dec.wireType[wireId].MapT.Elem - keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1) - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + keyOp := dec.decIgnoreOpFor(keyId, inProgress) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreMap(state, *keyOp, *elemOp) } case wire.SliceT != nil: elemId := wire.SliceT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreSlice(state, *elemOp) } @@ -1115,7 +1118,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine { engine := new(decEngine) engine.instr = make([]decInstr, 1) // one item - op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0) + op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp)) ovfl := overflow(dec.typeString(remoteId)) engine.instr[0] = decInstr{*op, 0, nil, ovfl} engine.numInstr = 1 @@ -1160,7 +1163,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn localField, present := srt.FieldByName(wireField.Name) // TODO(r): anonymous names if !present || 
!isExported(wireField.Name) { - op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0) + op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp)) engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl} continue } diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go index c4b60880130787..eae307838e201e 100644 --- a/src/encoding/gob/decoder.go +++ b/src/encoding/gob/decoder.go @@ -35,6 +35,8 @@ type Decoder struct { freeList *decoderState // list of free decoderStates; avoids reallocation countBuf []byte // used for decoding integers while parsing messages err error + // ignoreDepth tracks the depth of recursively parsed ignored fields + ignoreDepth int } // NewDecoder returns a new decoder that reads from the [io.Reader]. diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go index ae806fc39a21fc..d30e622aa2cbe7 100644 --- a/src/encoding/gob/gobencdec_test.go +++ b/src/encoding/gob/gobencdec_test.go @@ -806,6 +806,8 @@ func TestIgnoreDepthLimit(t *testing.T) { defer func() { maxIgnoreNestingDepth = oldNestingDepth }() b := new(bytes.Buffer) enc := NewEncoder(b) + + // Nested slice typ := reflect.TypeFor[int]() nested := reflect.ArrayOf(1, typ) for i := 0; i < 100; i++ { @@ -819,4 +821,16 @@ func TestIgnoreDepthLimit(t *testing.T) { if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) } + + // Nested struct + nested = reflect.StructOf([]reflect.StructField{{Name: "F", Type: typ}}) + for i := 0; i < 100; i++ { + nested = reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}) + } + badStruct = reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}})) + enc.Encode(badStruct.Interface()) + dec = NewDecoder(b) + if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { + t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) + } } diff --git a/src/go.mod b/src/go.mod index 789f5aaa1d3e26..96513ccfc92eab 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.23 require ( golang.org/x/crypto v0.23.1-0.20240603234054-0b431c7de36a - golang.org/x/net v0.25.1-0.20240603202750-6249541f2a6c + golang.org/x/net v0.25.1-0.20250304182835-b70a9e3eaa27 ) require ( diff --git a/src/go.sum b/src/go.sum index a75ea98c7312df..7d2f0b01d502fb 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,7 +1,7 @@ golang.org/x/crypto v0.23.1-0.20240603234054-0b431c7de36a h1:37MIv+iGfwMYzWJECGyrPCtd5nuqcciRUeJfkNCkCf0= golang.org/x/crypto v0.23.1-0.20240603234054-0b431c7de36a/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/net v0.25.1-0.20240603202750-6249541f2a6c h1:CR/7/SLUhIJw6g675eeoDiwggElO2MV9rGkNYjqi8GM= -golang.org/x/net v0.25.1-0.20240603202750-6249541f2a6c/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.25.1-0.20250304182835-b70a9e3eaa27 h1:BLroQt2NWk69+mgdbJFxbd1Y6nc8r9UCc/iPQ0FgpNs= +golang.org/x/net v0.25.1-0.20250304182835-b70a9e3eaa27/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= diff --git a/src/go/build/constraint/expr.go b/src/go/build/constraint/expr.go index e59012361bef6d..0f05f8db6a48cb 100644 --- a/src/go/build/constraint/expr.go +++ 
b/src/go/build/constraint/expr.go @@ -16,6 +16,10 @@ import ( "unicode/utf8" ) +// maxSize is a limit used to control the complexity of expressions, in order +// to prevent stack exhaustion issues due to recursion. +const maxSize = 1000 + // An Expr is a build tag constraint expression. // The underlying concrete type is *[AndExpr], *[OrExpr], *[NotExpr], or *[TagExpr]. type Expr interface { @@ -151,7 +155,7 @@ func Parse(line string) (Expr, error) { return parseExpr(text) } if text, ok := splitPlusBuild(line); ok { - return parsePlusBuildExpr(text), nil + return parsePlusBuildExpr(text) } return nil, errNotConstraint } @@ -201,6 +205,8 @@ type exprParser struct { tok string // last token read isTag bool pos int // position (start) of last token + + size int } // parseExpr parses a boolean build tag expression. @@ -249,6 +255,10 @@ func (p *exprParser) and() Expr { // On entry, the next input token has not yet been lexed. // On exit, the next input token has been lexed and is in p.tok. func (p *exprParser) not() Expr { + p.size++ + if p.size > maxSize { + panic(&SyntaxError{Offset: p.pos, Err: "build expression too large"}) + } p.lex() if p.tok == "!" { p.lex() @@ -388,7 +398,13 @@ func splitPlusBuild(line string) (expr string, ok bool) { } // parsePlusBuildExpr parses a legacy build tag expression (as used with “// +build”). -func parsePlusBuildExpr(text string) Expr { +func parsePlusBuildExpr(text string) (Expr, error) { + // Only allow up to 100 AND/OR operators for "old" syntax. + // This is much less than the limit for "new" syntax, + // but uses of old syntax were always very simple. + const maxOldSize = 100 + size := 0 + var x Expr for _, clause := range strings.Fields(text) { var y Expr @@ -414,19 +430,25 @@ func parsePlusBuildExpr(text string) Expr { if y == nil { y = z } else { + if size++; size > maxOldSize { + return nil, errComplex + } y = and(y, z) } } if x == nil { x = y } else { + if size++; size > maxOldSize { + return nil, errComplex + } x = or(x, y) } } if x == nil { x = tag("ignore") } - return x + return x, nil } // isValidTag reports whether the word is a valid build tag. 
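The maxSize guard added above turns pathologically large build expressions into ordinary parse errors rather than deep recursion. A small example against the public go/build/constraint API; the behavior shown follows the new limits, though the exact error text is an implementation detail.

	package main

	import (
		"fmt"
		"go/build/constraint"
		"strings"
	)

	func main() {
		// A syntactically valid but enormous expression: with the new
		// maxSize limit (1000), Parse reports an error instead of
		// recursing roughly a thousand frames deep.
		huge := "//go:build " + strings.Repeat("(a &&", 1001) + "a" + strings.Repeat(")", 1001)
		if _, err := constraint.Parse(huge); err != nil {
			fmt.Println("rejected:", err) // build expression too large
		}

		// Ordinary expressions are unaffected.
		expr, err := constraint.Parse("//go:build linux && (amd64 || arm64)")
		fmt.Println(expr, err) // linux && (amd64 || arm64) <nil>
	}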
diff --git a/src/go/build/constraint/expr_test.go b/src/go/build/constraint/expr_test.go index 15d189012efb7d..ac38ba69294930 100644 --- a/src/go/build/constraint/expr_test.go +++ b/src/go/build/constraint/expr_test.go @@ -222,7 +222,7 @@ var parsePlusBuildExprTests = []struct { func TestParsePlusBuildExpr(t *testing.T) { for i, tt := range parsePlusBuildExprTests { t.Run(fmt.Sprint(i), func(t *testing.T) { - x := parsePlusBuildExpr(tt.in) + x, _ := parsePlusBuildExpr(tt.in) if x.String() != tt.x.String() { t.Errorf("parsePlusBuildExpr(%q):\nhave %v\nwant %v", tt.in, x, tt.x) } @@ -319,3 +319,66 @@ func TestPlusBuildLines(t *testing.T) { }) } } + +func TestSizeLimits(t *testing.T) { + for _, tc := range []struct { + name string + expr string + }{ + { + name: "go:build or limit", + expr: "//go:build " + strings.Repeat("a || ", maxSize+2), + }, + { + name: "go:build and limit", + expr: "//go:build " + strings.Repeat("a && ", maxSize+2), + }, + { + name: "go:build and depth limit", + expr: "//go:build " + strings.Repeat("(a &&", maxSize+2), + }, + { + name: "go:build or depth limit", + expr: "//go:build " + strings.Repeat("(a ||", maxSize+2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.expr) + if err == nil { + t.Error("expression did not trigger limit") + } else if syntaxErr, ok := err.(*SyntaxError); !ok || syntaxErr.Err != "build expression too large" { + if !ok { + t.Errorf("unexpected error: %v", err) + } else { + t.Errorf("unexpected syntax error: %s", syntaxErr.Err) + } + } + }) + } +} + +func TestPlusSizeLimits(t *testing.T) { + maxOldSize := 100 + for _, tc := range []struct { + name string + expr string + }{ + { + name: "+build or limit", + expr: "// +build " + strings.Repeat("a ", maxOldSize+2), + }, + { + name: "+build and limit", + expr: "// +build " + strings.Repeat("a,", maxOldSize+2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.expr) + if err == nil { + t.Error("expression did not trigger limit") + } else if err != errComplex { + t.Errorf("unexpected error: got %q, want %q", err, errComplex) + } + }) + } +} diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index 17808b366f092d..f268dea1a6f9cd 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -1676,6 +1676,8 @@ func (p *parser) parseElementList() (list []ast.Expr) { } func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "LiteralValue")) } diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go index eea743c2b5b261..2c33e9ef314ad3 100644 --- a/src/go/parser/parser_test.go +++ b/src/go/parser/parser_test.go @@ -598,10 +598,11 @@ var parseDepthTests = []struct { {name: "chan2", format: "package main; var x «<-chan »int"}, {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType {name: "map", format: "package main; var x «map[int]»int"}, - {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr + 
{name: "slicelit", format: "package main; var x = []any{«[]any{«»}»}", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 3}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "element", format: "package main; var x = struct{x any}{x: «{«»}»}"}, {name: "dot", format: "package main; var x = «x.»x"}, {name: "index", format: "package main; var x = x«[1]»"}, {name: "slice", format: "package main; var x = x«[1:2]»"}, diff --git a/src/go/types/alias.go b/src/go/types/alias.go index af43471a324176..7adb3deb58bbc7 100644 --- a/src/go/types/alias.go +++ b/src/go/types/alias.go @@ -137,10 +137,10 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { // newAliasInstance creates a new alias instance for the given origin and type // arguments, recording pos as the position of its synthetic object (for error // reporting). -func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias { +func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type, expanding *Named, ctxt *Context) *Alias { assert(len(targs) > 0) obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) - rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt) + rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), expanding, ctxt) res := check.newAlias(obj, rhs) res.orig = orig res.tparams = orig.tparams diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index beed94f3557996..a7aa6488028ecd 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2904,22 +2904,48 @@ func TestFileVersions(t *testing.T) { fileVersion string wantVersion string }{ - {"", "", ""}, // no versions specified - {"go1.19", "", "go1.19"}, // module version specified - {"", "go1.20", ""}, // file upgrade ignored - {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted - {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "", "go1"}, // no file version specified + {"go1", "goo1.22", "go1"}, // invalid file version specified + {"go1", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.19", "", "go1.19"}, // no file version specified + {"go1.19", "goo1.22", "go1.19"}, // invalid file version specified + {"go1.19", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.19", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.19", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.20", "", "go1.20"}, // no file version specified + {"go1.20", "goo1.22", "go1.20"}, // invalid file version specified + {"go1.20", "go1.19", "go1.21"}, // file version specified below 
minimum of 1.21 + {"go1.20", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.20", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.21", "", "go1.21"}, // no file version specified + {"go1.21", "goo1.22", "go1.21"}, // invalid file version specified + {"go1.21", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.21", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.22", "", "go1.22"}, // no file version specified + {"go1.22", "goo1.22", "go1.22"}, // invalid file version specified + {"go1.22", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.22", "go1.22", "go1.22"}, // file version specified above 1.21 // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified - {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored - {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted - {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.20.1", "go1.19.1", "go1.20.1"}, // invalid file version + {"go1.20.1", "go1.21.1", "go1.20.1"}, // invalid file version + {"go1.21.1", "go1.19.1", "go1.21.1"}, // invalid file version + {"go1.21.1", "go1.21.1", "go1.21.1"}, // invalid file version + {"go1.22.1", "go1.19.1", "go1.22.1"}, // invalid file version + {"go1.22.1", "go1.21.1", "go1.22.1"}, // invalid file version } { var src string if test.fileVersion != "" { diff --git a/src/go/types/check.go b/src/go/types/check.go index 1a5a41a3bb4b99..8a729094961fe2 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -349,7 +349,6 @@ func (check *Checker) initFiles(files []*ast.File) { check.errorf(files[0], TooNew, "package requires newer Go version %v (application built with %v)", check.version, go_current) } - downgradeOk := check.version.cmp(go1_21) >= 0 // determine Go version for each file for _, file := range check.files { @@ -358,33 +357,19 @@ func (check *Checker) initFiles(files []*ast.File) { // unlike file versions which are Go language versions only, if valid.) v := check.conf.GoVersion - fileVersion := asGoVersion(file.GoVersion) - if fileVersion.isValid() { - // use the file version, if applicable - // (file versions are either the empty string or of the form go1.dd) - if pkgVersionOk { - cmp := fileVersion.cmp(check.version) - // Go 1.21 introduced the feature of setting the go.mod - // go line to an early version of Go and allowing //go:build lines - // to “upgrade” (cmp > 0) the Go version in a given file. - // We can do that backwards compatibly. - // - // Go 1.21 also introduced the feature of allowing //go:build lines - // to “downgrade” (cmp < 0) the Go version in a given file. - // That can't be done compatibly in general, since before the - // build lines were ignored and code got the module's Go version. 
- // To work around this, downgrades are only allowed when the - // module's Go version is Go 1.21 or later. - // - // If there is no valid check.version, then we don't really know what - // Go version to apply. - // Legacy tools may do this, and they historically have accepted everything. - // Preserve that behavior by ignoring //go:build constraints entirely in that - // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { - v = file.GoVersion - } - } + // If the file specifies a version, use max(fileVersion, go1.21). + if fileVersion := asGoVersion(file.GoVersion); fileVersion.isValid() { + // Go 1.21 introduced the feature of setting the go.mod + // go line to an early version of Go and allowing //go:build lines + // to set the Go version in a given file. Versions Go 1.21 and later + // can be set backwards compatibly as that was the first version + // files with go1.21 or later build tags could be built with. + // + // Set the version to max(fileVersion, go1.21): That will allow a + // downgrade to a version before go1.22, where the for loop semantics + // change was made, while being backwards compatible with versions of + // go before the new //go:build semantics were introduced. + v = string(versionMax(fileVersion, go1_21)) // Report a specific error for each tagged file that's too new. // (Normally the build system will have filtered files by version, @@ -399,6 +384,13 @@ func (check *Checker) initFiles(files []*ast.File) { } } +func versionMax(a, b goVersion) goVersion { + if a.cmp(b) < 0 { + return b + } + return a +} + // A bailout panic is used for early termination. type bailout struct{} diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go index 7bec790b5586ad..0435f2bf261647 100644 --- a/src/go/types/instantiate.go +++ b/src/go/types/instantiate.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "go/token" + "internal/buildcfg" . "internal/types/errors" ) @@ -129,8 +130,9 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily case *Alias: - // TODO(gri) is this correct? 
- assert(expanding == nil) // Alias instances cannot be reached from Named types + if !buildcfg.Experiment.AliasTypeParams { + assert(expanding == nil) // Alias instances cannot be reached from Named types + } tparams := orig.TypeParams() // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) @@ -141,7 +143,7 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex return orig // nothing to do (minor optimization) } - return check.newAliasInstance(pos, orig, targs, ctxt) + return check.newAliasInstance(pos, orig, targs, expanding, ctxt) case *Signature: assert(expanding == nil) // function instances cannot be reached from Named types diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go index 3f459d3883017e..da0c0c1255b63e 100644 --- a/src/go/types/issues_test.go +++ b/src/go/types/issues_test.go @@ -1131,3 +1131,23 @@ func f(x int) { t.Errorf("got: %s want: %s", got, want) } } + +func TestIssue68877(t *testing.T) { + const src = ` +package p + +type ( + S struct{} + A = S + T A +)` + + t.Setenv("GODEBUG", "gotypesalias=1") + pkg := mustTypecheck(src, nil, nil) + T := pkg.Scope().Lookup("T").(*TypeName) + got := T.String() // this must not panic (was issue) + const want = "type p.T struct{}" + if got != want { + t.Errorf("got %s, want %s", got, want) + } +} diff --git a/src/go/types/named.go b/src/go/types/named.go index b44fa9d788c345..d55b023812d108 100644 --- a/src/go/types/named.go +++ b/src/go/types/named.go @@ -285,7 +285,7 @@ func (t *Named) cleanup() { if t.TypeArgs().Len() == 0 { panic("nil underlying") } - case *Named: + case *Named, *Alias: t.under() // t.under may add entries to check.cleaners } t.check = nil diff --git a/src/go/types/subst.go b/src/go/types/subst.go index 5ad2ff61eb1d30..6be106d3aa99d6 100644 --- a/src/go/types/subst.go +++ b/src/go/types/subst.go @@ -118,7 +118,7 @@ func (subst *subster) typ(typ Type) Type { // that has a type argument for it. targs, updated := subst.typeList(t.TypeArgs().list()) if updated { - return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt) + return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.expanding, subst.ctxt) } case *Array: diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index d280bf2f5ff5cf..a1d7e6cc994e48 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -134,8 +134,8 @@ func (s *_TypeSet) underIs(f func(Type) bool) bool { } for _, t := range s.terms { assert(t.typ != nil) - // x == under(x) for ~x terms - u := t.typ + // Unalias(x) == under(x) for ~x terms + u := Unalias(t.typ) if !t.tilde { u = under(u) } diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 786bafff723c96..b8eefe0da8dbba 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -177,6 +177,15 @@ func TypeOf(a any) *Type { return (*Type)(NoEscape(unsafe.Pointer(eface.Type))) } +// TypeFor returns the abi.Type for a type parameter. 
+func TypeFor[T any]() *Type { + var v T + if t := TypeOf(v); t != nil { + return t // optimize for T being a non-interface kind + } + return TypeOf((*T)(nil)).Elem() // only for an interface kind +} + func (t *Type) Kind() Kind { return t.Kind_ & KindMask } func (t *Type) HasName() bool { diff --git a/src/internal/godebugs/godebugs_test.go b/src/internal/godebugs/godebugs_test.go index 046193b5c6b13b..168acc134aa753 100644 --- a/src/internal/godebugs/godebugs_test.go +++ b/src/internal/godebugs/godebugs_test.go @@ -46,7 +46,8 @@ func TestAll(t *testing.T) { if info.Old != "" && info.Changed == 0 { t.Errorf("Name=%s has Old, missing Changed", info.Name) } - if !strings.Contains(doc, "`"+info.Name+"`") { + if !strings.Contains(doc, "`"+info.Name+"`") && + !strings.Contains(doc, "`"+info.Name+"=") { t.Errorf("Name=%s not documented in doc/godebug.md", info.Name) } if !info.Opaque && !incs[info.Name] { diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index a802ac9c3708d1..f5831bc54cc114 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -25,6 +25,7 @@ type Info struct { // Note: After adding entries to this table, update the list in doc/godebug.md as well. // (Otherwise the test in this package will fail.) var All = []Info{ + {Name: "allowmultiplevcs", Package: "cmd/go"}, {Name: "asynctimerchan", Package: "time", Changed: 23, Old: "1"}, {Name: "execerrdot", Package: "os/exec"}, {Name: "gocachehash", Package: "cmd/go"}, @@ -54,8 +55,8 @@ var All = []Info{ {Name: "tlsmaxrsasize", Package: "crypto/tls"}, {Name: "tlsrsakex", Package: "crypto/tls", Changed: 22, Old: "1"}, {Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"}, - {Name: "winreadlinkvolume", Package: "os", Changed: 22, Old: "0"}, - {Name: "winsymlink", Package: "os", Changed: 22, Old: "0"}, + {Name: "winreadlinkvolume", Package: "os", Changed: 23, Old: "0"}, + {Name: "winsymlink", Package: "os", Changed: 23, Old: "0"}, {Name: "x509keypairleaf", Package: "crypto/tls", Changed: 23, Old: "0"}, {Name: "x509negativeserial", Package: "crypto/x509", Changed: 23, Old: "1"}, {Name: "x509sha1", Package: "crypto/x509"}, diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 669df94cc12e0d..d1023d4ebb9938 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -32,28 +32,46 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if int64(n) > remain { n = int(remain) } + m := n pos1 := pos n, err = syscall.Sendfile(dst, src, &pos1, n) if n > 0 { pos += int64(n) written += int64(n) remain -= int64(n) + // (n, nil) indicates that sendfile(2) has transferred + // the exact number of bytes we requested, or some unretryable + // error has occurred with partial bytes sent. Either way, we + // don't need to go through the following logic to check EINTR + // or fall into dstFD.pd.waitWrite; just continue to send the + // next chunk or break the loop. + if n == m { + continue + } else if err != syscall.EAGAIN && + err != syscall.EINTR && + err != syscall.EBUSY { + // In particular, EPIPE. Errors like that would normally lead + // the subsequent sendfile(2) call to return (-1, EBADF). + break + } + } else if err != syscall.EAGAIN && err != syscall.EINTR { + // This includes syscall.ENOSYS (no kernel + // support) and syscall.EINVAL (fd types which + // don't implement sendfile), and other errors.
+ // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. + break } if err == syscall.EINTR { continue } - // This includes syscall.ENOSYS (no kernel - // support) and syscall.EINVAL (fd types which - // don't implement sendfile), and other errors. - // We should end the loop when there is no error - // returned from sendfile(2) or it is not a retryable error. - if err != syscall.EAGAIN { - break - } if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } - handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + if err == syscall.EAGAIN { + err = nil + } + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP) return } diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index d1c4d5c0d3d34d..1c4130d45da89c 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index ec675833a225dc..b7c3f81a1efdcd 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/testenv/testenv.go b/src/internal/testenv/testenv.go index 9fb92406e8d85e..a6ebcb0f705ec8 100644 --- a/src/internal/testenv/testenv.go +++ b/src/internal/testenv/testenv.go @@ -522,3 +522,26 @@ func ParallelOn64Bit(t *testing.T) { } t.Parallel() } + +// CPUProfilingBroken returns true if CPU profiling has known issues on this +// platform. +func CPUProfilingBroken() bool { + switch runtime.GOOS { + case "plan9": + // Profiling unimplemented. + return true + case "aix": + // See https://golang.org/issue/45170. + return true + case "ios", "dragonfly", "netbsd", "illumos", "solaris": + // See https://golang.org/issue/13841. + return true + case "openbsd": + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + // See https://golang.org/issue/13841. 
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/src/internal/types/testdata/check/go1_20_19.go b/src/internal/types/testdata/check/go1_20_19.go
index 08365a7cfb564d..e040d396c7808b 100644
--- a/src/internal/types/testdata/check/go1_20_19.go
+++ b/src/internal/types/testdata/check/go1_20_19.go
@@ -14,4 +14,4 @@ type Slice []byte
 type Array [8]byte
 
 var s Slice
-var p = (Array)(s /* ok because Go 1.20 ignored the //go:build go1.19 */)
+var p = (Array)(s /* ok because file versions below go1.21 set the language version to go1.21 */)
diff --git a/src/internal/types/testdata/check/go1_21_19.go b/src/internal/types/testdata/check/go1_21_19.go
index 2acd25865d4b69..5866033eafe6f8 100644
--- a/src/internal/types/testdata/check/go1_21_19.go
+++ b/src/internal/types/testdata/check/go1_21_19.go
@@ -14,4 +14,4 @@ type Slice []byte
 type Array [8]byte
 
 var s Slice
-var p = (Array)(s /* ERROR "requires go1.20 or later" */)
+var p = (Array)(s /* ok because file versions below go1.21 set the language version to go1.21 */)
diff --git a/src/internal/types/testdata/check/go1_21_22.go b/src/internal/types/testdata/check/go1_21_22.go
new file mode 100644
index 00000000000000..3939b7b1d868c0
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_21_22.go
@@ -0,0 +1,16 @@
+// -lang=go1.21
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+//go:build go1.22
+
+package p
+
+func f() {
+	for _ = range /* ok because of upgrade to 1.22 */ 10 {
+	}
+}
diff --git a/src/internal/types/testdata/check/go1_22_21.go b/src/internal/types/testdata/check/go1_22_21.go
new file mode 100644
index 00000000000000..f910ecb59cbc78
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_22_21.go
@@ -0,0 +1,16 @@
+// -lang=go1.22
+
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+//go:build go1.21
+
+package p
+
+func f() {
+	for _ = range 10 /* ERROR "requires go1.22 or later" */ {
+	}
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue66285.go b/src/internal/types/testdata/fixedbugs/issue66285.go
index 9811fec3f35549..4af76f05da8e41 100644
--- a/src/internal/types/testdata/fixedbugs/issue66285.go
+++ b/src/internal/types/testdata/fixedbugs/issue66285.go
@@ -1,14 +1,9 @@
-// -lang=go1.21
+// -lang=go1.13
 
 // Copyright 2024 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Note: Downgrading to go1.13 requires at least go1.21,
-// hence the need for -lang=go1.21 at the top.
-
-//go:build go1.13
-
 package p
 
 import "io"
diff --git a/src/internal/types/testdata/fixedbugs/issue68903.go b/src/internal/types/testdata/fixedbugs/issue68903.go
new file mode 100644
index 00000000000000..b1369aa0f6faa7
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue68903.go
@@ -0,0 +1,24 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package p + +type A = [4]int +type B = map[string]interface{} + +func _[T ~A](x T) { + _ = len(x) +} + +func _[U ~A](x U) { + _ = cap(x) +} + +func _[V ~A]() { + _ = V{} +} + +func _[W ~B](a interface{}) { + _ = a.(W)["key"] +} diff --git a/src/internal/types/testdata/fixedbugs/issue68935.go b/src/internal/types/testdata/fixedbugs/issue68935.go new file mode 100644 index 00000000000000..2e72468f05eb0c --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68935.go @@ -0,0 +1,26 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A = struct { + F string + G int +} + +func Make[T ~A]() T { + return T{ + F: "blah", + G: 1234, + } +} + +type N struct { + F string + G int +} + +func _() { + _ = Make[N]() +} diff --git a/src/internal/weak/pointer_test.go b/src/internal/weak/pointer_test.go index e143749230f0a5..5a861bb9ca39d7 100644 --- a/src/internal/weak/pointer_test.go +++ b/src/internal/weak/pointer_test.go @@ -5,9 +5,12 @@ package weak_test import ( + "context" "internal/weak" "runtime" + "sync" "testing" + "time" ) type T struct { @@ -128,3 +131,82 @@ func TestPointerFinalizer(t *testing.T) { t.Errorf("weak pointer is non-nil even after finalization: %v", wt) } } + +// Regression test for issue 69210. +// +// Weak-to-strong conversions must shade the new strong pointer, otherwise +// that might be creating the only strong pointer to a white object which +// is hidden in a blackened stack. +// +// Never fails if correct, fails with some high probability if incorrect. +func TestIssue69210(t *testing.T) { + if testing.Short() { + t.Skip("this is a stress test that takes seconds to run on its own") + } + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // What we're trying to do is manufacture the conditions under which this + // bug happens. Specifically, we want: + // + // 1. To create a whole bunch of objects that are only weakly-pointed-to, + // 2. To call Strong while the GC is in the mark phase, + // 3. The new strong pointer to be missed by the GC, + // 4. The following GC cycle to mark a free object. + // + // Unfortunately, (2) and (3) are hard to control, but we can increase + // the likelihood by having several goroutines do (1) at once while + // another goroutine constantly keeps us in the GC with runtime.GC. + // Like throwing darts at a dart board until they land just right. + // We can increase the likelihood of (4) by adding some delay after + // creating the strong pointer, but only if it's non-nil. If it's nil, + // that means it was already collected in which case there's no chance + // of triggering the bug, so we want to retry as fast as possible. + // Our heap here is tiny, so the GCs will go by fast. + // + // As of 2024-09-03, removing the line that shades pointers during + // the weak-to-strong conversion causes this test to fail about 50% + // of the time. 
+ + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + runtime.GC() + + select { + case <-ctx.Done(): + return + default: + } + } + }() + for range max(runtime.GOMAXPROCS(-1)-1, 1) { + wg.Add(1) + go func() { + defer wg.Done() + for { + for range 5 { + bt := new(T) + wt := weak.Make(bt) + bt = nil + time.Sleep(1 * time.Millisecond) + bt = wt.Strong() + if bt != nil { + time.Sleep(4 * time.Millisecond) + bt.t = bt + bt.a = 12 + } + runtime.KeepAlive(bt) + } + select { + case <-ctx.Done(): + return + default: + } + } + }() + } + wg.Wait() +} diff --git a/src/net/http/client.go b/src/net/http/client.go index cbf7c545019b8c..2fe49cb93dd60c 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -613,8 +613,9 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { reqBodyClosed = false // have we closed the current req.Body? // Redirect behavior: - redirectMethod string - includeBody bool + redirectMethod string + includeBody = true + stripSensitiveHeaders = false ) uerr := func(err error) error { // the body may have been closed already by c.send() @@ -681,7 +682,12 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { // in case the user set Referer on their first request. // If they really want to override, they can do it in // their CheckRedirect func. - copyHeaders(req) + if !stripSensitiveHeaders && reqs[0].URL.Host != req.URL.Host { + if !shouldCopyHeaderOnRedirect(reqs[0].URL, req.URL) { + stripSensitiveHeaders = true + } + } + copyHeaders(req, stripSensitiveHeaders) // Add the Referer header from the most recent // request URL to the new one, if it's not https->http: @@ -744,7 +750,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { // makeHeadersCopier makes a function that copies headers from the // initial Request, ireq. For every redirect, this function must be called // so that it can copy headers into the upcoming Request. -func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) { +func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders bool) { // The headers to copy are from the very initial request. // We use a closured callback to keep a reference to these original headers. var ( @@ -758,8 +764,7 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) { } } - preq := ireq // The previous request - return func(req *Request) { + return func(req *Request, stripSensitiveHeaders bool) { // If Jar is present and there was some initial cookies provided // via the request header, then we may need to alter the initial // cookies as we follow redirects since each redirect may end up @@ -796,12 +801,16 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) { // Copy the initial request's Header values // (at least the safe ones). 
for k, vv := range ireqhdr { - if shouldCopyHeaderOnRedirect(k, preq.URL, req.URL) { + sensitive := false + switch CanonicalHeaderKey(k) { + case "Authorization", "Www-Authenticate", "Cookie", "Cookie2", + "Proxy-Authorization", "Proxy-Authenticate": + sensitive = true + } + if !(sensitive && stripSensitiveHeaders) { req.Header[k] = vv } } - - preq = req // Update previous Request with the current request } } @@ -977,28 +986,23 @@ func (b *cancelTimerBody) Close() error { return err } -func shouldCopyHeaderOnRedirect(headerKey string, initial, dest *url.URL) bool { - switch CanonicalHeaderKey(headerKey) { - case "Authorization", "Www-Authenticate", "Cookie", "Cookie2": - // Permit sending auth/cookie headers from "foo.com" - // to "sub.foo.com". - - // Note that we don't send all cookies to subdomains - // automatically. This function is only used for - // Cookies set explicitly on the initial outgoing - // client request. Cookies automatically added via the - // CookieJar mechanism continue to follow each - // cookie's scope as set by Set-Cookie. But for - // outgoing requests with the Cookie header set - // directly, we don't know their scope, so we assume - // it's for *.domain.com. - - ihost := idnaASCIIFromURL(initial) - dhost := idnaASCIIFromURL(dest) - return isDomainOrSubdomain(dhost, ihost) - } - // All other headers are copied: - return true +func shouldCopyHeaderOnRedirect(initial, dest *url.URL) bool { + // Permit sending auth/cookie headers from "foo.com" + // to "sub.foo.com". + + // Note that we don't send all cookies to subdomains + // automatically. This function is only used for + // Cookies set explicitly on the initial outgoing + // client request. Cookies automatically added via the + // CookieJar mechanism continue to follow each + // cookie's scope as set by Set-Cookie. But for + // outgoing requests with the Cookie header set + // directly, we don't know their scope, so we assume + // it's for *.domain.com. + + ihost := idnaASCIIFromURL(initial) + dhost := idnaASCIIFromURL(dest) + return isDomainOrSubdomain(dhost, ihost) } // isDomainOrSubdomain reports whether sub is a subdomain (or exact diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index 1faa1516479e75..1f9eebea57dc87 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -1536,6 +1536,58 @@ func testClientCopyHeadersOnRedirect(t *testing.T, mode testMode) { } } +// Issue #70530: Once we strip a header on a redirect to a different host, +// the header should stay stripped across any further redirects. +func TestClientStripHeadersOnRepeatedRedirect(t *testing.T) { + run(t, testClientStripHeadersOnRepeatedRedirect) +} +func testClientStripHeadersOnRepeatedRedirect(t *testing.T, mode testMode) { + var proto string + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Host+r.URL.Path != "a.example.com/" { + if h := r.Header.Get("Authorization"); h != "" { + t.Errorf("on request to %v%v, Authorization=%q, want no header", r.Host, r.URL.Path, h) + } else if h := r.Header.Get("Proxy-Authorization"); h != "" { + t.Errorf("on request to %v%v, Proxy-Authorization=%q, want no header", r.Host, r.URL.Path, h) + } + } + // Follow a chain of redirects from a to b and back to a. + // The Authorization header is stripped on the first redirect to b, + // and stays stripped even if we're sent back to a. 
+ switch r.Host + r.URL.Path { + case "a.example.com/": + Redirect(w, r, proto+"://b.example.com/", StatusFound) + case "b.example.com/": + Redirect(w, r, proto+"://b.example.com/redirect", StatusFound) + case "b.example.com/redirect": + Redirect(w, r, proto+"://a.example.com/redirect", StatusFound) + case "a.example.com/redirect": + w.Header().Set("X-Done", "true") + default: + t.Errorf("unexpected request to %v", r.URL) + } + })).ts + proto, _, _ = strings.Cut(ts.URL, ":") + + c := ts.Client() + c.Transport.(*Transport).Dial = func(_ string, _ string) (net.Conn, error) { + return net.Dial("tcp", ts.Listener.Addr().String()) + } + + req, _ := NewRequest("GET", proto+"://a.example.com/", nil) + req.Header.Add("Cookie", "foo=bar") + req.Header.Add("Authorization", "secretpassword") + req.Header.Add("Proxy-Authorization", "secretpassword") + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.Header.Get("X-Done") != "true" { + t.Fatalf("response missing expected header: X-Done=true") + } +} + // Issue 22233: copy host when Client follows a relative redirect. func TestClientCopyHostOnRedirect(t *testing.T) { run(t, testClientCopyHostOnRedirect) } func testClientCopyHostOnRedirect(t *testing.T, mode testMode) { @@ -1702,43 +1754,39 @@ func testClientAltersCookiesOnRedirect(t *testing.T, mode testMode) { // Part of Issue 4800 func TestShouldCopyHeaderOnRedirect(t *testing.T) { tests := []struct { - header string initialURL string destURL string want bool }{ - {"User-Agent", "http://foo.com/", "http://bar.com/", true}, - {"X-Foo", "http://foo.com/", "http://bar.com/", true}, - // Sensitive headers: - {"cookie", "http://foo.com/", "http://bar.com/", false}, - {"cookie2", "http://foo.com/", "http://bar.com/", false}, - {"authorization", "http://foo.com/", "http://bar.com/", false}, - {"authorization", "http://foo.com/", "https://foo.com/", true}, - {"authorization", "http://foo.com:1234/", "http://foo.com:4321/", true}, - {"www-authenticate", "http://foo.com/", "http://bar.com/", false}, - {"authorization", "http://foo.com/", "http://[::1%25.foo.com]/", false}, + {"http://foo.com/", "http://bar.com/", false}, + {"http://foo.com/", "http://bar.com/", false}, + {"http://foo.com/", "http://bar.com/", false}, + {"http://foo.com/", "https://foo.com/", true}, + {"http://foo.com:1234/", "http://foo.com:4321/", true}, + {"http://foo.com/", "http://bar.com/", false}, + {"http://foo.com/", "http://[::1%25.foo.com]/", false}, // But subdomains should work: - {"www-authenticate", "http://foo.com/", "http://foo.com/", true}, - {"www-authenticate", "http://foo.com/", "http://sub.foo.com/", true}, - {"www-authenticate", "http://foo.com/", "http://notfoo.com/", false}, - {"www-authenticate", "http://foo.com/", "https://foo.com/", true}, - {"www-authenticate", "http://foo.com:80/", "http://foo.com/", true}, - {"www-authenticate", "http://foo.com:80/", "http://sub.foo.com/", true}, - {"www-authenticate", "http://foo.com:443/", "https://foo.com/", true}, - {"www-authenticate", "http://foo.com:443/", "https://sub.foo.com/", true}, - {"www-authenticate", "http://foo.com:1234/", "http://foo.com/", true}, - - {"authorization", "http://foo.com/", "http://foo.com/", true}, - {"authorization", "http://foo.com/", "http://sub.foo.com/", true}, - {"authorization", "http://foo.com/", "http://notfoo.com/", false}, - {"authorization", "http://foo.com/", "https://foo.com/", true}, - {"authorization", "http://foo.com:80/", "http://foo.com/", true}, - {"authorization", "http://foo.com:80/", 
"http://sub.foo.com/", true}, - {"authorization", "http://foo.com:443/", "https://foo.com/", true}, - {"authorization", "http://foo.com:443/", "https://sub.foo.com/", true}, - {"authorization", "http://foo.com:1234/", "http://foo.com/", true}, + {"http://foo.com/", "http://foo.com/", true}, + {"http://foo.com/", "http://sub.foo.com/", true}, + {"http://foo.com/", "http://notfoo.com/", false}, + {"http://foo.com/", "https://foo.com/", true}, + {"http://foo.com:80/", "http://foo.com/", true}, + {"http://foo.com:80/", "http://sub.foo.com/", true}, + {"http://foo.com:443/", "https://foo.com/", true}, + {"http://foo.com:443/", "https://sub.foo.com/", true}, + {"http://foo.com:1234/", "http://foo.com/", true}, + + {"http://foo.com/", "http://foo.com/", true}, + {"http://foo.com/", "http://sub.foo.com/", true}, + {"http://foo.com/", "http://notfoo.com/", false}, + {"http://foo.com/", "https://foo.com/", true}, + {"http://foo.com:80/", "http://foo.com/", true}, + {"http://foo.com:80/", "http://sub.foo.com/", true}, + {"http://foo.com:443/", "https://foo.com/", true}, + {"http://foo.com:443/", "https://sub.foo.com/", true}, + {"http://foo.com:1234/", "http://foo.com/", true}, } for i, tt := range tests { u0, err := url.Parse(tt.initialURL) @@ -1751,10 +1799,10 @@ func TestShouldCopyHeaderOnRedirect(t *testing.T) { t.Errorf("%d. dest URL %q parse error: %v", i, tt.destURL, err) continue } - got := Export_shouldCopyHeaderOnRedirect(tt.header, u0, u1) + got := Export_shouldCopyHeaderOnRedirect(u0, u1) if got != tt.want { - t.Errorf("%d. shouldCopyHeaderOnRedirect(%q, %q => %q) = %v; want %v", - i, tt.header, tt.initialURL, tt.destURL, got, tt.want) + t.Errorf("%d. shouldCopyHeaderOnRedirect(%q => %q) = %v; want %v", + i, tt.initialURL, tt.destURL, got, tt.want) } } } diff --git a/src/net/http/internal/chunked.go b/src/net/http/internal/chunked.go index 196b5d892589ab..0b08a97a0831ec 100644 --- a/src/net/http/internal/chunked.go +++ b/src/net/http/internal/chunked.go @@ -164,6 +164,19 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) { } return nil, err } + + // RFC 9112 permits parsers to accept a bare \n as a line ending in headers, + // but not in chunked encoding lines. See https://www.rfc-editor.org/errata/eid7633, + // which explicitly rejects a clarification permitting \n as a chunk terminator. + // + // Verify that the line ends in a CRLF, and that no CRs appear before the end. 
+ if idx := bytes.IndexByte(p, '\r'); idx == -1 { + return nil, errors.New("chunked line ends with bare LF") + } else if idx != len(p)-2 { + return nil, errors.New("invalid CR in chunked line") + } + p = p[:len(p)-2] // trim CRLF + if len(p) >= maxLineLength { return nil, ErrLineTooLong } @@ -171,14 +184,14 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) { } func trimTrailingWhitespace(b []byte) []byte { - for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + for len(b) > 0 && isOWS(b[len(b)-1]) { b = b[:len(b)-1] } return b } -func isASCIISpace(b byte) bool { - return b == ' ' || b == '\t' || b == '\n' || b == '\r' +func isOWS(b byte) bool { + return b == ' ' || b == '\t' } var semi = []byte(";") diff --git a/src/net/http/internal/chunked_test.go b/src/net/http/internal/chunked_test.go index af79711781a7ed..312f1734a6385c 100644 --- a/src/net/http/internal/chunked_test.go +++ b/src/net/http/internal/chunked_test.go @@ -280,6 +280,33 @@ func TestChunkReaderByteAtATime(t *testing.T) { } } +func TestChunkInvalidInputs(t *testing.T) { + for _, test := range []struct { + name string + b string + }{{ + name: "bare LF in chunk size", + b: "1\na\r\n0\r\n", + }, { + name: "extra LF in chunk size", + b: "1\r\r\na\r\n0\r\n", + }, { + name: "bare LF in chunk data", + b: "1\r\na\n0\r\n", + }, { + name: "bare LF in chunk extension", + b: "1;\na\r\n0\r\n", + }} { + t.Run(test.name, func(t *testing.T) { + r := NewChunkedReader(strings.NewReader(test.b)) + got, err := io.ReadAll(r) + if err == nil { + t.Fatalf("unexpectedly parsed invalid chunked data:\n%q", got) + } + }) + } +} + type funcReader struct { f func(iteration int) ([]byte, error) i int diff --git a/src/net/http/internal/testcert/testcert.go b/src/net/http/internal/testcert/testcert.go index d510e791d617cb..78ce42e2282679 100644 --- a/src/net/http/internal/testcert/testcert.go +++ b/src/net/http/internal/testcert/testcert.go @@ -10,56 +10,56 @@ import "strings" // LocalhostCert is a PEM-encoded TLS cert with SAN IPs // "127.0.0.1" and "[::1]", expiring at Jan 29 16:00:00 2084 GMT. 
// generated from src/crypto/tls: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com,*.example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var LocalhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIDOTCCAiGgAwIBAgIQSRJrEpBGFc7tNb1fb5pKFzANBgkqhkiG9w0BAQsFADAS +MIIDSDCCAjCgAwIBAgIQEP/md970HysdBTpuzDOf0DANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA6Gba5tHV1dAKouAaXO3/ebDUU4rvwCUg/CNaJ2PT5xLD4N1Vcb8r -bFSW2HXKq+MPfVdwIKR/1DczEoAGf/JWQTW7EgzlXrCd3rlajEX2D73faWJekD0U -aUgz5vtrTXZ90BQL7WvRICd7FlEZ6FPOcPlumiyNmzUqtwGhO+9ad1W5BqJaRI6P -YfouNkwR6Na4TzSj5BrqUfP0FwDizKSJ0XXmh8g8G9mtwxOSN3Ru1QFc61Xyeluk -POGKBV/q6RBNklTNe0gI8usUMlYyoC7ytppNMW7X2vodAelSu25jgx2anj9fDVZu -h7AXF5+4nJS4AAt0n1lNY7nGSsdZas8PbQIDAQABo4GIMIGFMA4GA1UdDwEB/wQE +MIIBCgKCAQEAxcl69ROJdxjN+MJZnbFrYxyQooADCsJ6VDkuMyNQIix/Hk15Nk/u +FyBX1Me++aEpGmY3RIY4fUvELqT/srvAHsTXwVVSttMcY8pcAFmXSqo3x4MuUTG/ +jCX3Vftj0r3EM5M8ImY1rzA/jqTTLJg00rD+DmuDABcqQvoXw/RV8w1yTRi5BPoH +DFD/AWTt/YgMvk1l2Yq/xI8VbMUIpjBoGXxWsSevQ5i2s1mk9/yZzu0Ysp1tTlzD +qOPa4ysFjBitdXiwfxjxtv5nXqOCP5rheKO0sWLk0fetMp1OV5JSJMAJw6c2ZMkl +U2WMqAEpRjdE/vHfIuNg+yGaRRqI07NZRQIDAQABo4GXMIGUMA4GA1UdDwEB/wQE AwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBStsdjh3/JCXXYlQryOrL4Sh7BW5TAuBgNVHREEJzAlggtleGFtcGxlLmNv -bYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG9w0BAQsFAAOCAQEAxWGI -5NhpF3nwwy/4yB4i/CwwSpLrWUa70NyhvprUBC50PxiXav1TeDzwzLx/o5HyNwsv -cxv3HdkLW59i/0SlJSrNnWdfZ19oTcS+6PtLoVyISgtyN6DpkKpdG1cOkW3Cy2P2 -+tK/tKHRP1Y/Ra0RiDpOAmqn0gCOFGz8+lqDIor/T7MTpibL3IxqWfPrvfVRHL3B -grw/ZQTTIVjjh4JBSW3WyWgNo/ikC1lrVxzl4iPUGptxT36Cr7Zk2Bsg0XqwbOvK -5d+NTDREkSnUbie4GeutujmX3Dsx88UiV6UY/4lHJa6I5leHUNOHahRbpbWeOfs/ -WkBKOclmOV2xlTVuPw== +DgQWBBQR5QIzmacmw78ZI1C4MXw7Q0wJ1jA9BgNVHREENjA0ggtleGFtcGxlLmNv +bYINKi5leGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG +9w0BAQsFAAOCAQEACrRNgiioUDzxQftd0fwOa6iRRcPampZRDtuaF68yNHoNWbOu +LUwc05eOWxRq3iABGSk2xg+FXM3DDeW4HhAhCFptq7jbVZ+4Jj6HeJG9mYRatAxR +Y/dEpa0D0EHhDxxVg6UzKOXB355n0IetGE/aWvyTV9SiDs6QsaC57Q9qq1/mitx5 +2GFBoapol9L5FxCc77bztzK8CpLujkBi25Vk6GAFbl27opLfpyxkM+rX/T6MXCPO +6/YBacNZ7ff1/57Etg4i5mNA6ubCpuc4Gi9oYqCNNohftr2lkJr7REdDR6OW0lsL +rF7r4gUnKeC7mYIH1zypY7laskopiLFAfe96Kg== -----END CERTIFICATE-----`) // LocalhostKey is the private key for LocalhostCert. 
var LocalhostKey = []byte(testingKey(`-----BEGIN RSA TESTING KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDoZtrm0dXV0Aqi -4Bpc7f95sNRTiu/AJSD8I1onY9PnEsPg3VVxvytsVJbYdcqr4w99V3AgpH/UNzMS -gAZ/8lZBNbsSDOVesJ3euVqMRfYPvd9pYl6QPRRpSDPm+2tNdn3QFAvta9EgJ3sW -URnoU85w+W6aLI2bNSq3AaE771p3VbkGolpEjo9h+i42TBHo1rhPNKPkGupR8/QX -AOLMpInRdeaHyDwb2a3DE5I3dG7VAVzrVfJ6W6Q84YoFX+rpEE2SVM17SAjy6xQy -VjKgLvK2mk0xbtfa+h0B6VK7bmODHZqeP18NVm6HsBcXn7iclLgAC3SfWU1jucZK -x1lqzw9tAgMBAAECggEABWzxS1Y2wckblnXY57Z+sl6YdmLV+gxj2r8Qib7g4ZIk -lIlWR1OJNfw7kU4eryib4fc6nOh6O4AWZyYqAK6tqNQSS/eVG0LQTLTTEldHyVJL -dvBe+MsUQOj4nTndZW+QvFzbcm2D8lY5n2nBSxU5ypVoKZ1EqQzytFcLZpTN7d89 -EPj0qDyrV4NZlWAwL1AygCwnlwhMQjXEalVF1ylXwU3QzyZ/6MgvF6d3SSUlh+sq -XefuyigXw484cQQgbzopv6niMOmGP3of+yV4JQqUSb3IDmmT68XjGd2Dkxl4iPki -6ZwXf3CCi+c+i/zVEcufgZ3SLf8D99kUGE7v7fZ6AQKBgQD1ZX3RAla9hIhxCf+O -3D+I1j2LMrdjAh0ZKKqwMR4JnHX3mjQI6LwqIctPWTU8wYFECSh9klEclSdCa64s -uI/GNpcqPXejd0cAAdqHEEeG5sHMDt0oFSurL4lyud0GtZvwlzLuwEweuDtvT9cJ -Wfvl86uyO36IW8JdvUprYDctrQKBgQDycZ697qutBieZlGkHpnYWUAeImVA878sJ -w44NuXHvMxBPz+lbJGAg8Cn8fcxNAPqHIraK+kx3po8cZGQywKHUWsxi23ozHoxo -+bGqeQb9U661TnfdDspIXia+xilZt3mm5BPzOUuRqlh4Y9SOBpSWRmEhyw76w4ZP -OPxjWYAgwQKBgA/FehSYxeJgRjSdo+MWnK66tjHgDJE8bYpUZsP0JC4R9DL5oiaA -brd2fI6Y+SbyeNBallObt8LSgzdtnEAbjIH8uDJqyOmknNePRvAvR6mP4xyuR+Bv -m+Lgp0DMWTw5J9CKpydZDItc49T/mJ5tPhdFVd+am0NAQnmr1MCZ6nHxAoGABS3Y -LkaC9FdFUUqSU8+Chkd/YbOkuyiENdkvl6t2e52jo5DVc1T7mLiIrRQi4SI8N9bN -/3oJWCT+uaSLX2ouCtNFunblzWHBrhxnZzTeqVq4SLc8aESAnbslKL4i8/+vYZlN -s8xtiNcSvL+lMsOBORSXzpj/4Ot8WwTkn1qyGgECgYBKNTypzAHeLE6yVadFp3nQ -Ckq9yzvP/ib05rvgbvrne00YeOxqJ9gtTrzgh7koqJyX1L4NwdkEza4ilDWpucn0 -xiUZS4SoaJq6ZvcBYS62Yr1t8n09iG47YL8ibgtmH3L+svaotvpVxVK+d7BLevA/ -ZboOWVe3icTy64BT3OQhmg== +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFyXr1E4l3GM34 +wlmdsWtjHJCigAMKwnpUOS4zI1AiLH8eTXk2T+4XIFfUx775oSkaZjdEhjh9S8Qu +pP+yu8AexNfBVVK20xxjylwAWZdKqjfHgy5RMb+MJfdV+2PSvcQzkzwiZjWvMD+O +pNMsmDTSsP4Oa4MAFypC+hfD9FXzDXJNGLkE+gcMUP8BZO39iAy+TWXZir/EjxVs +xQimMGgZfFaxJ69DmLazWaT3/JnO7RiynW1OXMOo49rjKwWMGK11eLB/GPG2/mde +o4I/muF4o7SxYuTR960ynU5XklIkwAnDpzZkySVTZYyoASlGN0T+8d8i42D7IZpF +GojTs1lFAgMBAAECggEAIYthUi1lFBDd5gG4Rzlu+BlBIn5JhcqkCqLEBiJIFfOr +/4yuMRrvS3bNzqWt6xJ9MSAC4ZlN/VobRLnxL/QNymoiGYUKCT3Ww8nvPpPzR9OE +sE68TUL9tJw/zZJcRMKwgvrGqSLimfq53MxxkE+kLdOc0v9C8YH8Re26mB5ZcWYa +7YFyZQpKsQYnsmu/05cMbpOQrQWhtmIqRoyn8mG/par2s3NzjtpSE9NINyz26uFc +k/3ovFJQIHkUmTS7KHD3BgY5vuCqP98HramYnOysJ0WoYgvSDNCWw3037s5CCwJT +gCKuM+Ow6liFrj83RrdKBpm5QUGjfNpYP31o+QNP4QKBgQDSrUQ2XdgtAnibAV7u +7kbxOxro0EhIKso0Y/6LbDQgcXgxLqltkmeqZgG8nC3Z793lhlSasz2snhzzooV5 +5fTy1y8ikXqjhG0nNkInFyOhsI0auE28CFoDowaQd+5cmCatpN4Grqo5PNRXxm1w +HktfPEgoP11NNCFHvvN5fEKbbQKBgQDwVlOaV20IvW3IPq7cXZyiyabouFF9eTRo +VJka1Uv+JtyvL2P0NKkjYHOdN8gRblWqxQtJoTNk020rVA4UP1heiXALy50gvj/p +hMcybPTLYSPOhAGx838KIcvGR5oskP1aUCmFbFQzGELxhJ9diVVjxUtbG2DuwPKd +tD9TLxT2OQKBgQCcdlHSjp+dzdgERmBa0ludjGfPv9/uuNizUBAbO6D690psPFtY +JQMYaemgSd1DngEOFVWADt4e9M5Lose+YCoqr+UxpxmNlyv5kzJOFcFAs/4XeglB +PHKdgNW/NVKxMc6H54l9LPr+x05sYdGlEtqnP/3W5jhEvhJ5Vjc8YiyVgQKBgQCl +zwjyrGo+42GACy7cPYE5FeIfIDqoVByB9guC5bD98JXEDu/opQQjsgFRcBCJZhOY +M0UsURiB8ROaFu13rpQq9KrmmF0ZH+g8FSzQbzcbsTLg4VXCDXmR5esOKowFPypr +Sm667BfTAGP++D5ya7MLmCv6+RKQ5XD8uEQQAaV2kQKBgAD8qeJuWIXZT0VKkQrn +nIhgtzGERF/6sZdQGW2LxTbUDWG74AfFkkEbeBfwEkCZXY/xmnYqYABhvlSex8jU +supU6Eea21esIxIub2zv/Np0ojUb6rlqTPS4Ox1E27D787EJ3VOXpriSD10vyNnZ +jel6uj2FOP9g54s+GzlSVg/T -----END RSA TESTING KEY-----`)) func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") } 
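The chunk-line hardening earlier in this patch (src/net/http/internal/chunked.go) is easiest to observe through net/http/httputil.NewChunkedReader, which wraps the same internal parser. A minimal sketch of the new failure mode, using only the standard library (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"io"
	"net/http/httputil"
	"strings"
)

func main() {
	// The chunk-size line "1\n" ends with a bare LF instead of CRLF.
	// The parser used to tolerate this; with the patch it returns an error.
	r := httputil.NewChunkedReader(strings.NewReader("1\na\r\n0\r\n\r\n"))
	if _, err := io.ReadAll(r); err != nil {
		fmt.Println("rejected:", err) // e.g. "chunked line ends with bare LF"
	}
}

The same rejection applies server-side, which is what the serve_test.go cases below exercise end to end over a real connection.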
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index b2858ba8f2b559..2543935ff61959 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -7284,3 +7284,52 @@ func testServerReadAfterHandlerAbort100Continue(t *testing.T, mode testMode) { readyc <- struct{}{} // server starts reading from the request body readyc <- struct{}{} // server finishes reading from the request body } + +func TestInvalidChunkedBodies(t *testing.T) { + for _, test := range []struct { + name string + b string + }{{ + name: "bare LF in chunk size", + b: "1\na\r\n0\r\n\r\n", + }, { + name: "bare LF at body end", + b: "1\r\na\r\n0\r\n\n", + }} { + t.Run(test.name, func(t *testing.T) { + reqc := make(chan error) + ts := newClientServerTest(t, http1Mode, HandlerFunc(func(w ResponseWriter, r *Request) { + got, err := io.ReadAll(r.Body) + if err == nil { + t.Logf("read body: %q", got) + } + reqc <- err + })).ts + + serverURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + conn, err := net.Dial("tcp", serverURL.Host) + if err != nil { + t.Fatal(err) + } + + if _, err := conn.Write([]byte( + "POST / HTTP/1.1\r\n" + + "Host: localhost\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Connection: close\r\n" + + "\r\n" + + test.b)); err != nil { + t.Fatal(err) + } + conn.(*net.TCPConn).CloseWrite() + + if err := <-reqc; err == nil { + t.Errorf("server handler: io.ReadAll(r.Body) succeeded, want error") + } + }) + } +} diff --git a/src/net/sendfile_unix_alt.go b/src/net/sendfile_unix_alt.go index 9e46c4e607d4d8..4056856f306175 100644 --- a/src/net/sendfile_unix_alt.go +++ b/src/net/sendfile_unix_alt.go @@ -53,6 +53,9 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { if err != nil { return 0, err, false } + if fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) != 0 { + return 0, nil, false + } remain = fi.Size() } diff --git a/src/net/sendfile_unix_test.go b/src/net/sendfile_unix_test.go new file mode 100644 index 00000000000000..79fb23b31010d5 --- /dev/null +++ b/src/net/sendfile_unix_test.go @@ -0,0 +1,86 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import ( + "internal/testpty" + "io" + "os" + "sync" + "syscall" + "testing" +) + +// Issue 70763: test that we don't fail on sendfile from a tty. +func TestCopyFromTTY(t *testing.T) { + pty, ttyName, err := testpty.Open() + if err != nil { + t.Skipf("skipping test because pty open failed: %v", err) + } + defer pty.Close() + + // Use syscall.Open so that the tty is blocking. 
+ ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0) + if err != nil { + t.Skipf("skipping test because tty open failed: %v", err) + } + defer syscall.Close(ttyFD) + + tty := os.NewFile(uintptr(ttyFD), "tty") + defer tty.Close() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + ch := make(chan bool) + + const data = "data\n" + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + + buf := make([]byte, len(data)) + if _, err := io.ReadFull(conn, buf); err != nil { + t.Error(err) + } + + ch <- true + }() + + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + wg.Add(1) + go func() { + defer wg.Done() + if _, err := pty.Write([]byte(data)); err != nil { + t.Error(err) + } + <-ch + if err := pty.Close(); err != nil { + t.Error(err) + } + }() + + lr := io.LimitReader(tty, int64(len(data))) + if _, err := io.Copy(conn, lr); err != nil { + t.Error(err) + } +} diff --git a/src/os/copy_test.go b/src/os/copy_test.go new file mode 100644 index 00000000000000..82346ca4e57e3e --- /dev/null +++ b/src/os/copy_test.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "io" + "math/rand/v2" + "net" + "os" + "runtime" + "sync" + "testing" + + "golang.org/x/net/nettest" +) + +// Exercise sendfile/splice fast paths with a moderately large file. +// +// https://go.dev/issue/70000 + +func TestLargeCopyViaNetwork(t *testing.T) { + const size = 10 * 1024 * 1024 + dir := t.TempDir() + + src, err := os.Create(dir + "/src") + if err != nil { + t.Fatal(err) + } + defer src.Close() + if _, err := io.CopyN(src, newRandReader(), size); err != nil { + t.Fatal(err) + } + if _, err := src.Seek(0, 0); err != nil { + t.Fatal(err) + } + + dst, err := os.Create(dir + "/dst") + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + client, server := createSocketPair(t, "tcp") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + if n, err := io.Copy(dst, server); n != size || err != nil { + t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size) + } + }() + go func() { + defer wg.Done() + defer client.Close() + if n, err := io.Copy(client, src); n != size || err != nil { + t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size) + } + }() + wg.Wait() + + if _, err := dst.Seek(0, 0); err != nil { + t.Fatal(err) + } + if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil { + t.Fatal(err) + } +} + +func compareReaders(a, b io.Reader) error { + bufa := make([]byte, 4096) + bufb := make([]byte, 4096) + for { + na, erra := io.ReadFull(a, bufa) + if erra != nil && erra != io.EOF { + return erra + } + nb, errb := io.ReadFull(b, bufb) + if errb != nil && errb != io.EOF { + return errb + } + if !bytes.Equal(bufa[:na], bufb[:nb]) { + return errors.New("contents mismatch") + } + if erra == io.EOF && errb == io.EOF { + break + } + } + return nil +} + +type randReader struct { + rand *rand.Rand +} + +func newRandReader() *randReader { + return &randReader{rand.New(rand.NewPCG(0, 0))} +} + +func (r *randReader) Read(p []byte) (int, error) { + var v uint64 + var n int + for i := range p { + if n == 0 { + v = r.rand.Uint64() + n = 8 + } + p[i] = byte(v & 0xff) + v >>= 8 + n-- + } + return len(p), nil 
+} + +func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { + t.Helper() + if !nettest.TestableNetwork(proto) { + t.Skipf("%s does not support %q", runtime.GOOS, proto) + } + + ln, err := nettest.NewLocalListener(proto) + if err != nil { + t.Fatalf("NewLocalListener error: %v", err) + } + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if client != nil { + client.Close() + } + if server != nil { + server.Close() + } + }) + ch := make(chan struct{}) + go func() { + var err error + server, err = ln.Accept() + if err != nil { + t.Errorf("Accept new connection error: %v", err) + } + ch <- struct{}{} + }() + client, err = net.Dial(proto, ln.Addr().String()) + <-ch + if err != nil { + t.Fatalf("Dial new connection error: %v", err) + } + return client, server +} diff --git a/src/os/dir.go b/src/os/dir.go index 471a29134582b3..04392193aa6b03 100644 --- a/src/os/dir.go +++ b/src/os/dir.go @@ -132,15 +132,18 @@ func ReadDir(name string) ([]DirEntry, error) { // CopyFS copies the file system fsys into the directory dir, // creating dir if necessary. // -// Newly created directories and files have their default modes -// where any bits from the file in fsys that are not part of the -// standard read, write, and execute permissions will be zeroed -// out, and standard read and write permissions are set for owner, -// group, and others while retaining any existing execute bits from -// the file in fsys. +// Files are created with mode 0o666 plus any execute permissions +// from the source, and directories are created with mode 0o777 +// (before umask). // -// Symbolic links in fsys are not supported, a *PathError with Err set -// to ErrInvalid is returned on symlink. +// CopyFS will not overwrite existing files. If a file name in fsys +// already exists in the destination, CopyFS will return an error +// such that errors.Is(err, fs.ErrExist) will be true. +// +// Symbolic links in fsys are not supported. A *PathError with Err set +// to ErrInvalid is returned when copying from a symbolic link. +// +// Symbolic links in dir are followed. // // Copying stops at and returns the first error encountered. func CopyFS(dir string, fsys fs.FS) error { @@ -174,7 +177,7 @@ func CopyFS(dir string, fsys fs.FS) error { if err != nil { return err } - w, err := OpenFile(newPath, O_CREATE|O_TRUNC|O_WRONLY, 0666|info.Mode()&0777) + w, err := OpenFile(newPath, O_CREATE|O_EXCL|O_WRONLY, 0666|info.Mode()&0777) if err != nil { return err } diff --git a/src/os/example_test.go b/src/os/example_test.go index 7437a74cd0c66d..c507d46c46303a 100644 --- a/src/os/example_test.go +++ b/src/os/example_test.go @@ -61,7 +61,7 @@ func ExampleFileMode() { log.Fatal(err) } - fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0400, 0777, etc. + fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0o400, 0o777, etc. switch mode := fi.Mode(); { case mode.IsRegular(): fmt.Println("regular file") diff --git a/src/os/exec/exec_posix_test.go b/src/os/exec/exec_posix_test.go index 5d828b3475a40a..7c77bfa712559d 100644 --- a/src/os/exec/exec_posix_test.go +++ b/src/os/exec/exec_posix_test.go @@ -11,12 +11,15 @@ import ( "internal/testenv" "io" "os" + "os/exec" + "os/signal" "os/user" "path/filepath" "reflect" "runtime" "strconv" "strings" + "sync" "syscall" "testing" "time" @@ -24,6 +27,7 @@ import ( func init() { registerHelperCommand("pwd", cmdPwd) + registerHelperCommand("signaltest", cmdSignalTest) } func cmdPwd(...string) { @@ -274,3 +278,55 @@ func TestExplicitPWD(t *testing.T) { }) } } + +// Issue 71828. 
+func TestSIGCHLD(t *testing.T) {
+	cmd := helperCommand(t, "signaltest")
+	out, err := cmd.CombinedOutput()
+	t.Logf("%s", out)
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// cmdSignalTest is for TestSIGCHLD.
+// This runs in a separate process because the bug only happened
+// the first time that a child process was started.
+func cmdSignalTest(...string) {
+	chSig := make(chan os.Signal, 1)
+	signal.Notify(chSig, syscall.SIGCHLD)
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		c := 0
+		for range chSig {
+			c++
+			fmt.Printf("SIGCHLD %d\n", c)
+			if c > 1 {
+				fmt.Println("too many SIGCHLD signals")
+				os.Exit(1)
+			}
+		}
+	}()
+	defer func() {
+		signal.Reset(syscall.SIGCHLD)
+		close(chSig)
+		wg.Wait()
+	}()
+
+	exe, err := os.Executable()
+	if err != nil {
+		fmt.Printf("os.Executable failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	cmd := exec.Command(exe, "hang", "200ms")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		fmt.Printf("failed to run child process: %v\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go
index cba2e151673aba..ff51247d56b72d 100644
--- a/src/os/exec_posix.go
+++ b/src/os/exec_posix.go
@@ -35,10 +35,11 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e
 		}
 	}
 
+	attrSys, shouldDupPidfd := ensurePidfd(attr.Sys)
 	sysattr := &syscall.ProcAttr{
 		Dir: attr.Dir,
 		Env: attr.Env,
-		Sys: ensurePidfd(attr.Sys),
+		Sys: attrSys,
 	}
 	if sysattr.Env == nil {
 		sysattr.Env, err = execenv.Default(sysattr.Sys)
@@ -63,7 +64,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e
 	// For Windows, syscall.StartProcess above already returned a process handle.
 	if runtime.GOOS != "windows" {
 		var ok bool
-		h, ok = getPidfd(sysattr.Sys)
+		h, ok = getPidfd(sysattr.Sys, shouldDupPidfd)
 		if !ok {
 			return newPIDProcess(pid), nil
 		}
diff --git a/src/os/file.go b/src/os/file.go
index c3ee31583e32f6..ad869fc4938d17 100644
--- a/src/os/file.go
+++ b/src/os/file.go
@@ -366,7 +366,7 @@ func Open(name string) (*File, error) {
 }
 
 // Create creates or truncates the named file. If the file already exists,
-// it is truncated. If the file does not exist, it is created with mode 0666
+// it is truncated. If the file does not exist, it is created with mode 0o666
 // (before umask). If successful, methods on the returned File can
 // be used for I/O; the associated file descriptor has mode O_RDWR.
 // If there is an error, it will be of type *PathError.
@@ -602,11 +602,11 @@ func UserHomeDir() (string, error) {
 // On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
 // ModeSticky are used.
 //
-// On Windows, only the 0200 bit (owner writable) of mode is used; it
+// On Windows, only the 0o200 bit (owner writable) of mode is used; it
 // controls whether the file's read-only attribute is set or cleared.
 // The other bits are currently unused. For compatibility with Go 1.12
-// and earlier, use a non-zero mode. Use mode 0400 for a read-only
-// file and 0600 for a readable+writable file.
+// and earlier, use a non-zero mode. Use mode 0o400 for a read-only
+// file and 0o600 for a readable+writable file.
 //
 // On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
 // and ModeTemporary are used.
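The CopyFS change above (O_TRUNC replaced by O_EXCL in src/os/dir.go) means a second copy into the same directory now fails with fs.ErrExist rather than truncating files in place, which is what the os_test.go additions below exercise. A minimal sketch of the new behavior (illustrative only, not part of the patch; the temp-dir prefix is arbitrary):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"testing/fstest"
)

func main() {
	fsys := fstest.MapFS{
		"a.txt": &fstest.MapFile{Data: []byte("hello\n")},
	}
	dir, err := os.MkdirTemp("", "copyfs-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// First copy succeeds and creates dir/a.txt.
	if err := os.CopyFS(dir, fsys); err != nil {
		panic(err)
	}
	// Second copy refuses to overwrite the existing file.
	err = os.CopyFS(dir, fsys)
	fmt.Println(errors.Is(err, fs.ErrExist)) // true
}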
diff --git a/src/os/os_test.go b/src/os/os_test.go
index 878974384dbcba..24a1d84b16f490 100644
--- a/src/os/os_test.go
+++ b/src/os/os_test.go
@@ -1376,8 +1376,7 @@ func TestChtimes(t *testing.T) {
 	t.Parallel()
 
 	f := newFile(t)
-
-	f.Write([]byte("hello, world\n"))
+	// This should be an empty file (see #68687, #68663).
 	f.Close()
 
 	testChtimes(t, f.Name())
@@ -1395,12 +1394,9 @@ func TestChtimesOmit(t *testing.T) {
 func testChtimesOmit(t *testing.T, omitAt, omitMt bool) {
 	t.Logf("omit atime: %v, mtime: %v", omitAt, omitMt)
 	file := newFile(t)
-	_, err := file.Write([]byte("hello, world\n"))
-	if err != nil {
-		t.Fatal(err)
-	}
+	// This should be an empty file (see #68687, #68663).
 	name := file.Name()
-	err = file.Close()
+	err := file.Close()
 	if err != nil {
 		t.Error(err)
 	}
@@ -2178,6 +2174,24 @@ func TestAppend(t *testing.T) {
 	}
 }
 
+func TestOpenFileCreateExclDanglingSymlink(t *testing.T) {
+	defer chtmpdir(t)()
+	const link = "link"
+	if err := Symlink("does_not_exist", link); err != nil {
+		t.Fatal(err)
+	}
+	f, err := OpenFile(link, O_WRONLY|O_CREATE|O_EXCL, 0o666)
+	if err == nil {
+		f.Close()
+	}
+	if !errors.Is(err, ErrExist) {
+		t.Errorf("OpenFile of a dangling symlink with O_CREATE|O_EXCL = %v, want ErrExist", err)
+	}
+	if _, err := Stat(link); err == nil {
+		t.Errorf("OpenFile of a dangling symlink with O_CREATE|O_EXCL created a file")
+	}
+}
+
 func TestStatDirWithTrailingSlash(t *testing.T) {
 	t.Parallel()
 
@@ -3358,6 +3372,14 @@ func TestCopyFS(t *testing.T) {
 		t.Fatal("comparing two directories:", err)
 	}
 
+	// Test whether CopyFS disallows copying for disk filesystem when there is any
+	// existing file in the destination directory.
+	if err := CopyFS(tmpDir, fsys); !errors.Is(err, fs.ErrExist) {
+		t.Errorf("CopyFS should have failed and returned error when there is "+
+			"any existing file in the destination directory (in disk filesystem), "+
+			"got: %v, expected an error satisfying errors.Is(err, fs.ErrExist)", err)
+	}
+
 	// Test with memory filesystem.
 	fsys = fstest.MapFS{
 		"william": {Data: []byte("Shakespeare\n")},
@@ -3395,6 +3417,14 @@ func TestCopyFS(t *testing.T) {
 	}); err != nil {
 		t.Fatal("comparing two directories:", err)
 	}
+
+	// Test whether CopyFS disallows copying for memory filesystem when there is any
+	// existing file in the destination directory.
+	if err := CopyFS(tmpDir, fsys); !errors.Is(err, fs.ErrExist) {
+		t.Errorf("CopyFS should have failed and returned error when there is "+
+			"any existing file in the destination directory (in memory filesystem), "+
+			"got: %v, expected an error satisfying errors.Is(err, fs.ErrExist)", err)
+	}
 }
 
 func TestCopyFSWithSymlinks(t *testing.T) {
diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go
index 0404c4ff64b72e..0bfef7759cc679 100644
--- a/src/os/pidfd_linux.go
+++ b/src/os/pidfd_linux.go
@@ -8,20 +8,28 @@
 // v5.3: pidfd_open syscall, clone3 syscall;
 // v5.4: P_PIDFD idtype support for waitid syscall;
 // v5.6: pidfd_getfd syscall.
+//
+// N.B. Alternative Linux implementations may not follow this ordering. e.g.,
+// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until
+// 8.0.
 
 package os
 
 import (
 	"errors"
 	"internal/syscall/unix"
+	"runtime"
 	"sync"
 	"syscall"
 	"unsafe"
 )
 
-func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr {
+// ensurePidfd initializes the PidFD field in sysAttr if it is not already set.
+// It returns the original or modified SysProcAttr struct and a flag indicating
+// whether the PidFD should be duplicated before using.
+func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) {
 	if !pidfdWorks() {
-		return sysAttr
+		return sysAttr, false
 	}
 
 	var pidfd int
@@ -29,23 +37,33 @@ func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr {
 	if sysAttr == nil {
 		return &syscall.SysProcAttr{
 			PidFD: &pidfd,
-		}
+		}, false
 	}
 	if sysAttr.PidFD == nil {
 		newSys := *sysAttr // copy
 		newSys.PidFD = &pidfd
-		return &newSys
+		return &newSys, false
 	}
 
-	return sysAttr
+	return sysAttr, true
 }
 
-func getPidfd(sysAttr *syscall.SysProcAttr) (uintptr, bool) {
+// getPidfd returns the value of sysAttr.PidFD (or its duplicate if needDup is
+// set) and a flag indicating whether the value can be used.
+func getPidfd(sysAttr *syscall.SysProcAttr, needDup bool) (uintptr, bool) {
 	if !pidfdWorks() {
 		return 0, false
 	}
 
-	return uintptr(*sysAttr.PidFD), true
+	h := *sysAttr.PidFD
+	if needDup {
+		dupH, e := unix.Fcntl(h, syscall.F_DUPFD_CLOEXEC, 0)
+		if e != nil {
+			return 0, false
+		}
+		h = dupH
+	}
+	return uintptr(h), true
 }
 
 func pidfdFind(pid int) (uintptr, error) {
@@ -126,14 +144,21 @@ func pidfdWorks() bool {
 
 var checkPidfdOnce = sync.OnceValue(checkPidfd)
 
-// checkPidfd checks whether all required pidfd-related syscalls work.
-// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid
-// syscall with idtype of P_PIDFD.
+// checkPidfd checks whether all required pidfd-related syscalls work. This
+// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with
+// idtype of P_PIDFD, and clone(CLONE_PIDFD).
 //
 // Reasons for non-working pidfd syscalls include an older kernel and an
 // execution environment in which the above system calls are restricted by
 // seccomp or a similar technology.
 func checkPidfd() error {
+	// In Android version < 12, pidfd-related system calls are not allowed
+	// by seccomp and trigger the SIGSYS signal. See issue #69065.
+	if runtime.GOOS == "android" {
+		ignoreSIGSYS()
+		defer restoreSIGSYS()
+	}
+
 	// Get a pidfd of the current process (opening of "/proc/self" won't
 	// work for waitid).
 	fd, err := unix.PidFDOpen(syscall.Getpid(), 0)
@@ -159,5 +184,27 @@ func checkPidfd() error {
 		return NewSyscallError("pidfd_send_signal", err)
 	}
 
+	// Verify that clone(CLONE_PIDFD) works.
+	//
+	// This shouldn't be necessary since pidfd_open was added in Linux 5.3,
+	// after CLONE_PIDFD in Linux 5.2, but some alternative Linux
+	// implementations may not adhere to this ordering.
+	if err := checkClonePidfd(); err != nil {
+		return err
+	}
+
 	return nil
 }
+
+// Provided by syscall.
+//
+//go:linkname checkClonePidfd
+func checkClonePidfd() error
+
+// Provided by runtime.
+//
+//go:linkname ignoreSIGSYS
+func ignoreSIGSYS()
+
+//go:linkname restoreSIGSYS
+func restoreSIGSYS()
diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go
index 837593706bae8e..c1f41d02d66c73 100644
--- a/src/os/pidfd_linux_test.go
+++ b/src/os/pidfd_linux_test.go
@@ -6,8 +6,10 @@ package os_test
 
 import (
 	"errors"
+	"internal/syscall/unix"
 	"internal/testenv"
 	"os"
+	"os/exec"
 	"syscall"
 	"testing"
 )
@@ -57,3 +59,93 @@ func TestFindProcessViaPidfd(t *testing.T) {
 		t.Fatalf("Release: got %v, want <nil>", err)
 	}
 }
+
+func TestStartProcessWithPidfd(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	t.Parallel()
+
+	if err := os.CheckPidfdOnce(); err != nil {
+		// Non-pidfd code paths tested in exec_unix_test.go.
+ t.Skipf("skipping: pidfd not available: %v", err) + } + + var pidfd int + p, err := os.StartProcess(testenv.GoToolPath(t), []string{"go"}, &os.ProcAttr{ + Sys: &syscall.SysProcAttr{ + PidFD: &pidfd, + }, + }) + if err != nil { + t.Fatalf("starting test process: %v", err) + } + defer syscall.Close(pidfd) + + if _, err := p.Wait(); err != nil { + t.Fatalf("Wait: got %v, want ", err) + } + + // Check the pidfd is still valid + err = unix.PidFDSendSignal(uintptr(pidfd), syscall.Signal(0)) + if !errors.Is(err, syscall.ESRCH) { + t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) + } +} + +// Issue #69284 +func TestPidfdLeak(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + // Find the next 10 descriptors. + // We need to get more than one descriptor in practice; + // the pidfd winds up not being the next descriptor. + const count = 10 + want := make([]int, count) + for i := range count { + var err error + want[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors. + for _, d := range want { + syscall.Close(d) + } + + // Start a process 10 times. + for range 10 { + // For testing purposes this has to be an absolute path. + // Otherwise we will fail finding the executable + // and won't start a process at all. + cmd := exec.Command("/noSuchExecutable") + cmd.Run() + } + + // Open the next 10 descriptors again. + got := make([]int, count) + for i := range count { + var err error + got[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors + for _, d := range got { + syscall.Close(d) + } + + t.Logf("got %v", got) + t.Logf("want %v", want) + + // Allow some slack for runtime epoll descriptors and the like. + if got[count-1] > want[count-1]+5 { + t.Errorf("got descriptor %d, want %d", got[count-1], want[count-1]) + } +} diff --git a/src/os/pidfd_other.go b/src/os/pidfd_other.go index dda4bd0feccae6..ba9cbcb93830c0 100644 --- a/src/os/pidfd_other.go +++ b/src/os/pidfd_other.go @@ -8,11 +8,11 @@ package os import "syscall" -func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { - return sysAttr +func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) { + return sysAttr, false } -func getPidfd(_ *syscall.SysProcAttr) (uintptr, bool) { +func getPidfd(_ *syscall.SysProcAttr, _ bool) (uintptr, bool) { return 0, false } diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go index 8dcb9cb2172882..45867477dc26b2 100644 --- a/src/os/readfrom_linux_test.go +++ b/src/os/readfrom_linux_test.go @@ -14,15 +14,12 @@ import ( "net" . 
"os" "path/filepath" - "runtime" "strconv" "strings" "sync" "syscall" "testing" "time" - - "golang.org/x/net/nettest" ) func TestCopyFileRange(t *testing.T) { @@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) { t.Fatalf("server Control error: %v", err) } } - -func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { - t.Helper() - if !nettest.TestableNetwork(proto) { - t.Skipf("%s does not support %q", runtime.GOOS, proto) - } - - ln, err := nettest.NewLocalListener(proto) - if err != nil { - t.Fatalf("NewLocalListener error: %v", err) - } - t.Cleanup(func() { - if ln != nil { - ln.Close() - } - if client != nil { - client.Close() - } - if server != nil { - server.Close() - } - }) - ch := make(chan struct{}) - go func() { - var err error - server, err = ln.Accept() - if err != nil { - t.Errorf("Accept new connection error: %v", err) - } - ch <- struct{}{} - }() - client, err = net.Dial(proto, ln.Addr().String()) - <-ch - if err != nil { - t.Fatalf("Dial new connection error: %v", err) - } - return client, server -} diff --git a/src/reflect/iter.go b/src/reflect/iter.go index 36472013cb7c12..03df87b17882ba 100644 --- a/src/reflect/iter.go +++ b/src/reflect/iter.go @@ -4,15 +4,24 @@ package reflect -import "iter" +import ( + "iter" +) func rangeNum[T int8 | int16 | int32 | int64 | int | uint8 | uint16 | uint32 | uint64 | uint | - uintptr, N int64 | uint64](v N) iter.Seq[Value] { + uintptr, N int64 | uint64](num N, t Type) iter.Seq[Value] { return func(yield func(v Value) bool) { + convert := t.PkgPath() != "" // cannot use range T(v) because no core type. - for i := T(0); i < T(v); i++ { - if !yield(ValueOf(i)) { + for i := T(0); i < T(num); i++ { + tmp := ValueOf(i) + // if the iteration value type is define by + // type T built-in type. + if convert { + tmp = tmp.Convert(t) + } + if !yield(tmp) { return } } @@ -27,7 +36,7 @@ func rangeNum[T int8 | int16 | int32 | int64 | int | // Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, // Array, Chan, Map, Slice, or String. 
func (v Value) Seq() iter.Seq[Value] { - if canRangeFunc(v.typ()) { + if canRangeFunc(v.abiType()) { return func(yield func(Value) bool) { rf := MakeFunc(v.Type().In(0), func(in []Value) []Value { return []Value{ValueOf(yield(in[0]))} @@ -35,29 +44,29 @@ func (v Value) Seq() iter.Seq[Value] { v.Call([]Value{rf}) } } - switch v.Kind() { + switch v.kind() { case Int: - return rangeNum[int](v.Int()) + return rangeNum[int](v.Int(), v.Type()) case Int8: - return rangeNum[int8](v.Int()) + return rangeNum[int8](v.Int(), v.Type()) case Int16: - return rangeNum[int16](v.Int()) + return rangeNum[int16](v.Int(), v.Type()) case Int32: - return rangeNum[int32](v.Int()) + return rangeNum[int32](v.Int(), v.Type()) case Int64: - return rangeNum[int64](v.Int()) + return rangeNum[int64](v.Int(), v.Type()) case Uint: - return rangeNum[uint](v.Uint()) + return rangeNum[uint](v.Uint(), v.Type()) case Uint8: - return rangeNum[uint8](v.Uint()) + return rangeNum[uint8](v.Uint(), v.Type()) case Uint16: - return rangeNum[uint16](v.Uint()) + return rangeNum[uint16](v.Uint(), v.Type()) case Uint32: - return rangeNum[uint32](v.Uint()) + return rangeNum[uint32](v.Uint(), v.Type()) case Uint64: - return rangeNum[uint64](v.Uint()) + return rangeNum[uint64](v.Uint(), v.Type()) case Uintptr: - return rangeNum[uintptr](v.Uint()) + return rangeNum[uintptr](v.Uint(), v.Type()) case Pointer: if v.Elem().kind() != Array { break @@ -113,7 +122,7 @@ func (v Value) Seq() iter.Seq[Value] { // If v's kind is Pointer, the pointer element type must have kind Array. // Otherwise v's kind must be Array, Map, Slice, or String. func (v Value) Seq2() iter.Seq2[Value, Value] { - if canRangeFunc2(v.typ()) { + if canRangeFunc2(v.abiType()) { return func(yield func(Value, Value) bool) { rf := MakeFunc(v.Type().In(0), func(in []Value) []Value { return []Value{ValueOf(yield(in[0], in[1]))} diff --git a/src/reflect/iter_test.go b/src/reflect/iter_test.go index 9b78fcf7247f5f..1d9869789a1880 100644 --- a/src/reflect/iter_test.go +++ b/src/reflect/iter_test.go @@ -7,10 +7,13 @@ package reflect_test import ( "iter" "maps" + "reflect" . 
"reflect" "testing" ) +type N int8 + func TestValueSeq(t *testing.T) { m := map[string]int{ "1": 1, @@ -175,6 +178,33 @@ func TestValueSeq(t *testing.T) { t.Fatalf("should loop four times") } }}, + {"method", ValueOf(methodIter{}).Method(0), func(t *testing.T, s iter.Seq[Value]) { + i := int64(0) + for v := range s { + if v.Int() != i { + t.Fatalf("got %d, want %d", v.Int(), i) + } + i++ + } + if i != 4 { + t.Fatalf("should loop four times") + } + }}, + {"type N int8", ValueOf(N(4)), func(t *testing.T, s iter.Seq[Value]) { + i := N(0) + for v := range s { + if v.Int() != int64(i) { + t.Fatalf("got %d, want %d", v.Int(), i) + } + i++ + if v.Type() != reflect.TypeOf(i) { + t.Fatalf("got %s, want %s", v.Type(), reflect.TypeOf(i)) + } + } + if i != 4 { + t.Fatalf("should loop four times") + } + }}, } for _, tc := range tests { seq := tc.val.Seq() @@ -296,9 +326,84 @@ func TestValueSeq2(t *testing.T) { t.Fatalf("should loop four times") } }}, + {"method", ValueOf(methodIter2{}).Method(0), func(t *testing.T, s iter.Seq2[Value, Value]) { + i := int64(0) + for v1, v2 := range s { + if v1.Int() != i { + t.Fatalf("got %d, want %d", v1.Int(), i) + } + i++ + if v2.Int() != i { + t.Fatalf("got %d, want %d", v2.Int(), i) + } + } + if i != 4 { + t.Fatalf("should loop four times") + } + }}, + {"[4]N", ValueOf([4]N{0, 1, 2, 3}), func(t *testing.T, s iter.Seq2[Value, Value]) { + i := N(0) + for v1, v2 := range s { + if v1.Int() != int64(i) { + t.Fatalf("got %d, want %d", v1.Int(), i) + } + if v2.Int() != int64(i) { + t.Fatalf("got %d, want %d", v2.Int(), i) + } + i++ + if v2.Type() != reflect.TypeOf(i) { + t.Fatalf("got %s, want %s", v2.Type(), reflect.TypeOf(i)) + } + } + if i != 4 { + t.Fatalf("should loop four times") + } + }}, + {"[]N", ValueOf([]N{1, 2, 3, 4}), func(t *testing.T, s iter.Seq2[Value, Value]) { + i := N(0) + for v1, v2 := range s { + if v1.Int() != int64(i) { + t.Fatalf("got %d, want %d", v1.Int(), i) + } + i++ + if v2.Int() != int64(i) { + t.Fatalf("got %d, want %d", v2.Int(), i) + } + if v2.Type() != reflect.TypeOf(i) { + t.Fatalf("got %s, want %s", v2.Type(), reflect.TypeOf(i)) + } + } + if i != 4 { + t.Fatalf("should loop four times") + } + }}, } for _, tc := range tests { seq := tc.val.Seq2() tc.check(t, seq) } } + +// methodIter is a type from which we can derive a method +// value that is an iter.Seq. +type methodIter struct{} + +func (methodIter) Seq(yield func(int) bool) { + for i := range 4 { + if !yield(i) { + return + } + } +} + +// methodIter2 is a type from which we can derive a method +// value that is an iter.Seq2. +type methodIter2 struct{} + +func (methodIter2) Seq2(yield func(int, int) bool) { + for i := range 4 { + if !yield(i, i+1) { + return + } + } +} diff --git a/src/reflect/value.go b/src/reflect/value.go index 0854371ed413db..6e8e46aa594c87 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -93,6 +93,9 @@ func (f flag) ro() flag { return 0 } +// typ returns the *abi.Type stored in the Value. This method is fast, +// but it doesn't always return the correct type for the Value. +// See abiType and Type, which do return the correct type. 
 func (v Value) typ() *abi.Type {
 	// Types are either static (for compiler-created types) or
 	// heap-allocated but always reachable (for reflection-created
@@ -2682,14 +2685,26 @@ func (v Value) Type() Type {
 	return v.typeSlow()
 }
 
+//go:noinline
 func (v Value) typeSlow() Type {
+	return toRType(v.abiTypeSlow())
+}
+
+func (v Value) abiType() *abi.Type {
+	if v.flag != 0 && v.flag&flagMethod == 0 {
+		return v.typ()
+	}
+	return v.abiTypeSlow()
+}
+
+func (v Value) abiTypeSlow() *abi.Type {
 	if v.flag == 0 {
 		panic(&ValueError{"reflect.Value.Type", Invalid})
 	}
 
 	typ := v.typ()
 	if v.flag&flagMethod == 0 {
-		return toRType(v.typ())
+		return v.typ()
 	}
 
 	// Method value.
@@ -2702,7 +2717,7 @@ func (v Value) typeSlow() Type {
 			panic("reflect: internal error: invalid method index")
 		}
 		m := &tt.Methods[i]
-		return toRType(typeOffFor(typ, m.Typ))
+		return typeOffFor(typ, m.Typ)
 	}
 	// Method on concrete type.
 	ms := typ.ExportedMethods()
@@ -2710,7 +2725,7 @@ func (v Value) typeSlow() Type {
 		panic("reflect: internal error: invalid method index")
 	}
 	m := ms[i]
-	return toRType(typeOffFor(typ, m.Mtyp))
+	return typeOffFor(typ, m.Mtyp)
 }
 
 // CanUint reports whether [Value.Uint] can be used without panicking.
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index cd9a9dfae10abc..ab81a8dd704d32 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -1063,10 +1063,18 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
-	s.limit = s.base() + s.elemsize
 	s.freeindex = 1
 	s.allocCount = 1
 
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, temporarily observes an inflated
+	// limit. It will simply mark the whole chunk or just skip it
+	// since we're in the mark phase anyway.
+	s.limit = s.base() + s.elemsize
+
 	// Account for this new arena chunk memory.
 	gcController.heapInUse.add(int64(userArenaChunkBytes))
 	gcController.heapReleased.add(-int64(userArenaChunkBytes))
diff --git a/src/runtime/cgo/cgo.go b/src/runtime/cgo/cgo.go
index 1e3a50291838d1..6b0acf70235dd6 100644
--- a/src/runtime/cgo/cgo.go
+++ b/src/runtime/cgo/cgo.go
@@ -25,7 +25,8 @@ package cgo
 
 // Use -fno-stack-protector to avoid problems locating the
 // proper support functions. See issues #52919, #54313, #58385.
-#cgo CFLAGS: -Wall -Werror -fno-stack-protector
+// Use -Wdeclaration-after-statement because some CI builds use it.
+#cgo CFLAGS: -Wall -Werror -fno-stack-protector -Wdeclaration-after-statement #cgo solaris CPPFLAGS: -D_POSIX_PTHREAD_SEMANTICS diff --git a/src/runtime/cgo/gcc_darwin_arm64.c b/src/runtime/cgo/gcc_darwin_arm64.c index f1344de8e19be3..7e313767ac398d 100644 --- a/src/runtime/cgo/gcc_darwin_arm64.c +++ b/src/runtime/cgo/gcc_darwin_arm64.c @@ -75,19 +75,27 @@ threadentry(void *v) static void init_working_dir() { - CFBundleRef bundle = CFBundleGetMainBundle(); + CFBundleRef bundle; + CFURLRef url_ref; + CFStringRef url_str_ref; + char buf[MAXPATHLEN]; + Boolean res; + int url_len; + char *dir; + CFStringRef wd_ref; + + bundle = CFBundleGetMainBundle(); if (bundle == NULL) { fprintf(stderr, "runtime/cgo: no main bundle\n"); return; } - CFURLRef url_ref = CFBundleCopyResourceURL(bundle, CFSTR("Info"), CFSTR("plist"), NULL); + url_ref = CFBundleCopyResourceURL(bundle, CFSTR("Info"), CFSTR("plist"), NULL); if (url_ref == NULL) { // No Info.plist found. It can happen on Corellium virtual devices. return; } - CFStringRef url_str_ref = CFURLGetString(url_ref); - char buf[MAXPATHLEN]; - Boolean res = CFStringGetCString(url_str_ref, buf, sizeof(buf), kCFStringEncodingUTF8); + url_str_ref = CFURLGetString(url_ref); + res = CFStringGetCString(url_str_ref, buf, sizeof(buf), kCFStringEncodingUTF8); CFRelease(url_ref); if (!res) { fprintf(stderr, "runtime/cgo: cannot get URL string\n"); @@ -96,13 +104,13 @@ init_working_dir() // url is of the form "file:///path/to/Info.plist". // strip it down to the working directory "/path/to". - int url_len = strlen(buf); + url_len = strlen(buf); if (url_len < sizeof("file://")+sizeof("/Info.plist")) { fprintf(stderr, "runtime/cgo: bad URL: %s\n", buf); return; } buf[url_len-sizeof("/Info.plist")+1] = 0; - char *dir = &buf[0] + sizeof("file://")-1; + dir = &buf[0] + sizeof("file://")-1; if (chdir(dir) != 0) { fprintf(stderr, "runtime/cgo: chdir(%s) failed\n", dir); @@ -110,7 +118,7 @@ init_working_dir() // The test harness in go_ios_exec passes the relative working directory // in the GoExecWrapperWorkingDirectory property of the app bundle. 
- CFStringRef wd_ref = CFBundleGetValueForInfoDictionaryKey(bundle, CFSTR("GoExecWrapperWorkingDirectory")); + wd_ref = CFBundleGetValueForInfoDictionaryKey(bundle, CFSTR("GoExecWrapperWorkingDirectory")); if (wd_ref != NULL) { if (!CFStringGetCString(wd_ref, buf, sizeof(buf), kCFStringEncodingUTF8)) { fprintf(stderr, "runtime/cgo: cannot get GoExecWrapperWorkingDirectory string\n"); diff --git a/src/runtime/cgo/gcc_libinit.c b/src/runtime/cgo/gcc_libinit.c index 33a9ff93cad527..6cceae34c6c354 100644 --- a/src/runtime/cgo/gcc_libinit.c +++ b/src/runtime/cgo/gcc_libinit.c @@ -48,9 +48,11 @@ x_cgo_sys_thread_create(void* (*func)(void*), void* arg) { uintptr_t _cgo_wait_runtime_init_done(void) { void (*pfn)(struct context_arg*); + int done; + pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME); - int done = 2; + done = 2; if (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) != done) { pthread_mutex_lock(&runtime_init_mu); while (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) == 0) { diff --git a/src/runtime/cgo/gcc_libinit_windows.c b/src/runtime/cgo/gcc_libinit_windows.c index 9a8c65ea291ad8..d43d12a24ae28d 100644 --- a/src/runtime/cgo/gcc_libinit_windows.c +++ b/src/runtime/cgo/gcc_libinit_windows.c @@ -69,8 +69,10 @@ x_cgo_sys_thread_create(void (*func)(void*), void* arg) { int _cgo_is_runtime_initialized() { + int status; + EnterCriticalSection(&runtime_init_cs); - int status = runtime_init_done; + status = runtime_init_done; LeaveCriticalSection(&runtime_init_cs); return status; } diff --git a/src/runtime/cgo/gcc_stack_unix.c b/src/runtime/cgo/gcc_stack_unix.c index fcb03d0dea7e34..df0049a4f37ab3 100644 --- a/src/runtime/cgo/gcc_stack_unix.c +++ b/src/runtime/cgo/gcc_stack_unix.c @@ -31,10 +31,11 @@ x_cgo_getstackbound(uintptr bounds[2]) pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); // low address #else - // We don't know how to get the current stacks, so assume they are the - // same as the default stack bounds. - pthread_attr_getstacksize(&attr, &size); - addr = __builtin_frame_address(0) + 4096 - size; + // We don't know how to get the bounds of the current stack, so + // leave them as 0; the caller will use an estimate based on the + // current SP. + addr = 0; + size = 0; #endif pthread_attr_destroy(&attr); diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index b943b1c2d6b4f8..375e9d6d4a12ca 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -231,34 +231,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 { func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { g0 := mp.g0 - inBound := sp > g0.stack.lo && sp <= g0.stack.hi - if mp.ncgo > 0 && !inBound { - // ncgo > 0 indicates that this M was in Go further up the stack - // (it called C and is now receiving a callback). - // - // !inBound indicates that we were called with SP outside the - // expected system stack bounds (C changed the stack out from - // under us between the cgocall and cgocallback?). - // - // It is not safe for the C call to change the stack out from - // under us, so throw. - // Note that this case isn't possible for signal == true, as - // that is always passing a new M from needm. - - // Stack is bogus, but reset the bounds anyway so we can print.
- hi := g0.stack.hi - lo := g0.stack.lo - g0.stack.hi = sp + 1024 - g0.stack.lo = sp - 32*1024 - g0.stackguard0 = g0.stack.lo + stackGuard - g0.stackguard1 = g0.stackguard0 - - print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]") - print("\n") - exit(2) - } - if !mp.isextra { // We allocated the stack for standard Ms. Don't replace the // stack bounds with estimated ones when we already initialized @@ -266,26 +238,37 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { return } - // This M does not have Go further up the stack. However, it may have - // previously called into Go, initializing the stack bounds. Between - // that call returning and now the stack may have changed (perhaps the - // C thread is running a coroutine library). We need to update the - // stack bounds for this case. + inBound := sp > g0.stack.lo && sp <= g0.stack.hi + if inBound && mp.g0StackAccurate { + // This M has called into Go before and has the stack bounds + // initialized. We have the accurate stack bounds, and the SP + // is in bounds. We expect it to continue running within the same + // bounds. + return + } + + // We don't have accurate stack bounds (either this M has never + // called into Go before, or we couldn't get accurate bounds), or the + // current SP is not within the previous bounds (the stack may have + // changed between calls). We need to update the stack bounds. // // N.B. we need to update the stack bounds even if SP appears to - // already be in bounds. Our "bounds" may actually be estimated dummy - // bounds (below). The actual stack bounds could have shifted but still - // have partial overlap with our dummy bounds. If we failed to update - // in that case, we could find ourselves seemingly called near the - // bottom of the stack bounds, where we quickly run out of space. + // already be in bounds, if our bounds are estimated dummy bounds + // (below). We may be in a different region within the same actual + // stack bounds, but our estimates were not accurate. Or the actual + // stack bounds could have shifted but still have partial overlap with + // our dummy bounds. If we failed to update in that case, we could find + // ourselves seemingly called near the bottom of the stack bounds, where + // we quickly run out of space. // Set the stack bounds to match the current stack. If we don't // actually know how big the stack is, like we don't know how big any // scheduling stack is, but we assume there's at least 32 kB. If we // can get a more accurate stack bound from pthread, use that, provided - // it actually contains SP.. + // it actually contains SP. g0.stack.hi = sp + 1024 g0.stack.lo = sp - 32*1024 + mp.g0StackAccurate = false if !signal && _cgo_getstackbound != nil { // Don't adjust if called from the signal handler. // We are on the signal stack, not the pthread stack. @@ -296,12 +279,16 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds)) // getstackbound is an unsupported no-op on Windows. // + // On Unix systems, if the API to get accurate stack bounds is + // not available, it returns zeros. + // // Don't use these bounds if they don't contain SP. Perhaps we // were called by something not using the standard thread // stack.
if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] { g0.stack.lo = bounds[0] g0.stack.hi = bounds[1] + mp.g0StackAccurate = true } } g0.stackguard0 = g0.stack.lo + stackGuard @@ -319,6 +306,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { } sp := gp.m.g0.sched.sp // system sp saved by cgocallback. + oldStack := gp.m.g0.stack + oldAccurate := gp.m.g0StackAccurate callbackUpdateSystemStack(gp.m, sp, false) // The call from C is on gp.m's g0 stack, so we must ensure @@ -338,9 +327,14 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { // stack. However, since we're returning to an earlier stack frame and // need to pair with the entersyscall() call made by cgocall, we must // save syscall* and let reentersyscall restore them. + // + // Note: savedsp and savedbp MUST be held in locals as unsafe.Pointer values. + // When we call into Go, the stack is free to be moved. If these locals + // aren't visible in the stack maps, they won't get updated properly, + // and will end up being stale when restored by reentersyscall. savedsp := unsafe.Pointer(gp.syscallsp) savedpc := gp.syscallpc - savedbp := gp.syscallbp + savedbp := unsafe.Pointer(gp.syscallbp) exitsyscall() // coming out of cgo call gp.m.incgo = false if gp.m.isextra { @@ -361,7 +355,9 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { gp.m.incgo = true unlockOSThread() - if gp.m.isextra { + if gp.m.isextra && gp.m.ncgo == 0 { + // There are no active cgocalls above this frame (ncgo == 0), + // thus there can't be more Go frames above this frame. gp.m.isExtraInC = true } @@ -372,9 +368,15 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { osPreemptExtEnter(gp.m) // going back to cgo call - reentersyscall(savedpc, uintptr(savedsp), savedbp) + reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp)) gp.m.winsyscall = winsyscall + + // Restore the old g0 stack bounds. + gp.m.g0.stack = oldStack + gp.m.g0.stackguard0 = oldStack.lo + stackGuard + gp.m.g0.stackguard1 = gp.m.g0.stackguard0 + gp.m.g0StackAccurate = oldAccurate } func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) { diff --git a/src/runtime/coro.go b/src/runtime/coro.go index 30ada455e4985e..d93817f92f1caa 100644 --- a/src/runtime/coro.go +++ b/src/runtime/coro.go @@ -208,6 +208,18 @@ func coroswitch_m(gp *g) { // directly if possible. setGNoWB(&mp.curg, gnext) setMNoWB(&gnext.m, mp) + + // Synchronize with any outstanding goroutine profile. We're about to start + // executing, and an invariant of the profiler is that we call tryRecordGoroutineProfile + // whenever a goroutine is about to start running. + // + // N.B. We must do this before transitioning to _Grunning but after installing gnext + // in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile + // may allocate). + if goroutineProfile.active { + tryRecordGoroutineProfile(gnext, nil, osyield) + } + if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) { // The CAS failed: use casgstatus, which will take care of // coordinating with the garbage collector about the state change.
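Illustrative sketch (not part of the patch): the coroswitch_m change above closes a race between coroutine resumption and goroutine profiling. A minimal way to exercise that path, assuming only the public iter and runtime/pprof APIs (iter.Pull resumes its producer goroutine via coroswitch):

	package main

	import (
		"io"
		"iter"
		"runtime/pprof"
	)

	func main() {
		// Repeatedly collect goroutine profiles. The profiler's invariant
		// is that every goroutine about to start running gets recorded;
		// with the change above, goroutines resumed by coroswitch
		// participate too.
		go func() {
			for {
				pprof.Lookup("goroutine").WriteTo(io.Discard, 1)
			}
		}()

		for i := 0; i < 1000; i++ {
			next, stop := iter.Pull(func(yield func(int) bool) {
				for j := 0; yield(j); j++ {
				}
			})
			next() // each resume crosses the patched coroswitch path
			stop()
		}
	}
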
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go index f9fbdd8f1cd14f..494a53429698ef 100644 --- a/src/runtime/crash_cgo_test.go +++ b/src/runtime/crash_cgo_test.go @@ -76,6 +76,22 @@ func TestCgoCallbackGC(t *testing.T) { } } +func TestCgoCallbackPprof(t *testing.T) { + t.Parallel() + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no pthreads on %s", runtime.GOOS) + } + if testenv.CPUProfilingBroken() { + t.Skip("skipping on platform with broken profiling") + } + + got := runTestProg(t, "testprogcgo", "CgoCallbackPprof") + if want := "OK\n"; got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + func TestCgoExternalThreadPanic(t *testing.T) { t.Parallel() if runtime.GOOS == "plan9" { diff --git a/src/runtime/debug/mod.go b/src/runtime/debug/mod.go index a4705605b859a1..764fe6a431c500 100644 --- a/src/runtime/debug/mod.go +++ b/src/runtime/debug/mod.go @@ -77,6 +77,7 @@ type Module struct { // - CGO_CPPFLAGS: the effective CGO_CPPFLAGS environment variable // - CGO_CXXFLAGS: the effective CGO_CXXFLAGS environment variable // - CGO_LDFLAGS: the effective CGO_LDFLAGS environment variable +// - DefaultGODEBUG: the effective GODEBUG settings // - GOARCH: the architecture target // - GOAMD64/GOARM/GO386/etc: the architecture feature level for GOARCH // - GOOS: the operating system target diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index d55da1028dbb1c..4502fa72a10371 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1886,3 +1886,30 @@ func (m *TraceMap) PutString(s string) (uint64, bool) { func (m *TraceMap) Reset() { m.traceMap.reset() } + +func SetSpinInGCMarkDone(spin bool) { + gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin) +} + +func GCMarkDoneRestarted() bool { + // Only read this outside of the GC. If we're running during a GC, just report false. + mp := acquirem() + if gcphase != _GCoff { + releasem(mp) + return false + } + restarted := gcDebugMarkDone.restartedDueTo27993 + releasem(mp) + return restarted +} + +func GCMarkDoneResetRestartFlag() { + mp := acquirem() + for gcphase != _GCoff { + releasem(mp) + Gosched() + mp = acquirem() + } + gcDebugMarkDone.restartedDueTo27993 = false + releasem(mp) +} diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 908f6322466b17..4b92b200674386 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -6,6 +6,8 @@ package runtime_test import ( "fmt" + "internal/testenv" + "internal/weak" "math/bits" "math/rand" "os" @@ -787,3 +789,78 @@ func TestMemoryLimitNoGCPercent(t *testing.T) { func TestMyGenericFunc(t *testing.T) { runtime.MyGenericFunc[int]() } + +func TestWeakToStrongMarkTermination(t *testing.T) { + testenv.MustHaveParallelism(t) + + type T struct { + a *int + b int + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + w := make([]weak.Pointer[T], 2048) + + // Make sure there's no outstanding GC from a previous test. + runtime.GC() + + // Create many objects with weak pointers to them. + for i := range w { + x := new(T) + x.a = new(int) + w[i] = weak.Make(x) + } + + // Reset the restart flag. + runtime.GCMarkDoneResetRestartFlag() + + // Prevent mark termination from completing. + runtime.SetSpinInGCMarkDone(true) + + // Start a GC, and wait a little bit to get something spinning in mark termination. + // Simultaneously, fire off another goroutine to disable spinning.
If everything's + // working correctly, then weak.Strong will block, so we need to make sure something + // prevents the GC from continuing to spin. + done := make(chan struct{}) + go func() { + runtime.GC() + done <- struct{}{} + }() + go func() { + time.Sleep(100 * time.Millisecond) + + // Let mark termination continue. + runtime.SetSpinInGCMarkDone(false) + }() + time.Sleep(10 * time.Millisecond) + + // Perform many weak->strong conversions in the critical window. + var wg sync.WaitGroup + for _, wp := range w { + wg.Add(1) + go func() { + defer wg.Done() + wp.Strong() + }() + } + + // Make sure the GC completes. + <-done + + // Make sure all the weak->strong conversions finish. + wg.Wait() + + // The bug is triggered if there's still mark work after gcMarkDone stops the world. + // + // This can manifest in one of two ways today: + // - An exceedingly rare crash in mark termination. + // - gcMarkDone restarts, as if issue #27993 is at play. + // + // Check for the latter. This is a fairly controlled environment, so #27993 is very + // unlikely to happen (it's already rare to begin with) but we'll always _appear_ to + // trigger the same bug if weak->strong conversions aren't properly coordinated with + // mark termination. + if runtime.GCMarkDoneRestarted() { + t.Errorf("gcMarkDone restarted") + } +} diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 432ace728b8269..373838332f564a 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -17,6 +17,7 @@ const ( lockRankDefer lockRankSweepWaiters lockRankAssistQueue + lockRankStrongFromWeakQueue lockRankSweep lockRankTestR lockRankTestW @@ -84,64 +85,65 @@ const lockRankLeafRank lockRank = 1000 // lockNames gives the names associated with each of the above ranks. var lockNames = []string{ - lockRankSysmon: "sysmon", - lockRankScavenge: "scavenge", - lockRankForcegc: "forcegc", - lockRankDefer: "defer", - lockRankSweepWaiters: "sweepWaiters", - lockRankAssistQueue: "assistQueue", - lockRankSweep: "sweep", - lockRankTestR: "testR", - lockRankTestW: "testW", - lockRankTimerSend: "timerSend", - lockRankAllocmW: "allocmW", - lockRankExecW: "execW", - lockRankCpuprof: "cpuprof", - lockRankPollCache: "pollCache", - lockRankPollDesc: "pollDesc", - lockRankWakeableSleep: "wakeableSleep", - lockRankHchan: "hchan", - lockRankAllocmR: "allocmR", - lockRankExecR: "execR", - lockRankSched: "sched", - lockRankAllg: "allg", - lockRankAllp: "allp", - lockRankNotifyList: "notifyList", - lockRankSudog: "sudog", - lockRankTimers: "timers", - lockRankTimer: "timer", - lockRankNetpollInit: "netpollInit", - lockRankRoot: "root", - lockRankItab: "itab", - lockRankReflectOffs: "reflectOffs", - lockRankUserArenaState: "userArenaState", - lockRankTraceBuf: "traceBuf", - lockRankTraceStrings: "traceStrings", - lockRankFin: "fin", - lockRankSpanSetSpine: "spanSetSpine", - lockRankMspanSpecial: "mspanSpecial", - lockRankTraceTypeTab: "traceTypeTab", - lockRankGcBitsArenas: "gcBitsArenas", - lockRankProfInsert: "profInsert", - lockRankProfBlock: "profBlock", - lockRankProfMemActive: "profMemActive", - lockRankProfMemFuture: "profMemFuture", - lockRankGscan: "gscan", - lockRankStackpool: "stackpool", - lockRankStackLarge: "stackLarge", - lockRankHchanLeaf: "hchanLeaf", - lockRankWbufSpans: "wbufSpans", - lockRankMheap: "mheap", - lockRankMheapSpecial: "mheapSpecial", - lockRankGlobalAlloc: "globalAlloc", - lockRankTrace: "trace", - lockRankTraceStackTab: "traceStackTab", - lockRankPanic: "panic", - lockRankDeadlock: "deadlock", - 
lockRankRaceFini: "raceFini", - lockRankAllocmRInternal: "allocmRInternal", - lockRankExecRInternal: "execRInternal", - lockRankTestRInternal: "testRInternal", + lockRankSysmon: "sysmon", + lockRankScavenge: "scavenge", + lockRankForcegc: "forcegc", + lockRankDefer: "defer", + lockRankSweepWaiters: "sweepWaiters", + lockRankAssistQueue: "assistQueue", + lockRankStrongFromWeakQueue: "strongFromWeakQueue", + lockRankSweep: "sweep", + lockRankTestR: "testR", + lockRankTestW: "testW", + lockRankTimerSend: "timerSend", + lockRankAllocmW: "allocmW", + lockRankExecW: "execW", + lockRankCpuprof: "cpuprof", + lockRankPollCache: "pollCache", + lockRankPollDesc: "pollDesc", + lockRankWakeableSleep: "wakeableSleep", + lockRankHchan: "hchan", + lockRankAllocmR: "allocmR", + lockRankExecR: "execR", + lockRankSched: "sched", + lockRankAllg: "allg", + lockRankAllp: "allp", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", + lockRankTimers: "timers", + lockRankTimer: "timer", + lockRankNetpollInit: "netpollInit", + lockRankRoot: "root", + lockRankItab: "itab", + lockRankReflectOffs: "reflectOffs", + lockRankUserArenaState: "userArenaState", + lockRankTraceBuf: "traceBuf", + lockRankTraceStrings: "traceStrings", + lockRankFin: "fin", + lockRankSpanSetSpine: "spanSetSpine", + lockRankMspanSpecial: "mspanSpecial", + lockRankTraceTypeTab: "traceTypeTab", + lockRankGcBitsArenas: "gcBitsArenas", + lockRankProfInsert: "profInsert", + lockRankProfBlock: "profBlock", + lockRankProfMemActive: "profMemActive", + lockRankProfMemFuture: "profMemFuture", + lockRankGscan: "gscan", + lockRankStackpool: "stackpool", + lockRankStackLarge: "stackLarge", + lockRankHchanLeaf: "hchanLeaf", + lockRankWbufSpans: "wbufSpans", + lockRankMheap: "mheap", + lockRankMheapSpecial: "mheapSpecial", + lockRankGlobalAlloc: "globalAlloc", + lockRankTrace: "trace", + lockRankTraceStackTab: "traceStackTab", + lockRankPanic: "panic", + lockRankDeadlock: "deadlock", + lockRankRaceFini: "raceFini", + lockRankAllocmRInternal: "allocmRInternal", + lockRankExecRInternal: "execRInternal", + lockRankTestRInternal: "testRInternal", } func (rank lockRank) String() string { @@ -163,62 +165,63 @@ func (rank lockRank) String() string { // // Lock ranks that allow self-cycles list themselves. 
var lockPartialOrder [][]lockRank = [][]lockRank{ - lockRankSysmon: {}, - lockRankScavenge: {lockRankSysmon}, - lockRankForcegc: {lockRankSysmon}, - lockRankDefer: {}, - lockRankSweepWaiters: {}, - lockRankAssistQueue: {}, - lockRankSweep: {}, - lockRankTestR: {}, - lockRankTestW: {}, - lockRankTimerSend: {}, - lockRankAllocmW: {}, - lockRankExecW: {}, - lockRankCpuprof: {}, - lockRankPollCache: {}, - lockRankPollDesc: {}, - lockRankWakeableSleep: {}, - lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, - lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, - lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, - lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, - lockRankRoot: {}, - lockRankItab: {}, - lockRankReflectOffs: {lockRankItab}, - lockRankUserArenaState: {}, - lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, - lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, 
lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, 
lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, 
lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, 
lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, - lockRankPanic: {}, - lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, - lockRankRaceFini: {lockRankPanic}, - lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, - lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, - lockRankTestRInternal: {lockRankTestR, lockRankTestW}, + lockRankSysmon: {}, + lockRankScavenge: {lockRankSysmon}, + lockRankForcegc: {lockRankSysmon}, + lockRankDefer: {}, + lockRankSweepWaiters: {}, + lockRankAssistQueue: {}, + lockRankStrongFromWeakQueue: {}, + lockRankSweep: {}, + lockRankTestR: {}, + lockRankTestW: {}, + lockRankTimerSend: {}, + lockRankAllocmW: {}, + lockRankExecW: {}, + lockRankCpuprof: {}, + lockRankPollCache: {}, + lockRankPollDesc: {}, + lockRankWakeableSleep: {}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, + lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, 
lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, + lockRankRoot: {}, + lockRankItab: {}, + lockRankReflectOffs: {lockRankItab}, + lockRankUserArenaState: {}, + lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, + lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, 
lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, 
lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, 
lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankPanic: {}, + lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, + lockRankRaceFini: {lockRankPanic}, + lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, + lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, + lockRankTestRInternal: {lockRankTestR, lockRankTestW}, } diff 
--git a/src/runtime/map.go index 112084f5a74091..52d56fb57a4dc6 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -1209,6 +1209,11 @@ func (h *hmap) sameSizeGrow() bool { return h.flags&sameSizeGrow != 0 } +//go:linkname sameSizeGrowForIssue69110Test +func sameSizeGrowForIssue69110Test(h *hmap) bool { + return h.sameSizeGrow() +} + // noldbuckets calculates the number of buckets prior to the current map growth. func (h *hmap) noldbuckets() uintptr { oldB := h.B @@ -1668,7 +1673,16 @@ func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) } func mapclone2(t *maptype, src *hmap) *hmap { - dst := makemap(t, src.count, nil) + hint := src.count + if overLoadFactor(hint, src.B) { + // Note: in rare cases (e.g. during a same-sized grow) the map + // can be overloaded. Make sure we don't allocate a destination + // bucket array larger than the source bucket array. + // This will cause the cloned map to be overloaded also, + // but that's better than crashing. See issue 69110. + hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen)) + } + dst := makemap(t, hint, nil) dst.hash0 = src.hash0 dst.nevacuate = 0 // flags do not need to be copied here, just like a new map has no flags. diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index e8da133a69490e..e28dbb02019332 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -251,6 +251,14 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan { // Put the large span in the mcentral swept list so that it's // visible to the background sweeper. mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s) + + // Adjust s.limit down to the object-containing part of the span. + // + // This is just to create a slightly tighter bound on the limit. + // It's totally OK if the garbage collector, in particular + // conservative scanning, can temporarily observe an inflated + // limit. It will simply mark the whole object or just skip it + // since we're in the mark phase anyway. s.limit = s.base() + size s.initHeapBits(false) return s diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index bf597e1936da49..28c57eb30b8672 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -249,17 +249,10 @@ func (c *mcentral) uncacheSpan(s *mspan) { // grow allocates a new empty span from the heap and initializes it for c's size class. func (c *mcentral) grow() *mspan { npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) - size := uintptr(class_to_size[c.spanclass.sizeclass()]) - s := mheap_.alloc(npages, c.spanclass) if s == nil { return nil } - - // Use division by multiplication and shifts to quickly compute: - // n := (npages << _PageShift) / size - n := s.divideByElemSize(npages << _PageShift) - s.limit = s.base() + size*n s.initHeapBits(false) return s } diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index da3d956d480beb..3575ca02d89cad 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -230,6 +230,11 @@ Below is the full list of supported metrics, ordered lexicographically. /gc/stack/starting-size:bytes The stack size of new goroutines. + /godebug/non-default-behavior/allowmultiplevcs:events + The number of non-default behaviors executed by the cmd/go + package due to a non-default GODEBUG=allowmultiplevcs=... + setting. + /godebug/non-default-behavior/asynctimerchan:events The number of non-default behaviors executed by the time package due to a non-default GODEBUG=asynctimerchan=...
setting. diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 2654c696582211..9cf0c901097b15 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -190,6 +190,7 @@ func gcinit() { work.markDoneSema = 1 lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters) lockInit(&work.assistQueue.lock, lockRankAssistQueue) + lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue) lockInit(&work.wbufSpans.lock, lockRankWbufSpans) } @@ -418,6 +419,26 @@ type workType struct { list gList } + // strongFromWeak controls how the GC interacts with weak->strong + // pointer conversions. + strongFromWeak struct { + // block is a flag set during mark termination that prevents + // new weak->strong conversions from executing by blocking the + // goroutine and enqueuing it onto q. + // + // Mutated only by one goroutine at a time in gcMarkDone, + // with globally-synchronizing events like forEachP and + // stopTheWorld. + block bool + + // q is a queue of goroutines that attempted to perform a + // weak->strong conversion during mark termination. + // + // Protected by lock. + lock mutex + q gQueue + } + // cycles is the number of completed GC cycles, where a GC // cycle is sweep termination, mark, mark termination, and // sweep. This differs from memstats.numgc, which is @@ -800,6 +821,19 @@ func gcStart(trigger gcTrigger) { // This is protected by markDoneSema. var gcMarkDoneFlushed uint32 +// gcDebugMarkDone contains fields used to debug/test mark termination. +var gcDebugMarkDone struct { + // spinAfterRaggedBarrier forces gcMarkDone to spin after it executes + // the ragged barrier. + spinAfterRaggedBarrier atomic.Bool + + // restartedDueTo27993 indicates that we restarted mark termination + // due to the bug described in issue #27993. + // + // Protected by worldsema. + restartedDueTo27993 bool +} + // gcMarkDone transitions the GC from mark to mark termination if all // reachable objects have been marked (that is, there are no grey // objects and can be no more in the future). Otherwise, it flushes @@ -842,6 +876,10 @@ top: // stop the world later, so acquire worldsema now. semacquire(&worldsema) + // Prevent weak->strong conversions from generating additional + // GC work. forEachP will guarantee that it is observed globally. + work.strongFromWeak.block = true + // Flush all local buffers and collect flushedWork flags. gcMarkDoneFlushed = 0 forEachP(waitReasonGCMarkTermination, func(pp *p) { @@ -872,6 +910,10 @@ top: goto top } + // For debugging/testing. + for gcDebugMarkDone.spinAfterRaggedBarrier.Load() { + } + // There was no global work, no local work, and no Ps // communicated work since we took markDoneSema. Therefore // there are no grey objects and no more objects can be @@ -910,6 +952,8 @@ top: } }) if restart { + gcDebugMarkDone.restartedDueTo27993 = true + getg().m.preemptoff = "" systemstack(func() { // Accumulate the time we were stopped before we had to start again. @@ -936,6 +980,11 @@ top: // start the world again. gcWakeAllAssists() + // Wake all blocked weak->strong conversions. These will run + // when we start the world again. + work.strongFromWeak.block = false + gcWakeAllStrongFromWeak() + // Likewise, release the transition lock. Blocked // workers and assists will run when we start the // world again. @@ -970,7 +1019,7 @@ func gcMarkTermination(stw worldStop) { // N.B. The execution tracer is not aware of this status // transition and handles it specially based on the // wait reason. 
- casGToWaitingForGC(curgp, _Grunning, waitReasonGarbageCollection) + casGToWaitingForSuspendG(curgp, _Grunning, waitReasonGarbageCollection) // Run gc on the g0 stack. We do this so that the g stack // we're currently running on will no longer change. Cuts @@ -1422,7 +1471,8 @@ func gcBgMarkWorker(ready chan struct{}) { systemstack(func() { // Mark our goroutine preemptible so its stack - // can be scanned. This lets two mark workers + // can be scanned or observed by the execution + // tracer. This, for example, lets two mark workers // scan each other (otherwise, they would // deadlock). We must not modify anything on // the G stack. However, stack shrinking is @@ -1432,7 +1482,7 @@ func gcBgMarkWorker(ready chan struct{}) { // N.B. The execution tracer is not aware of this status // transition and handles it specially based on the // wait reason. - casGToWaitingForGC(gp, _Grunning, waitReasonGCWorkerActive) + casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCWorkerActive) switch pp.gcMarkWorkerMode { default: throw("gcBgMarkWorker: unexpected gcMarkWorkerMode") diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 61e917df41089b..2563580e30f2b1 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -217,7 +217,7 @@ func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 { userG := getg().m.curg selfScan := gp == userG && readgstatus(userG) == _Grunning if selfScan { - casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan) + casGToWaitingForSuspendG(userG, _Grunning, waitReasonGarbageCollectionScan) } // TODO: suspendG blocks (and spins) until gp @@ -645,7 +645,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) { } // gcDrainN requires the caller to be preemptible. - casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking) + casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCAssistMarking) // drain own cached work first in the hopes that it // will be more cache friendly. diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 35fd08af50c3c1..b27901cedc1557 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -1390,7 +1390,6 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, if typ.manual() { s.manualFreeList = 0 s.nelems = 0 - s.limit = s.base() + s.npages*pageSize s.state.set(mSpanManual) } else { // We must set span properties before the span is published anywhere @@ -1418,6 +1417,9 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, s.gcmarkBits = newMarkBits(uintptr(s.nelems)) s.allocBits = newAllocBits(uintptr(s.nelems)) + // Adjust s.limit down to the object-containing part of the span. + s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems) + // It's safe to access h.sweepgen without the heap lock because it's // only ever updated with the world stopped and we run on the // systemstack which blocks a STW transition. @@ -1701,6 +1703,7 @@ func (span *mspan) init(base uintptr, npages uintptr) { span.list = nil span.startAddr = base span.npages = npages + span.limit = base + npages*pageSize // see go.dev/issue/74288; adjusted later for heap spans span.allocCount = 0 span.spanclass = 0 span.elemsize = 0 @@ -2049,8 +2052,19 @@ func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { handle := (*atomic.Uintptr)(u) - // Prevent preemption. We want to make sure that another GC cycle can't start. + // Prevent preemption. 
We want to make sure that another GC cycle can't start + // and that work.strongFromWeak.block can't change out from under us. mp := acquirem() + + // Yield to the GC if necessary. + if work.strongFromWeak.block { + releasem(mp) + + // Try to park and wait for mark termination. + // N.B. gcParkStrongFromWeak calls acquirem before returning. + mp = gcParkStrongFromWeak() + } + p := handle.Load() if p == 0 { releasem(mp) @@ -2073,14 +2087,67 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { // Even if we just swept some random span that doesn't contain this object, because // this object is long dead and its memory has since been reused, we'll just observe nil. ptr := unsafe.Pointer(handle.Load()) + + // This is responsible for maintaining the same GC-related + // invariants as the Yuasa part of the write barrier. During + // the mark phase, it's possible that we just created the only + // valid pointer to the object pointed to by ptr. If it's only + // ever referenced from our stack, and our stack is blackened + // already, we could fail to mark it. So, mark it now. + if gcphase != _GCoff { + shade(uintptr(ptr)) + } releasem(mp) + + // Explicitly keep ptr alive. This seems unnecessary since we return ptr, + // but let's be explicit since it's important we keep ptr alive across the + // call to shade. + KeepAlive(ptr) return ptr } +// gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks. +func gcParkStrongFromWeak() *m { + // Prevent preemption as we check strongFromWeak, so it can't change out from under us. + mp := acquirem() + + for work.strongFromWeak.block { + lock(&work.strongFromWeak.lock) + releasem(mp) // N.B. Holding the lock prevents preemption. + + // Queue ourselves up. + work.strongFromWeak.q.pushBack(getg()) + + // Park. + goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2) + + // Re-acquire the current M since we're going to check the condition again. + mp = acquirem() + + // Re-check condition. We may have awoken in the next GC's mark termination phase. + } + return mp +} + +// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong +// conversions. This is used at the end of a GC cycle. +// +// work.strongFromWeak.block must be false to prevent woken goroutines +// from immediately going back to sleep. +func gcWakeAllStrongFromWeak() { + lock(&work.strongFromWeak.lock) + list := work.strongFromWeak.q.popList() + injectglist(&list) + unlock(&work.strongFromWeak.lock) +} + // Retrieves or creates a weak pointer handle for the object p. func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // First try to retrieve without allocating. if handle := getWeakHandle(p); handle != nil { + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } @@ -2105,7 +2172,17 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil) releasem(mp) } - return s.handle + + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + // + // Same for handle, which is only stored in the special. + // There's a window where it might die if we don't keep it + // alive explicitly. Returning it here is probably good enough, + // but let's be defensive and explicit. See #70455. 
+ KeepAlive(p) + KeepAlive(handle) + return handle } // There was an existing handle. Free the special @@ -2124,8 +2201,11 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { } // Keep p alive for the duration of the function to ensure - // that it cannot die while we're trying to this. + // that it cannot die while we're trying to do this. + // + // Same for handle, just to be defensive. KeepAlive(p) + KeepAlive(handle) return handle } @@ -2154,6 +2234,9 @@ func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr { unlock(&span.speciallock) releasem(mp) + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 1239b4a546ea39..3391afc6572509 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -50,6 +50,7 @@ NONE < defer; NONE < sweepWaiters, assistQueue, + strongFromWeakQueue, sweep; # Test only @@ -66,6 +67,7 @@ assistQueue, hchan, pollDesc, # pollDesc can interact with timers, which can lock sched. scavenge, + strongFromWeakQueue, sweep, sweepWaiters, testR, diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 006274757e66f1..8676f29a9cf78c 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) { for i := range p { cf := CallersFrames(p[i].Stack()) j := 0 - for ; j < len(expandedStack); j++ { + for j < len(expandedStack) { f, more := cf.Next() // f.PC is a "call PC", but later consumers will expect // "return PCs" expandedStack[j] = f.PC + 1 + j++ if !more { break } @@ -1270,7 +1271,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok // of calling ThreadCreateProfile directly. func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) { - copy(p[0].Stack0[:], r.Stack) + i := copy(p[0].Stack0[:], r.Stack) + clear(p[0].Stack0[i:]) p = p[1:] }) } @@ -1495,11 +1497,6 @@ func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) { // so here we check _Gdead first. return } - if isSystemGoroutine(gp1, true) { - // System goroutines should not appear in the profile. (The finalizer - // goroutine is marked as "already profiled".) - return - } for { prev := gp1.goroutineProfiled.Load() @@ -1537,6 +1534,17 @@ func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) { // stack), or from the scheduler in preparation to execute gp1 (running on the // system stack). func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) { + if isSystemGoroutine(gp1, false) { + // System goroutines should not appear in the profile. + // Check this here and not in tryRecordGoroutineProfile because isSystemGoroutine + // may change on a goroutine while it is executing, so while the scheduler might + // see a system goroutine, goroutineProfileWithLabelsConcurrent might not, and + // this inconsistency could cause invariants to be violated, such as trying to + // record the stack of a running goroutine below. In short, we still want system + // goroutines to participate in the same state machine on gp1.goroutineProfiled as + // everything else, we just don't record the stack in the profile. 
+ return + } if readgstatus(gp1) == _Grunning { print("doRecordGoroutineProfile gp1=", gp1.goid, "\n") throw("cannot read stack of running goroutine") @@ -1649,7 +1657,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { return } for i, mr := range records[0:n] { - copy(p[i].Stack0[:], mr.Stack) + l := copy(p[i].Stack0[:], mr.Stack) + clear(p[i].Stack0[l:]) } return } diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 6ce656c70e146e..e80d390e0d09f2 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -879,8 +879,9 @@ func runPerThreadSyscall() { } const ( - _SI_USER = 0 - _SI_TKILL = -6 + _SI_USER = 0 + _SI_TKILL = -6 + _SYS_SECCOMP = 1 ) // sigFromUser reports whether the signal was sent because of a call @@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool { return code == _SI_USER || code == _SI_TKILL } +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + code := int32(c.sigcode()) + return code == _SYS_SECCOMP +} + //go:nosplit func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) { r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) diff --git a/src/runtime/os_unix_nonlinux.go b/src/runtime/os_unix_nonlinux.go index b98753b8fe12b7..0e8b61c3b11aa2 100644 --- a/src/runtime/os_unix_nonlinux.go +++ b/src/runtime/os_unix_nonlinux.go @@ -13,3 +13,10 @@ package runtime func (c *sigctxt) sigFromUser() bool { return c.sigcode() == _SI_USER } + +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + return false +} diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 98e96b12bf522c..12bbf96a213674 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -391,10 +391,16 @@ func deferrangefunc() any { throw("defer on system stack") } + fn := findfunc(getcallerpc()) + if fn.deferreturn == 0 { + throw("no deferreturn") + } + d := newdefer() d.link = gp._defer gp._defer = d - d.pc = getcallerpc() + + d.pc = fn.entry() + uintptr(fn.deferreturn) // We must not be preempted between calling getcallersp and // storing it to d.sp because getcallersp's result is a // uintptr stack pointer. @@ -1215,6 +1221,8 @@ func recovery(gp *g) { // only gets us to the caller's fp. gp.sched.bp = sp - goarch.PtrSize } + // The value in ret is delivered IN A REGISTER, even if there is a + // stack ABI. gp.sched.ret = 1 gogo(&gp.sched) } diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 391588d4acd0ec..ef373b36848437 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p) diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 30ef50b1c0fa7a..ab82d9830a3fa3 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -15,6 +15,7 @@ import ( "internal/syscall/unix" "internal/testenv" "io" + "iter" "math" "math/big" "os" @@ -414,27 +415,6 @@ func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Loca return p } -func cpuProfilingBroken() bool { - switch runtime.GOOS { - case "plan9": - // Profiling unimplemented. 
- return true - case "aix": - // See https://golang.org/issue/45170. - return true - case "ios", "dragonfly", "netbsd", "illumos", "solaris": - // See https://golang.org/issue/13841. - return true - case "openbsd": - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - // See https://golang.org/issue/13841. - return true - } - } - - return false -} - // testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need, // as interpreted by matches, and returns the parsed profile. func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile { @@ -452,7 +432,7 @@ func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Dura t.Skip("skipping on wasip1") } - broken := cpuProfilingBroken() + broken := testenv.CPUProfilingBroken() deadline, ok := t.Deadline() if broken || !ok { @@ -981,7 +961,7 @@ func TestBlockProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk) @@ -991,7 +971,7 @@ func TestBlockProfile(t *testing.T) { } -func stacks(p *profile.Profile) (res [][]string) { +func profileStacks(p *profile.Profile) (res [][]string) { for _, s := range p.Sample { var stk []string for _, l := range s.Location { @@ -1004,6 +984,22 @@ func stacks(p *profile.Profile) (res [][]string) { return res } +func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) { + for _, record := range records { + frames := runtime.CallersFrames(record.Stack()) + var stk []string + for { + frame, more := frames.Next() + stk = append(stk, frame.Function) + if !more { + break + } + } + res = append(res, stk) + } + return res +} + func containsStack(got [][]string, want []string) bool { for _, stk := range got { if len(stk) < len(want) { @@ -1288,7 +1284,7 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, want := range [][]string{ {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"}, } { @@ -1328,6 +1324,28 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D) } }) + + t.Run("records", func(t *testing.T) { + // Record a mutex profile using the structured record API. + var records []runtime.BlockProfileRecord + for { + n, ok := runtime.MutexProfile(records) + if ok { + records = records[:n] + break + } + records = make([]runtime.BlockProfileRecord, n*2) + } + + // Check that we see the same stack trace as the proto profile. For + // historical reason we expect a runtime.goexit root frame here that is + // omitted in the proto profile. + stks := blockRecordStacks(records) + want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"} + if !containsStack(stks, want) { + t.Errorf("No matching stack entry for %+v", want) + } + }) } func TestMutexProfileRateAdjust(t *testing.T) { @@ -1754,6 +1772,89 @@ func TestGoroutineProfileConcurrency(t *testing.T) { } } +// Regression test for #69998. +func TestGoroutineProfileCoro(t *testing.T) { + testenv.MustHaveParallelism(t) + + goroutineProf := Lookup("goroutine") + + // Set up a goroutine to just create and run coroutine goroutines all day. 
+ iterFunc := func() { + p, stop := iter.Pull2( + func(yield func(int, int) bool) { + for i := 0; i < 10000; i++ { + if !yield(i, i) { + return + } + } + }, + ) + defer stop() + for { + _, _, ok := p() + if !ok { + break + } + } + } + var wg sync.WaitGroup + done := make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + iterFunc() + select { + case <-done: + default: + } + } + }() + + // Take a goroutine profile. If the bug in #69998 is present, this will crash + // with high probability. We don't care about the output for this bug. + goroutineProf.WriteTo(io.Discard, 1) +} + +// This test tries to provoke a situation wherein the finalizer goroutine is +// erroneously inspected by the goroutine profiler in such a way that could +// cause a crash. See go.dev/issue/74090. +func TestGoroutineProfileIssue74090(t *testing.T) { + testenv.MustHaveParallelism(t) + + goroutineProf := Lookup("goroutine") + + // T is a pointer type so it won't be allocated by the tiny + // allocator, which can lead to its finalizer not being called + // during this test. + type T *byte + for range 10 { + // We use finalizers for this test because finalizers transition between + // system and user goroutine on each call, since there's substantially + // more work to do to set up a finalizer call. Cleanups, on the other hand, + // transition once for a whole batch, and so are less likely to trigger + // the failure. Under stress testing conditions this test fails approximately + // 5 times every 1000 executions on a 64 core machine without the appropriate + // fix, which is not ideal but if this test crashes at all, it's a clear + // signal that something is broken. + var objs []*T + for range 10000 { + obj := new(T) + runtime.SetFinalizer(obj, func(_ interface{}) {}) + objs = append(objs, obj) + } + objs = nil + + // Queue up all the finalizers. + runtime.GC() + + // Try to run a goroutine profile concurrently with finalizer execution + // to trigger the bug. + var w strings.Builder + goroutineProf.WriteTo(&w, 1) + } +} + func BenchmarkGoroutine(b *testing.B) { withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) { return func(b *testing.B) { @@ -2441,16 +2542,7 @@ func TestTimeVDSO(t *testing.T) { } func TestProfilerStackDepth(t *testing.T) { - // Disable sampling, otherwise it's difficult to assert anything. - oldMemRate := runtime.MemProfileRate - runtime.MemProfileRate = 1 - runtime.SetBlockProfileRate(1) - oldMutexRate := runtime.SetMutexProfileFraction(1) - t.Cleanup(func() { - runtime.MemProfileRate = oldMemRate - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(oldMutexRate) - }) + t.Cleanup(disableSampling()) const depth = 128 go produceProfileEvents(t, depth) @@ -2478,7 +2570,7 @@ func TestProfilerStackDepth(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) var stk []string for _, s := range stks { if hasPrefix(s, test.prefix) { @@ -2742,3 +2834,84 @@ runtime/pprof.inlineA`, }) } } + +func TestProfileRecordNullPadding(t *testing.T) { + // Produce events for the different profile types. + t.Cleanup(disableSampling()) + memSink = make([]byte, 1) // MemProfile + <-time.After(time.Millisecond) // BlockProfile + blockMutex(t) // MutexProfile + runtime.GC() + + // Test that all profile records are null padded. 
+ testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile) + testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile) + testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, true) + }) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, false) + }) + // Not testing ThreadCreateProfile because it is broken, see issue 6104. +} + +func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) { + stack0 := func(sr *T) *[32]uintptr { + switch t := any(sr).(type) { + case *runtime.StackRecord: + return &t.Stack0 + case *runtime.MemProfileRecord: + return &t.Stack0 + case *runtime.BlockProfileRecord: + return &t.Stack0 + default: + panic(fmt.Sprintf("unexpected type %T", sr)) + } + } + + t.Run(name, func(t *testing.T) { + var p []T + for { + n, ok := fn(p) + if ok { + p = p[:n] + break + } + p = make([]T, n*2) + for i := range p { + s0 := stack0(&p[i]) + for j := range s0 { + // Poison the Stack0 array to identify lack of zero padding + s0[j] = ^uintptr(0) + } + } + } + + if len(p) == 0 { + t.Fatal("no records found") + } + + for _, sr := range p { + for i, v := range stack0(&sr) { + if v == ^uintptr(0) { + t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr) + } + } + } + }) +} + +// disableSampling configures the profilers to capture all events, otherwise +// it's difficult to assert anything. +func disableSampling() func() { + oldMemRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + runtime.SetBlockProfileRate(1) + oldMutexRate := runtime.SetMutexProfileFraction(1) + return func() { + runtime.MemProfileRate = oldMemRate + runtime.SetBlockProfileRate(0) + runtime.SetMutexProfileFraction(oldMutexRate) + } +} diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index 45b1b5e9c7d42c..839f3875be318b 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -419,14 +419,21 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { name := u.srcFunc(uf).name() if stringslite.HasPrefix(name, "runtime.") || stringslite.HasPrefix(name, "runtime/internal/") || + stringslite.HasPrefix(name, "internal/runtime/") || stringslite.HasPrefix(name, "reflect.") { // For now we never async preempt the runtime or // anything closely tied to the runtime. Known issues // include: various points in the scheduler ("don't // preempt between here and here"), much of the defer // implementation (untyped info on stack), bulk write - // barriers (write barrier check), - // reflect.{makeFuncStub,methodValueCall}. + // barriers (write barrier check), atomic functions in + // internal/runtime/atomic, reflect.{makeFuncStub,methodValueCall}. + // + // Note that this is a subset of the runtimePkgs in pkgspecial.go + // and these checks are theoretically redundant because the compiler + // marks "all points" in runtime functions as unsafe for async preemption. + // But for some reason, we can't eliminate these checks until https://go.dev/issue/72031 + // is resolved. // // TODO(austin): We should improve this, or opt things // in incrementally. 
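
Stepping back to the mgc.go and mheap.go changes above: together they form a block/park/wake handshake, so that a weak->strong conversion arriving during mark termination parks until the cycle commits instead of creating new GC work behind the ragged barrier. A self-contained analogue of the protocol using sync primitives (the runtime uses lock, goparkunlock, and injectglist instead; names mirror the runtime fields, but this is only a sketch):

    package main

    import "sync"

    // strongFromWeak mirrors the shape of work.strongFromWeak.
    type strongFromWeak struct {
        mu    sync.Mutex
        cond  *sync.Cond // stands in for the parked-goroutine queue q
        block bool       // set for the duration of mark termination
    }

    func newStrongFromWeak() *strongFromWeak {
        s := &strongFromWeak{}
        s.cond = sync.NewCond(&s.mu)
        return s
    }

    // convert is the weak->strong path, like gcParkStrongFromWeak: wait
    // out mark termination before touching the handle.
    func (s *strongFromWeak) convert() {
        s.mu.Lock()
        // A loop, not an if: we may be woken and find ourselves in the
        // *next* cycle's mark termination, so re-check the condition.
        for s.block {
            s.cond.Wait()
        }
        s.mu.Unlock()
        // ...now safe to load the handle and shade the resulting pointer...
    }

    // markTermination brackets the blocked region like gcMarkDone: set
    // block before the ragged barrier, clear it and wake all waiters once
    // the cycle is committed (gcWakeAllStrongFromWeak).
    func (s *strongFromWeak) markTermination(work func()) {
        s.mu.Lock()
        s.block = true
        s.mu.Unlock()

        work() // flush buffers, possibly restart, finish the cycle

        s.mu.Lock()
        s.block = false
        s.cond.Broadcast()
        s.mu.Unlock()
    }

    func main() {
        s := newStrongFromWeak()
        done := make(chan struct{})
        go func() { s.convert(); close(done) }()
        s.markTermination(func() {})
        <-done
    }

The ordering is the load-bearing part: block is set before the forEachP ragged barrier, so every P observes it before flushing local work, and it is cleared only alongside waking assists, right before the world restarts.
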
diff --git a/src/runtime/proc.go b/src/runtime/proc.go index c4f175b0b76b22..dfe298cc970716 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -1000,6 +1000,28 @@ func (mp *m) becomeSpinning() { sched.needspinning.Store(0) } +// Take a snapshot of allp, for use after dropping the P. +// +// Must be called with a P, but the returned slice may be used after dropping +// the P. The M holds a reference on the snapshot to keep the backing array +// alive. +// +//go:yeswritebarrierrec +func (mp *m) snapshotAllp() []*p { + mp.allpSnapshot = allp + return mp.allpSnapshot +} + +// Clear the saved allp snapshot. Should be called as soon as the snapshot is +// no longer required. +// +// Must be called after reacquiring a P, as it requires a write barrier. +// +//go:yeswritebarrierrec +func (mp *m) clearAllpSnapshot() { + mp.allpSnapshot = nil +} + func (mp *m) hasCgoOnStack() bool { return mp.ncgo > 0 || mp.isextra } @@ -1282,13 +1304,13 @@ func casGToWaiting(gp *g, old uint32, reason waitReason) { casgstatus(gp, old, _Gwaiting) } -// casGToWaitingForGC transitions gp from old to _Gwaiting, and sets the wait reason. -// The wait reason must be a valid isWaitingForGC wait reason. +// casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason. +// The wait reason must be a valid isWaitingForSuspendG wait reason. // // Use this over casgstatus when possible to ensure that a waitreason is set. -func casGToWaitingForGC(gp *g, old uint32, reason waitReason) { - if !reason.isWaitingForGC() { - throw("casGToWaitingForGC with non-isWaitingForGC wait reason") +func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) { + if !reason.isWaitingForSuspendG() { + throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason") } casGToWaiting(gp, old, reason) } @@ -1429,23 +1451,7 @@ func stopTheWorld(reason stwReason) worldStop { gp := getg() gp.m.preemptoff = reason.String() systemstack(func() { - // Mark the goroutine which called stopTheWorld preemptible so its - // stack may be scanned. - // This lets a mark worker scan us while we try to stop the world - // since otherwise we could get in a mutual preemption deadlock. - // We must not modify anything on the G stack because a stack shrink - // may occur. A stack shrink is otherwise OK though because in order - // to return from this function (and to leave the system stack) we - // must have preempted all goroutines, including any attempting - // to scan our stack, in which case, any stack shrinking will - // have already completed by the time we exit. - // - // N.B. The execution tracer is not aware of this status - // transition and handles it specially based on the - // wait reason. - casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld) stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack - casgstatus(gp, _Gwaiting, _Grunning) }) return stopTheWorldContext } @@ -1534,7 +1540,30 @@ var gcsema uint32 = 1 // // Returns the STW context. When starting the world, this context must be // passed to startTheWorldWithSema. +// +//go:systemstack func stopTheWorldWithSema(reason stwReason) worldStop { + // Mark the goroutine which called stopTheWorld preemptible so its + // stack may be scanned by the GC or observed by the execution tracer. + // + // This lets a mark worker scan us or the execution tracer take our + // stack while we try to stop the world since otherwise we could get + // in a mutual preemption deadlock. 
+ // + // We must not modify anything on the G stack because a stack shrink + // may occur, now that we switched to _Gwaiting, specifically if we're + // doing this during the mark phase (mark termination excepted, since + // we know that stack scanning is done by that point). A stack shrink + // is otherwise OK though because in order to return from this function + // (and to leave the system stack) we must have preempted all + // goroutines, including any attempting to scan our stack, in which + // case, any stack shrinking will have already completed by the time we + // exit. + // + // N.B. The execution tracer is not aware of this status transition and + // handles it specially based on the wait reason. + casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld) + trace := traceAcquire() if trace.ok() { trace.STWStart(reason) } @@ -1642,6 +1671,9 @@ func stopTheWorldWithSema(reason stwReason) worldStop { worldStopped() + // Switch back to _Grunning, now that the world is stopped. + casgstatus(getg().m.curg, _Gwaiting, _Grunning) + return worldStop{ reason: reason, startedStopping: start, @@ -1999,15 +2031,23 @@ found: func forEachP(reason waitReason, fn func(*p)) { systemstack(func() { gp := getg().m.curg - // Mark the user stack as preemptible so that it may be scanned. - // Otherwise, our attempt to force all P's to a safepoint could - // result in a deadlock as we attempt to preempt a worker that's - // trying to preempt us (e.g. for a stack scan). + // Mark the user stack as preemptible so that it may be scanned + // by the GC or observed by the execution tracer. Otherwise, our + // attempt to force all P's to a safepoint could result in a + // deadlock as we attempt to preempt a goroutine that's trying + // to preempt us (e.g. for a stack scan). + // + // We must not modify anything on the G stack because a stack shrink + // may occur. A stack shrink is otherwise OK though because in order + // to return from this function (and to leave the system stack) we + // must have preempted all goroutines, including any attempting + // to scan our stack, in which case, any stack shrinking will + // have already completed by the time we exit. // // N.B. The execution tracer is not aware of this status // transition and handles it specially based on the // wait reason. - casGToWaitingForGC(gp, _Grunning, reason) + casGToWaitingForSuspendG(gp, _Grunning, reason) forEachPInternal(fn) casgstatus(gp, _Gwaiting, _Grunning) }) @@ -2539,6 +2579,7 @@ func dropm() { g0.stack.lo = 0 g0.stackguard0 = 0 g0.stackguard1 = 0 + mp.g0StackAccurate = false putExtraM(mp) @@ -3254,6 +3295,11 @@ func findRunnable() (gp *g, inheritTime, tryWakeP bool) { // an M. top: + // We may have collected an allp snapshot below. The snapshot is only + // required in each loop iteration. Clear it to allow GC to collect the + // slice. + mp.clearAllpSnapshot() + pp := mp.p.ptr() if sched.gcwaiting.Load() { gcstopm() } @@ -3422,7 +3468,11 @@ top: // which can change underfoot once we no longer block // safe-points. We don't need to snapshot the contents because // everything up to cap(allp) is immutable. - allpSnapshot := allp + // + // We clear the snapshot from the M after return via + // mp.clearAllpSnapshot (in schedule) and on each iteration of the top + // loop. + allpSnapshot := mp.snapshotAllp() // Also snapshot masks. Value changes are OK, but we can't allow // len to change out from under us.
idlepMaskSnapshot := idlepMask @@ -3554,6 +3604,9 @@ top: pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil) } + // We don't need allp anymore at this point, but can't clear the + // snapshot without a P for the write barrier. + // Poll network until next timer. if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 { sched.pollUntil.Store(pollUntil) @@ -3872,23 +3925,23 @@ func injectglist(glist *gList) { if glist.empty() { return } - trace := traceAcquire() - if trace.ok() { - for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { - trace.GoUnpark(gp, 0) - } - traceRelease(trace) - } // Mark all the goroutines as runnable before we put them // on the run queues. head := glist.head.ptr() var tail *g qsize := 0 + trace := traceAcquire() for gp := head; gp != nil; gp = gp.schedlink.ptr() { tail = gp qsize++ casgstatus(gp, _Gwaiting, _Grunnable) + if trace.ok() { + trace.GoUnpark(gp, 0) + } + } + if trace.ok() { + traceRelease(trace) } // Turn the gList into a gQueue. @@ -3994,6 +4047,11 @@ top: gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available + // findRunnable may have collected an allp snapshot. The snapshot is + // only required within findRunnable. Clear it to allow GC to collect the + // slice. + mp.clearAllpSnapshot() + if debug.dontfreezetheworld > 0 && freezing.Load() { // See comment in freezetheworld. We don't want to perturb // scheduler state, so we didn't gcstopm in findRunnable, but @@ -4415,7 +4473,13 @@ func reentersyscall(pc, sp, bp uintptr) { } if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { - print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + throw("entersyscall") + }) + } + if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp { + systemstack(func() { + print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscall") }) } @@ -4553,14 +4617,20 @@ func entersyscallblock() { sp2 := gp.sched.sp sp3 := gp.syscallsp systemstack(func() { - print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscallblock") }) } casgstatus(gp, _Grunning, _Gsyscall) if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { - print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + throw("entersyscallblock") + }) + } + if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp { + systemstack(func() { + print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscallblock") }) } diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s index 1670a809862a2b..74c57bb1dc9136 100644 --- a/src/runtime/rt0_aix_ppc64.s +++ b/src/runtime/rt0_aix_ppc64.s @@ -41,6 +41,8
@@ TEXT _main(SB),NOSPLIT,$-8 MOVD R12, CTR BR (CTR) +// Parameter save space required to cross-call into _cgo_sys_thread_create +#define PARAM_SPACE 16 TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8 // Start with standard C stack frame layout and linkage. MOVW CR, R0 // Save CR in caller's frame MOVD R0, 8(R1) - MOVDU R1, -344(R1) // Allocate frame. + MOVDU R1, -344-PARAM_SPACE(R1) // Allocate frame. // Preserve callee-save registers. - MOVD R14, 48(R1) - MOVD R15, 56(R1) - MOVD R16, 64(R1) - MOVD R17, 72(R1) - MOVD R18, 80(R1) - MOVD R19, 88(R1) - MOVD R20, 96(R1) - MOVD R21,104(R1) - MOVD R22, 112(R1) - MOVD R23, 120(R1) - MOVD R24, 128(R1) - MOVD R25, 136(R1) - MOVD R26, 144(R1) - MOVD R27, 152(R1) - MOVD R28, 160(R1) - MOVD R29, 168(R1) - MOVD g, 176(R1) // R30 - MOVD R31, 184(R1) - FMOVD F14, 192(R1) - FMOVD F15, 200(R1) - FMOVD F16, 208(R1) - FMOVD F17, 216(R1) - FMOVD F18, 224(R1) - FMOVD F19, 232(R1) - FMOVD F20, 240(R1) - FMOVD F21, 248(R1) - FMOVD F22, 256(R1) - FMOVD F23, 264(R1) - FMOVD F24, 272(R1) - FMOVD F25, 280(R1) - FMOVD F26, 288(R1) - FMOVD F27, 296(R1) - FMOVD F28, 304(R1) - FMOVD F29, 312(R1) - FMOVD F30, 320(R1) - FMOVD F31, 328(R1) + MOVD R14, 48+PARAM_SPACE(R1) + MOVD R15, 56+PARAM_SPACE(R1) + MOVD R16, 64+PARAM_SPACE(R1) + MOVD R17, 72+PARAM_SPACE(R1) + MOVD R18, 80+PARAM_SPACE(R1) + MOVD R19, 88+PARAM_SPACE(R1) + MOVD R20, 96+PARAM_SPACE(R1) + MOVD R21,104+PARAM_SPACE(R1) + MOVD R22, 112+PARAM_SPACE(R1) + MOVD R23, 120+PARAM_SPACE(R1) + MOVD R24, 128+PARAM_SPACE(R1) + MOVD R25, 136+PARAM_SPACE(R1) + MOVD R26, 144+PARAM_SPACE(R1) + MOVD R27, 152+PARAM_SPACE(R1) + MOVD R28, 160+PARAM_SPACE(R1) + MOVD R29, 168+PARAM_SPACE(R1) + MOVD g, 176+PARAM_SPACE(R1) // R30 + MOVD R31, 184+PARAM_SPACE(R1) + FMOVD F14, 192+PARAM_SPACE(R1) + FMOVD F15, 200+PARAM_SPACE(R1) + FMOVD F16, 208+PARAM_SPACE(R1) + FMOVD F17, 216+PARAM_SPACE(R1) + FMOVD F18, 224+PARAM_SPACE(R1) + FMOVD F19, 232+PARAM_SPACE(R1) + FMOVD F20, 240+PARAM_SPACE(R1) + FMOVD F21, 248+PARAM_SPACE(R1) + FMOVD F22, 256+PARAM_SPACE(R1) + FMOVD F23, 264+PARAM_SPACE(R1) + FMOVD F24, 272+PARAM_SPACE(R1) + FMOVD F25, 280+PARAM_SPACE(R1) + FMOVD F26, 288+PARAM_SPACE(R1) + FMOVD F27, 296+PARAM_SPACE(R1) + FMOVD F28, 304+PARAM_SPACE(R1) + FMOVD F29, 312+PARAM_SPACE(R1) + FMOVD F30, 320+PARAM_SPACE(R1) + FMOVD F31, 328+PARAM_SPACE(R1) // Synchronous initialization. MOVD $runtime·reginit(SB), R12 @@ -130,44 +132,44 @@ nocgo: done: // Restore saved registers.
- MOVD 48(R1), R14 - MOVD 56(R1), R15 - MOVD 64(R1), R16 - MOVD 72(R1), R17 - MOVD 80(R1), R18 - MOVD 88(R1), R19 - MOVD 96(R1), R20 - MOVD 104(R1), R21 - MOVD 112(R1), R22 - MOVD 120(R1), R23 - MOVD 128(R1), R24 - MOVD 136(R1), R25 - MOVD 144(R1), R26 - MOVD 152(R1), R27 - MOVD 160(R1), R28 - MOVD 168(R1), R29 - MOVD 176(R1), g // R30 - MOVD 184(R1), R31 - FMOVD 196(R1), F14 - FMOVD 200(R1), F15 - FMOVD 208(R1), F16 - FMOVD 216(R1), F17 - FMOVD 224(R1), F18 - FMOVD 232(R1), F19 - FMOVD 240(R1), F20 - FMOVD 248(R1), F21 - FMOVD 256(R1), F22 - FMOVD 264(R1), F23 - FMOVD 272(R1), F24 - FMOVD 280(R1), F25 - FMOVD 288(R1), F26 - FMOVD 296(R1), F27 - FMOVD 304(R1), F28 - FMOVD 312(R1), F29 - FMOVD 320(R1), F30 - FMOVD 328(R1), F31 - - ADD $344, R1 + MOVD 48+PARAM_SPACE(R1), R14 + MOVD 56+PARAM_SPACE(R1), R15 + MOVD 64+PARAM_SPACE(R1), R16 + MOVD 72+PARAM_SPACE(R1), R17 + MOVD 80+PARAM_SPACE(R1), R18 + MOVD 88+PARAM_SPACE(R1), R19 + MOVD 96+PARAM_SPACE(R1), R20 + MOVD 104+PARAM_SPACE(R1), R21 + MOVD 112+PARAM_SPACE(R1), R22 + MOVD 120+PARAM_SPACE(R1), R23 + MOVD 128+PARAM_SPACE(R1), R24 + MOVD 136+PARAM_SPACE(R1), R25 + MOVD 144+PARAM_SPACE(R1), R26 + MOVD 152+PARAM_SPACE(R1), R27 + MOVD 160+PARAM_SPACE(R1), R28 + MOVD 168+PARAM_SPACE(R1), R29 + MOVD 176+PARAM_SPACE(R1), g // R30 + MOVD 184+PARAM_SPACE(R1), R31 + FMOVD 196+PARAM_SPACE(R1), F14 + FMOVD 200+PARAM_SPACE(R1), F15 + FMOVD 208+PARAM_SPACE(R1), F16 + FMOVD 216+PARAM_SPACE(R1), F17 + FMOVD 224+PARAM_SPACE(R1), F18 + FMOVD 232+PARAM_SPACE(R1), F19 + FMOVD 240+PARAM_SPACE(R1), F20 + FMOVD 248+PARAM_SPACE(R1), F21 + FMOVD 256+PARAM_SPACE(R1), F22 + FMOVD 264+PARAM_SPACE(R1), F23 + FMOVD 272+PARAM_SPACE(R1), F24 + FMOVD 280+PARAM_SPACE(R1), F25 + FMOVD 288+PARAM_SPACE(R1), F26 + FMOVD 296+PARAM_SPACE(R1), F27 + FMOVD 304+PARAM_SPACE(R1), F28 + FMOVD 312+PARAM_SPACE(R1), F29 + FMOVD 320+PARAM_SPACE(R1), F30 + FMOVD 328+PARAM_SPACE(R1), F31 + + ADD $344+PARAM_SPACE, R1 MOVD 8(R1), R0 MOVFL R0, $0xff diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 5defe2f615eaa4..14561330bbf281 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) { // Check that the backtrace matches the source code. types := []string{ - "[]main.astruct;", - "bucket;", - "hash;", - "main.astruct;", - "hash * map[string]main.astruct;", + "[]main.astruct", + "bucket", + "hash", + "main.astruct", + "hash * map[string]main.astruct", } for _, name := range types { if !strings.Contains(sgot, name) { - t.Fatalf("could not find %s in 'info typrs astruct' output", name) + t.Fatalf("could not find %q in 'info typrs astruct' output", name) } } } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 4a789639611fb7..63dfc46b00105a 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -556,47 +556,49 @@ type m struct { _ uint32 // align next field to 8 bytes // Fields not known to debuggers. 
- procid uint64 // for debuggers, but offset not hard-coded - gsignal *g // signal-handling g - goSigStack gsignalStack // Go-allocated signal handling stack - sigmask sigset // storage for saved signal mask - tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) - mstartfn func() - curg *g // current running goroutine - caughtsig guintptr // goroutine running during fatal signal - p puintptr // attached p for executing go code (nil if not executing go code) - nextp puintptr - oldp puintptr // the p that was attached before executing a syscall - id int64 - mallocing int32 - throwing throwType - preemptoff string // if != "", keep curg running on this m - locks int32 - dying int32 - profilehz int32 - spinning bool // m is out of work and is actively looking for work - blocked bool // m is blocked on a note - newSigstack bool // minit on C thread called sigaltstack - printlock int8 - incgo bool // m is executing a cgo call - isextra bool // m is an extra m - isExtraInC bool // m is an extra m that is not executing Go code - isExtraInSig bool // m is an extra m in a signal handler - freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) - needextram bool - traceback uint8 - ncgocall uint64 // number of cgo calls in total - ncgo int32 // number of cgo calls currently in progress - cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily - cgoCallers *cgoCallers // cgo traceback if crashing in cgo call - park note - alllink *m // on allm - schedlink muintptr - lockedg guintptr - createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it. - lockedExt uint32 // tracking for external LockOSThread - lockedInt uint32 // tracking for internal lockOSThread - nextwaitm muintptr // next m waiting for lock + procid uint64 // for debuggers, but offset not hard-coded + gsignal *g // signal-handling g + goSigStack gsignalStack // Go-allocated signal handling stack + sigmask sigset // storage for saved signal mask + tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) + mstartfn func() + curg *g // current running goroutine + caughtsig guintptr // goroutine running during fatal signal + p puintptr // attached p for executing go code (nil if not executing go code) + nextp puintptr + oldp puintptr // the p that was attached before executing a syscall + id int64 + mallocing int32 + throwing throwType + preemptoff string // if != "", keep curg running on this m + locks int32 + dying int32 + profilehz int32 + spinning bool // m is out of work and is actively looking for work + blocked bool // m is blocked on a note + newSigstack bool // minit on C thread called sigaltstack + printlock int8 + incgo bool // m is executing a cgo call + isextra bool // m is an extra m + isExtraInC bool // m is an extra m that does not have any Go frames + isExtraInSig bool // m is an extra m in a signal handler + freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) + needextram bool + g0StackAccurate bool // whether the g0 stack has accurate bounds + traceback uint8 + allpSnapshot []*p // Snapshot of allp for use after dropping P in findRunnable, nil otherwise. 
+ ncgocall uint64 // number of cgo calls in total + ncgo int32 // number of cgo calls currently in progress + cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily + cgoCallers *cgoCallers // cgo traceback if crashing in cgo call + park note + alllink *m // on allm + schedlink muintptr + lockedg guintptr + createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it. + lockedExt uint32 // tracking for external LockOSThread + lockedInt uint32 // tracking for internal lockOSThread + nextwaitm muintptr // next m waiting for lock mLockProfile mLockProfile // fields relating to runtime.lock contention profStack []uintptr // used for memory/block/mutex stack traces @@ -1095,6 +1097,7 @@ const ( waitReasonTraceProcStatus // "trace proc status" waitReasonPageTraceFlush // "page trace flush" waitReasonCoroutine // "coroutine" + waitReasonGCWeakToStrongWait // "GC weak to strong wait" ) var waitReasonStrings = [...]string{ @@ -1135,6 +1138,7 @@ var waitReasonStrings = [...]string{ waitReasonTraceProcStatus: "trace proc status", waitReasonPageTraceFlush: "page trace flush", waitReasonCoroutine: "coroutine", + waitReasonGCWeakToStrongWait: "GC weak to strong wait", } func (w waitReason) String() string { @@ -1150,17 +1154,17 @@ func (w waitReason) isMutexWait() bool { w == waitReasonSyncRWMutexLock } -func (w waitReason) isWaitingForGC() bool { - return isWaitingForGC[w] +func (w waitReason) isWaitingForSuspendG() bool { + return isWaitingForSuspendG[w] } -// isWaitingForGC indicates that a goroutine is only entering _Gwaiting and -// setting a waitReason because it needs to be able to let the GC take ownership -// of its stack. The G is always actually executing on the system stack, in -// these cases. +// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and +// setting a waitReason because it needs to be able to let the suspendG +// (used by the GC and the execution tracer) take ownership of its stack. +// The G is always actually executing on the system stack in these cases. // // TODO(mknyszek): Consider replacing this with a new dedicated G status. -var isWaitingForGC = [len(waitReasonStrings)]bool{ +var isWaitingForSuspendG = [len(waitReasonStrings)]bool{ waitReasonStoppingTheWorld: true, waitReasonGCMarkTermination: true, waitReasonGarbageCollection: true, diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 8ba498bdb238d5..6f40f440e807f8 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -605,6 +605,19 @@ var crashing atomic.Int32 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool var testSigusr1 func(gp *g) bool +// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065. +var sigsysIgnored uint32 + +//go:linkname ignoreSIGSYS os.ignoreSIGSYS +func ignoreSIGSYS() { + atomic.Store(&sigsysIgnored, 1) +} + +//go:linkname restoreSIGSYS os.restoreSIGSYS +func restoreSIGSYS() { + atomic.Store(&sigsysIgnored, 0) +} + // sighandler is invoked when a signal occurs. The global g will be // set to a gsignal goroutine and we will be running on the alternate // signal stack. 
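
The two signal_unix.go hunks here (the linknamed setters above and the sighandler check just below) form a narrow gate: package os can ask the runtime to swallow a seccomp-generated SIGSYS around a syscall it is probing (issue #69065) instead of the process dying. A standalone sketch of the gate's logic (the constant and names are mirrored from the hunks; the real si_code plumbing goes through sigctxt):

    package main

    import "sync/atomic"

    // _SYS_SECCOMP is the si_code the Linux kernel sets on a SIGSYS
    // delivered by a seccomp filter.
    const _SYS_SECCOMP = 1

    // sigsysIgnored mirrors the runtime flag that os.ignoreSIGSYS and
    // os.restoreSIGSYS flip via linkname.
    var sigsysIgnored atomic.Uint32

    func ignoreSIGSYS()  { sigsysIgnored.Store(1) }
    func restoreSIGSYS() { sigsysIgnored.Store(0) }

    // shouldIgnoreSIGSYS mirrors the condition added to sighandler: drop
    // the signal only if it came from seccomp and os asked for that.
    func shouldIgnoreSIGSYS(sigcode int32) bool {
        return sigcode == _SYS_SECCOMP && sigsysIgnored.Load() != 0
    }

    func main() {
        ignoreSIGSYS()
        defer restoreSIGSYS()
        _ = shouldIgnoreSIGSYS(_SYS_SECCOMP) // true inside the bracketed region
    }

On non-Linux Unix systems the os_unix_nonlinux.go hunk pins sigFromSeccomp to false, so the gate can never fire there.
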
The parameter gp will be the value of the global g @@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { return } + if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 { + return + } + if flags&_SigKill != 0 { dieFromSignal(sig) } diff --git a/src/runtime/stack.go b/src/runtime/stack.go index cdf859a7ff1342..f0efb176b5e049 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -69,7 +69,7 @@ const ( // to each stack below the usual guard area for OS-specific // purposes like signal handling. Used on Windows, Plan 9, // and iOS because they do not use a separate stack. - stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 + stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 // The minimum size of stack used by Go code stackMin = 2048 @@ -1173,14 +1173,14 @@ func isShrinkStackSafe(gp *g) bool { return false } // We also can't copy the stack while tracing is enabled, and - // gp is in _Gwaiting solely to make itself available to the GC. + // gp is in _Gwaiting solely to make itself available to suspendG. // In these cases, the G is actually executing on the system // stack, and the execution tracer may want to take a stack trace // of the G's stack. Note: it's safe to access gp.waitreason here. // We're only checking if this is true if we took ownership of the // G with the _Gscan bit. This prevents the goroutine from transitioning, // which prevents gp.waitreason from changing. - if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForGC() { + if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() { return false } return true @@ -1330,7 +1330,7 @@ func morestackc() { } // startingStackSize is the amount of stack that new goroutines start with. -// It is a power of 2, and between _FixedStack and maxstacksize, inclusive. +// It is a power of 2, and between fixedStack and maxstacksize, inclusive. // startingStackSize is updated every GC by tracking the average size of // stacks scanned during the GC. var startingStackSize uint32 = fixedStack diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s index 01992d59d434eb..408d52a17343b6 100644 --- a/src/runtime/sys_darwin_amd64.s +++ b/src/runtime/sys_darwin_amd64.s @@ -739,7 +739,7 @@ ok: // // syscall9 expects a 32-bit result and tests for 32-bit -1 // to decide there was an error. 
-TEXT runtime·syscall9(SB),NOSPLIT,$16 +TEXT runtime·syscall9(SB),NOSPLIT,$32 MOVQ (0*8)(DI), R13// fn MOVQ (2*8)(DI), SI // a2 MOVQ (3*8)(DI), DX // a3 @@ -747,15 +747,18 @@ TEXT runtime·syscall9(SB),NOSPLIT,$16 MOVQ (5*8)(DI), R8 // a5 MOVQ (6*8)(DI), R9 // a6 MOVQ (7*8)(DI), R10 // a7 + MOVQ R10, 0(SP) MOVQ (8*8)(DI), R11 // a8 + MOVQ R11, 8(SP) MOVQ (9*8)(DI), R12 // a9 - MOVQ DI, (SP) + MOVQ R12, 16(SP) + MOVQ DI, 24(SP) MOVQ (1*8)(DI), DI // a1 XORL AX, AX // vararg: say "no float args" CALL R13 - MOVQ (SP), DI + MOVQ 24(SP), DI MOVQ AX, (10*8)(DI) // r1 MOVQ DX, (11*8)(DI) // r2 @@ -764,7 +767,7 @@ TEXT runtime·syscall9(SB),NOSPLIT,$16 CALL libc_error(SB) MOVLQSX (AX), AX - MOVQ (SP), DI + MOVQ 24(SP), DI MOVQ AX, (12*8)(DI) // err ok: diff --git a/src/runtime/sys_linux_s390x.s b/src/runtime/sys_linux_s390x.s index adf5612c3cfbee..59e2f2ab31f204 100644 --- a/src/runtime/sys_linux_s390x.s +++ b/src/runtime/sys_linux_s390x.s @@ -112,9 +112,10 @@ TEXT runtime·usleep(SB),NOSPLIT,$16-4 MOVW $1000000, R3 DIVD R3, R2 MOVD R2, 8(R15) - MOVW $1000, R3 - MULLD R2, R3 + MULLD R2, R3 // Convert sec to usec and subtract SUB R3, R4 + MOVW $1000, R3 + MULLD R3, R4 // Convert remaining usec into nsec. MOVD R4, 16(R15) // nanosleep(&ts, 0) diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 69d720a395c48d..85b1b8c9024a73 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -454,43 +454,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint //go:linkname syscall_Syscall syscall.Syscall //go:nosplit func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3) } //go:linkname syscall_Syscall6 syscall.Syscall6 //go:nosplit func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6) } //go:linkname syscall_Syscall9 syscall.Syscall9 //go:nosplit func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9) } //go:linkname syscall_Syscall12 syscall.Syscall12 //go:nosplit func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) } //go:linkname syscall_Syscall15 syscall.Syscall15 //go:nosplit func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} - return syscall_SyscallN(fn, args[:nargs]...) 
+ return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) } //go:linkname syscall_Syscall18 syscall.Syscall18 //go:nosplit func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) } // maxArgs should be divisible by 2, as Windows stack @@ -503,7 +497,15 @@ const maxArgs = 42 //go:linkname syscall_SyscallN syscall.SyscallN //go:nosplit func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { - if len(args) > maxArgs { + return syscall_syscalln(fn, uintptr(len(args)), args...) +} + +//go:nosplit +func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) { + if n > uintptr(len(args)) { + panic("syscall: n > len(args)") // should not be reachable from user code + } + if n > maxArgs { panic("runtime: SyscallN has too many arguments") } @@ -512,7 +514,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { // calls back into Go. c := &getg().m.winsyscall c.fn = fn - c.n = uintptr(len(args)) + c.n = n if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index 6a056c8d2b190c..156cf3eb8e5c71 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -1212,6 +1212,13 @@ func TestBigStackCallbackSyscall(t *testing.T) { } } +func TestSyscallStackUsage(t *testing.T) { + // Test that the stack usage of a syscall doesn't exceed the limit. + // See https://go.dev/issue/69813. + syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +} + var ( modwinmm = syscall.NewLazyDLL("winmm.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll") diff --git a/src/runtime/testdata/testprogcgo/callback_pprof.go b/src/runtime/testdata/testprogcgo/callback_pprof.go new file mode 100644 index 00000000000000..cd235d03419d11 --- /dev/null +++ b/src/runtime/testdata/testprogcgo/callback_pprof.go @@ -0,0 +1,138 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 && !windows + +package main + +// Regression test for https://go.dev/issue/72870. Go code called from C should +// never be reported as external code. + +/* +#include <pthread.h> + +void go_callback1(); +void go_callback2(); + +static void *callback_pprof_thread(void *arg) { + go_callback1(); + return 0; +} + +static void c_callback(void) { + go_callback2(); +} + +static void start_callback_pprof_thread() { + pthread_t th; + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_create(&th, &attr, callback_pprof_thread, 0); + // Don't join, caller will watch pprof.
+} +*/ +import "C" + +import ( + "bytes" + "fmt" + "internal/profile" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("CgoCallbackPprof", CgoCallbackPprof) +} + +func CgoCallbackPprof() { + C.start_callback_pprof_thread() + + var buf bytes.Buffer + if err := pprof.StartCPUProfile(&buf); err != nil { + fmt.Printf("Error starting CPU profile: %v\n", err) + os.Exit(1) + } + time.Sleep(1 * time.Second) + pprof.StopCPUProfile() + + p, err := profile.Parse(&buf) + if err != nil { + fmt.Printf("Error parsing profile: %v\n", err) + os.Exit(1) + } + + foundCallee := false + for _, s := range p.Sample { + funcs := flattenFrames(s) + if len(funcs) == 0 { + continue + } + + leaf := funcs[0] + if leaf.Name != "main.go_callback1_callee" { + continue + } + foundCallee = true + + if len(funcs) < 2 { + fmt.Printf("Profile: %s\n", p) + frames := make([]string, len(funcs)) + for i := range funcs { + frames[i] = funcs[i].Name + } + fmt.Printf("FAIL: main.go_callback1_callee sample missing caller in frames %v\n", frames) + os.Exit(1) + } + + if funcs[1].Name != "main.go_callback1" { + // In https://go.dev/issue/72870, this will be runtime._ExternalCode. + fmt.Printf("Profile: %s\n", p) + frames := make([]string, len(funcs)) + for i := range funcs { + frames[i] = funcs[i].Name + } + fmt.Printf("FAIL: main.go_callback1_callee sample caller got %s want main.go_callback1 in frames %v\n", funcs[1].Name, frames) + os.Exit(1) + } + } + + if !foundCallee { + fmt.Printf("Missing main.go_callback1_callee sample in profile %s\n", p) + os.Exit(1) + } + + fmt.Printf("OK\n") +} + +// Return the frame functions in s, regardless of inlining. +func flattenFrames(s *profile.Sample) []*profile.Function { + ret := make([]*profile.Function, 0, len(s.Location)) + for _, loc := range s.Location { + for _, line := range loc.Line { + ret = append(ret, line.Function) + } + } + return ret +} + +//export go_callback1 +func go_callback1() { + // This is a separate function just to ensure we have another Go + // function as the caller in the profile. + go_callback1_callee() +} + +func go_callback1_callee() { + C.c_callback() + + // Spin for CPU samples. + for { + } +} + +//export go_callback2 +func go_callback2() { +} diff --git a/src/runtime/time.go b/src/runtime/time.go index fc664f49eb8d7c..7b344a349610d3 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,10 +26,11 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock - state uint8 // state bits - isChan bool // timer has a channel; immutable; can be read without lock - blocked uint32 // number of goroutines blocked on timer's channel + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be @@ -68,6 +69,20 @@ type timer struct { // sendLock protects sends on the timer's channel. // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. sendLock mutex + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. 
+ // The value is incremented when about to send a value on the channel, + // and decremented after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // isSending is incremented only when t.mu is held. + // isSending is decremented only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + isSending atomic.Int32 } // init initializes a newly allocated timer t. @@ -431,6 +446,15 @@ func (t *timer) stop() bool { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.period == 0 && t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -490,6 +514,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in t.maybeRunAsync() } t.trace("modify") + oldPeriod := t.period t.period = period if f != nil { t.f = f @@ -525,6 +550,15 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if oldPeriod == 0 && t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -1013,6 +1047,15 @@ func (t *timer) unlockAndRun(now int64) { } t.updateHeap() } + + async := debug.asynctimerchan.Load() != 0 + if !async && t.isChan && t.period == 0 { + // Tell Stop/Reset that we are sending a value. + if t.isSending.Add(1) < 0 { + throw("too many concurrent timer firings") + } + } + t.unlock() if raceenabled { @@ -1028,7 +1071,6 @@ func (t *timer) unlockAndRun(now int64) { ts.unlock() } - async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu @@ -1044,7 +1086,21 @@ func (t *timer) unlockAndRun(now int64) { // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. + // + // The isSending field tells t.stop or t.modify that we have + // started to send the value. That lets them correctly return + // true meaning that no value was sent. lock(&t.sendLock) + + if t.period == 0 { + // We are committed to possibly sending a value + // based on seq, so no need to keep telling + // stop/modify that we are sending. + if t.isSending.Add(-1) < 0 { + throw("mismatched isSending updates") + } + } + if t.seq != seq { f = func(any, uintptr, int64) {} } diff --git a/src/runtime/trace.go b/src/runtime/trace.go index adf7b0951dce4b..d59501e80e891f 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -375,7 +375,7 @@ func traceAdvance(stopTrace bool) { me := getg().m.curg // We don't have to handle this G status transition because we // already eliminated ourselves from consideration above. 
- casGToWaitingForGC(me, _Grunning, waitReasonTraceGoroutineStatus) + casGToWaitingForSuspendG(me, _Grunning, waitReasonTraceGoroutineStatus) // We need to suspend and take ownership of the G to safely read its // goid. Note that we can't actually emit the event at this point // because we might stop the G in a window where it's unsafe to write diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go index 195b3e1c37f984..7c4cb5502377c0 100644 --- a/src/runtime/traceruntime.go +++ b/src/runtime/traceruntime.go @@ -99,24 +99,26 @@ const ( traceBlockDebugCall traceBlockUntilGCEnds traceBlockSleep + traceBlockGCWeakToStrongWait ) var traceBlockReasonStrings = [...]string{ - traceBlockGeneric: "unspecified", - traceBlockForever: "forever", - traceBlockNet: "network", - traceBlockSelect: "select", - traceBlockCondWait: "sync.(*Cond).Wait", - traceBlockSync: "sync", - traceBlockChanSend: "chan send", - traceBlockChanRecv: "chan receive", - traceBlockGCMarkAssist: "GC mark assist wait for work", - traceBlockGCSweep: "GC background sweeper wait", - traceBlockSystemGoroutine: "system goroutine wait", - traceBlockPreempted: "preempted", - traceBlockDebugCall: "wait for debug call", - traceBlockUntilGCEnds: "wait until GC ends", - traceBlockSleep: "sleep", + traceBlockGeneric: "unspecified", + traceBlockForever: "forever", + traceBlockNet: "network", + traceBlockSelect: "select", + traceBlockCondWait: "sync.(*Cond).Wait", + traceBlockSync: "sync", + traceBlockChanSend: "chan send", + traceBlockChanRecv: "chan receive", + traceBlockGCMarkAssist: "GC mark assist wait for work", + traceBlockGCSweep: "GC background sweeper wait", + traceBlockSystemGoroutine: "system goroutine wait", + traceBlockPreempted: "preempted", + traceBlockDebugCall: "wait for debug call", + traceBlockUntilGCEnds: "wait until GC ends", + traceBlockSleep: "sleep", + traceBlockGCWeakToStrongWait: "GC weak to strong wait", } // traceGoStopReason is an enumeration of reasons a goroutine might yield. diff --git a/src/runtime/tracestatus.go b/src/runtime/tracestatus.go index 77ccdd139841a7..5e109a9e346ac7 100644 --- a/src/runtime/tracestatus.go +++ b/src/runtime/tracestatus.go @@ -140,11 +140,12 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) traceGoStatus { // There are a number of cases where a G might end up in // _Gwaiting but it's actually running in a non-preemptive // state but needs to present itself as preempted to the - // garbage collector. In these cases, we're not going to - // emit an event, and we want these goroutines to appear in - // the final trace as if they're running, not blocked. + // garbage collector and traceAdvance (via suspendG). In + // these cases, we're not going to emit an event, and we + // want these goroutines to appear in the final trace as + // if they're running, not blocked. tgs = traceGoWaiting - if status == _Gwaiting && wr.isWaitingForGC() { + if status == _Gwaiting && wr.isWaitingForSuspendG() { tgs = traceGoRunning } case _Gdead: diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go index 81134cb0bd27ff..a7873e6ad8c93e 100644 --- a/src/syscall/dll_windows.go +++ b/src/syscall/dll_windows.go @@ -42,6 +42,7 @@ func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a // Deprecated: Use [SyscallN] instead. 
func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) +//go:noescape func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) func loadlibrary(filename *uint16) (handle uintptr, err Errno) func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno) diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 149cc2f11c128c..bbdab46de48c03 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -293,3 +293,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go index 3226cb88cd999a..686fd23bef435d 100644 --- a/src/syscall/exec_freebsd.go +++ b/src/syscall/exec_freebsd.go @@ -317,3 +317,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index 768e8c131c1323..0e886508737d1e 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -314,6 +314,11 @@ childerror: } } +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} + func ioctlPtr(fd, req uintptr, arg unsafe.Pointer) (err Errno) { return ioctl(fd, req, uintptr(arg)) } diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index 7a6750084486cf..a0579627a300bf 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -289,3 +289,8 @@ childerror: rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e4b9ce1bf47da3..d0319488452704 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -7,6 +7,7 @@ package syscall import ( + errpkg "errors" "internal/itoa" "runtime" "unsafe" @@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att if clone3 != nil { pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0) } else { + // N.B. Keep in sync with doCheckClonePidfd. flags |= uintptr(SIGCHLD) if runtime.GOARCH == "s390x" { // On Linux/s390, the first two arguments of clone(2) are swapped. @@ -735,3 +737,95 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { return nil } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + if sys.PidFD != nil && *sys.PidFD != -1 { + Close(*sys.PidFD) + *sys.PidFD = -1 + } +} + +// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a +// clone. +// +//go:linkname os_checkClonePidfd os.checkClonePidfd +func os_checkClonePidfd() error { + pidfd := int32(-1) + pid, errno := doCheckClonePidfd(&pidfd) + if errno != 0 { + return errno + } + + if pidfd == -1 { + // Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process + // before returning. 
+ + var err error + for { + var status WaitStatus + // WCLONE is an untyped constant that sets bit 31, so + // it cannot convert directly to int on 32-bit + // GOARCHes. We must convert through another type + // first. + flags := uint(WCLONE) + _, err = Wait4(int(pid), &status, int(flags), nil) + if err != EINTR { + break + } + } + if err != nil { + return err + } + + return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd") + } + + // Good: CLONE_PIDFD provided a pidfd. Reap the process and close the + // pidfd. + defer Close(int(pidfd)) + + for { + const _P_PIDFD = 3 + _, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED | WCLONE, 0, 0) + if errno != EINTR { + break + } + } + if errno != 0 { + return errno + } + + return nil +} + +// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and +// child execution. This is a separate function so we can separate the child's +// and parent's stack frames if we're using vfork. +// +// This is go:noinline because the point is to keep the stack frames of this +// and os_checkClonePidfd separate. +// +//go:noinline +func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { + flags := uintptr(CLONE_VFORK | CLONE_VM | CLONE_PIDFD) + if runtime.GOARCH == "s390x" { + // On Linux/s390, the first two arguments of clone(2) are swapped. + pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd))) + } else { + pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd))) + } + if errno != 0 || pid != 0 { + // If we're in the parent, we must return immediately + // so we're not in the same stack frame as the child. + // This can at most use the return PC, which the child + // will not modify, and the results of + // rawVforkSyscall, which must have been written after + // the child was replaced. + return + } + + for { + RawSyscall(SYS_EXIT_GROUP, 0, 0, 0) + } +} diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go index 1b90aa7e72e0ed..4747fa075834af 100644 --- a/src/syscall/exec_unix.go +++ b/src/syscall/exec_unix.go @@ -237,6 +237,10 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) for err1 == EINTR { _, err1 = Wait4(pid, &wstatus, 0, nil) } + + // OS-specific cleanup on failure. + forkAndExecFailureCleanup(attr, sys) + return 0, err } diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go index d49ee522c4fe88..bbc1a11784be0e 100644 --- a/src/syscall/syscall_windows.go +++ b/src/syscall/syscall_windows.go @@ -406,6 +406,9 @@ func Open(path string, mode int, perm uint32) (fd Handle, err error) { } } } + if createmode == CREATE_NEW { + attrs |= FILE_FLAG_OPEN_REPARSE_POINT // don't follow symlinks + } if createmode == OPEN_EXISTING && access == GENERIC_READ { // Necessary for opening directory handles. attrs |= FILE_FLAG_BACKUP_SEMANTICS diff --git a/src/syscall/syscall_windows_test.go b/src/syscall/syscall_windows_test.go index f67e8991591601..a6c6eff31f0c45 100644 --- a/src/syscall/syscall_windows_test.go +++ b/src/syscall/syscall_windows_test.go @@ -213,6 +213,51 @@ func TestGetStartupInfo(t *testing.T) { } } +func TestSyscallAllocations(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + + // Test that syscall.SyscallN arguments do not escape. + // The function used (in this case GetVersion) doesn't matter + // as long as it is always available and doesn't panic. 
+ h, err := syscall.LoadLibrary("kernel32.dll") + if err != nil { + t.Fatal(err) + } + defer syscall.FreeLibrary(h) + proc, err := syscall.GetProcAddress(h, "GetVersion") + if err != nil { + t.Fatal(err) + } + + testAllocs := func(t *testing.T, name string, fn func() error) { + t.Run(name, func(t *testing.T) { + n := int(testing.AllocsPerRun(10, func() { + if err := fn(); err != nil { + t.Fatalf("%s: %v", name, err) + } + })) + if n > 0 { + t.Errorf("allocs = %d, want 0", n) + } + }) + } + + testAllocs(t, "SyscallN", func() error { + r0, _, e1 := syscall.SyscallN(proc, 0, 0, 0) + if r0 == 0 { + return syscall.Errno(e1) + } + return nil + }) + testAllocs(t, "Syscall", func() error { + r0, _, e1 := syscall.Syscall(proc, 3, 0, 0, 0) + if r0 == 0 { + return syscall.Errno(e1) + } + return nil + }) +} + func FuzzUTF16FromString(f *testing.F) { f.Add("hi") // ASCII f.Add("â") // latin1 diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 29f56ef7520baa..285a2e748c4af7 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -785,6 +785,119 @@ func TestAdjustTimers(t *testing.T) { } } +func TestStopResult(t *testing.T) { + testStopResetResult(t, true) +} + +func TestResetResult(t *testing.T) { + testStopResetResult(t, false) +} + +// Test that when racing between running a timer and stopping a timer Stop +// consistently indicates whether a value can be read from the channel. +// Issue #69312. +func testStopResetResult(t *testing.T, testStop bool) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + testStopResetResultGODEBUG(t, testStop, name) + }) + } +} + +func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { + t.Setenv("GODEBUG", "asynctimerchan="+godebug) + + stopOrReset := func(timer *Timer) bool { + if testStop { + return timer.Stop() + } else { + return timer.Reset(1 * Hour) + } + } + + start := make(chan struct{}) + var wg sync.WaitGroup + const N = 1000 + wg.Add(N) + for range N { + go func() { + defer wg.Done() + <-start + for j := 0; j < 100; j++ { + timer1 := NewTimer(1 * Millisecond) + timer2 := NewTimer(1 * Millisecond) + select { + case <-timer1.C: + if !stopOrReset(timer2) { + // The test fails if this + // channel read times out. + <-timer2.C + } + case <-timer2.C: + if !stopOrReset(timer1) { + // The test fails if this + // channel read times out. + <-timer1.C + } + } + } + }() + } + close(start) + wg.Wait() +} + +// Test having a large number of goroutines wake up a ticker simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTicker(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTicker(Microsecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 100000 { + select { + case <-timer.C: + case <-After(Millisecond): + } + } + }() + } + wg.Wait() +} + +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. 
+func TestMultiWakeupTimer(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTimer(Nanosecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 10000 { + select { + case <-timer.C: + default: + } + timer.Reset(Nanosecond) + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. // https://golang.org/issue/38860 diff --git a/src/time/time_test.go b/src/time/time_test.go index 70eb61478480e0..c12b9117d0f5c1 100644 --- a/src/time/time_test.go +++ b/src/time/time_test.go @@ -14,6 +14,7 @@ import ( "math/rand" "os" "runtime" + "slices" "strings" "sync" "testing" @@ -1084,10 +1085,15 @@ func TestLoadFixed(t *testing.T) { // So GMT+1 corresponds to -3600 in the Go zone, not +3600. name, offset := Now().In(loc).Zone() // The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1" - // on earlier versions; we accept both. (Issue #17276). - if !(name == "GMT+1" || name == "-01") || offset != -1*60*60 { - t.Errorf("Now().In(loc).Zone() = %q, %d, want %q or %q, %d", - name, offset, "GMT+1", "-01", -1*60*60) + // on earlier versions; we accept both. (Issue 17276.) + wantName := []string{"GMT+1", "-01"} + // The zone abbreviation may be "+01" on OpenBSD. (Issue 69840.) + if runtime.GOOS == "openbsd" { + wantName = append(wantName, "+01") + } + if !slices.Contains(wantName, name) || offset != -1*60*60 { + t.Errorf("Now().In(loc).Zone() = %q, %d, want %q (one of), %d", + name, offset, wantName, -1*60*60) } } diff --git a/src/unique/clone_test.go b/src/unique/clone_test.go index 69a9a540c07fa0..b0ba5b312e1466 100644 --- a/src/unique/clone_test.go +++ b/src/unique/clone_test.go @@ -27,7 +27,7 @@ func cSeq(stringOffsets ...uintptr) cloneSeq { func testCloneSeq[T any](t *testing.T, want cloneSeq) { typName := reflect.TypeFor[T]().Name() - typ := abi.TypeOf(*new(T)) + typ := abi.TypeFor[T]() t.Run(typName, func(t *testing.T) { got := makeCloneSeq(typ) if !reflect.DeepEqual(got, want) { diff --git a/src/unique/handle.go b/src/unique/handle.go index 0842ae3185f2cc..abc620f60fe14e 100644 --- a/src/unique/handle.go +++ b/src/unique/handle.go @@ -31,7 +31,7 @@ func (h Handle[T]) Value() T { // are equal if and only if the values used to produce them are equal. func Make[T comparable](value T) Handle[T] { // Find the map for type T. - typ := abi.TypeOf(value) + typ := abi.TypeFor[T]() ma, ok := uniqueMaps.Load(typ) if !ok { // This is a good time to initialize cleanup, since we must go through @@ -50,13 +50,13 @@ func Make[T comparable](value T) Handle[T] { toInsert *T // Keep this around to keep it alive. toInsertWeak weak.Pointer[T] ) - newValue := func() weak.Pointer[T] { + newValue := func() (T, weak.Pointer[T]) { if toInsert == nil { toInsert = new(T) *toInsert = clone(value, &m.cloneSeq) toInsertWeak = weak.Make(toInsert) } - return toInsertWeak + return *toInsert, toInsertWeak } var ptr *T for { @@ -64,7 +64,8 @@ func Make[T comparable](value T) Handle[T] { wp, ok := m.Load(value) if !ok { // Try to insert a new value into the map. - wp, _ = m.LoadOrStore(value, newValue()) + k, v := newValue() + wp, _ = m.LoadOrStore(k, v) } // Now that we're sure there's a value in the map, let's // try to get the pointer we need out of it. 
diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index dffe10ac728189..dd4b01ef79900b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -9,7 +9,10 @@ import ( "internal/abi" "reflect" "runtime" + "strings" "testing" + "time" + "unsafe" ) // Set up special types. Because the internal maps are sharded by type, @@ -41,6 +44,7 @@ func TestHandle(t *testing.T) { s: [2]testStringStruct{testStringStruct{"y"}, testStringStruct{"z"}}, }) testHandle[testStruct](t, testStruct{0.5, "184"}) + testHandle[testEface](t, testEface("hello")) } func testHandle[T comparable](t *testing.T, value T) { @@ -93,7 +97,7 @@ func drainMaps(t *testing.T) { func checkMapsFor[T comparable](t *testing.T, value T) { // Manually load the value out of the map. - typ := abi.TypeOf(value) + typ := abi.TypeFor[T]() a, ok := uniqueMaps.Load(typ) if !ok { return @@ -109,3 +113,22 @@ func checkMapsFor[T comparable](t *testing.T, value T) { } t.Errorf("failed to drain internal maps of %v", value) } + +func TestMakeClonesStrings(t *testing.T) { + s := strings.Clone("abcdefghijklmnopqrstuvwxyz") // N.B. Must be big enough to not be tiny-allocated. + ran := make(chan bool) + runtime.SetFinalizer(unsafe.StringData(s), func(_ *byte) { + ran <- true + }) + h := Make(s) + + // Clean up s (hopefully) and run the finalizer. + runtime.GC() + + select { + case <-time.After(1 * time.Second): + t.Fatal("string was improperly retained") + case <-ran: + } + runtime.KeepAlive(h) +} diff --git a/src/vendor/golang.org/x/net/http/httpproxy/proxy.go b/src/vendor/golang.org/x/net/http/httpproxy/proxy.go index 6404aaf157d6ad..d89c257ae72314 100644 --- a/src/vendor/golang.org/x/net/http/httpproxy/proxy.go +++ b/src/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "net" + "net/netip" "net/url" "os" "strings" @@ -177,8 +178,10 @@ func (cfg *config) useProxy(addr string) bool { if host == "localhost" { return false } - ip := net.ParseIP(host) - if ip != nil { + nip, err := netip.ParseAddr(host) + var ip net.IP + if err == nil { + ip = net.IP(nip.AsSlice()) if ip.IsLoopback() { return false } @@ -360,6 +363,9 @@ type domainMatch struct { } func (m domainMatch) match(host, port string, ip net.IP) bool { + if ip != nil { + return false + } if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { return m.port == "" || m.port == port } diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index b8a0b84a282a32..1c88c1299f14cf 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -8,7 +8,7 @@ golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/sha3 -# golang.org/x/net v0.25.1-0.20240603202750-6249541f2a6c +# golang.org/x/net v0.25.1-0.20250304182835-b70a9e3eaa27 ## explicit; go 1.18 golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts diff --git a/test/codegen/writebarrier.go b/test/codegen/writebarrier.go index cfcfe15a403856..4e0da144334dd5 100644 --- a/test/codegen/writebarrier.go +++ b/test/codegen/writebarrier.go @@ -53,3 +53,28 @@ func combine4slice(p *[4][]byte, a, b, c, d []byte) { // arm64:-`.*runtime[.]gcWriteBarrier` p[3] = d } + +type S struct { + a, b string + c *int +} + +var g1, g2 *int + +func issue71228(dst *S, ptr *int) { + // Make sure that the non-write-barrier write. + // "sp.c = ptr" happens before the large write + // barrier "*dst = *sp". We approximate testing + // that by ensuring that two global variable write + // barriers aren't combined. 
+ _ = *dst + var s S + sp := &s + //amd64:`.*runtime[.]gcWriteBarrier1` + g1 = nil + sp.c = ptr // outside of any write barrier + //amd64:`.*runtime[.]gcWriteBarrier1` + g2 = nil + //amd64:`.*runtime[.]wbMove` + *dst = *sp +} diff --git a/test/fixedbugs/issue14636.go b/test/fixedbugs/issue14636.go index c8e751fb613c2e..a866c9a9e30e8e 100644 --- a/test/fixedbugs/issue14636.go +++ b/test/fixedbugs/issue14636.go @@ -12,22 +12,29 @@ import ( "bytes" "log" "os/exec" + "runtime" "strings" ) func main() { - checkLinkOutput("", "-B argument must start with 0x") + // The cannot open file error indicates that the parsing of -B flag + // succeeded and it failed at a later step. checkLinkOutput("0", "-B argument must start with 0x") - checkLinkOutput("0x", "usage") + checkLinkOutput("0x", "cannot open file nonexistent.o") checkLinkOutput("0x0", "-B argument must have even number of digits") - checkLinkOutput("0x00", "usage") + checkLinkOutput("0x00", "cannot open file nonexistent.o") checkLinkOutput("0xYZ", "-B argument contains invalid hex digit") - checkLinkOutput("0x"+strings.Repeat("00", 32), "usage") - checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)") + + maxLen := 32 + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + maxLen = 16 + } + checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o") + checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long") } func checkLinkOutput(buildid string, message string) { - cmd := exec.Command("go", "tool", "link", "-B", buildid) + cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o") out, err := cmd.CombinedOutput() if err == nil { log.Fatalf("expected cmd/link to fail") @@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) { } if !strings.Contains(firstLine, message) { - log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine) + log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine) } } diff --git a/test/fixedbugs/issue63489a.go b/test/fixedbugs/issue63489a.go index b88120f2c045ef..2b46814f9566de 100644 --- a/test/fixedbugs/issue63489a.go +++ b/test/fixedbugs/issue63489a.go @@ -1,16 +1,20 @@ -// errorcheck -lang=go1.21 +// errorcheck -lang=go1.22 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.4 +// This file has been changed from its original version as +// //go:build file versions below 1.21 set the language version to 1.21. +// The original tested a -lang version of 1.21 with a file version of +// go1.4 while this new version tests a -lang version of go1.22 +// with a file version of go1.21. -package p - -const c = 0o123 // ERROR "file declares //go:build go1.4" +//go:build go1.21 -// ERROR "file declares //go:build go1.4" +package p -//line issue63489a.go:13:1 -const d = 0o124 +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} diff --git a/test/fixedbugs/issue63489b.go b/test/fixedbugs/issue63489b.go index 2ad590dfc33347..fd897dea97cb88 100644 --- a/test/fixedbugs/issue63489b.go +++ b/test/fixedbugs/issue63489b.go @@ -1,11 +1,20 @@ -// errorcheck -lang=go1.4 +// errorcheck -lang=go1.21 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.4 +// This file has been changed from its original version as +// //go:build file versions below 1.21 set the language version to 1.21. +// The original tested a -lang version of 1.4 with a file version of +// go1.4 while this new version tests a -lang version of go1.1 +// with a file version of go1.21. + +//go:build go1.21 package p -const c = 0o123 // ERROR "file declares //go:build go1.4" +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} diff --git a/test/fixedbugs/issue68580.go b/test/fixedbugs/issue68580.go new file mode 100644 index 00000000000000..b60a7447aaa77b --- /dev/null +++ b/test/fixedbugs/issue68580.go @@ -0,0 +1,15 @@ +// compile -goexperiment aliastypeparams + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type A[P any] = struct{ _ P } + +type N[P any] A[P] + +func f[P any](N[P]) {} + +var _ = f[int] diff --git a/test/fixedbugs/issue69110.go b/test/fixedbugs/issue69110.go new file mode 100644 index 00000000000000..71a4bcac31a16e --- /dev/null +++ b/test/fixedbugs/issue69110.go @@ -0,0 +1,57 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "maps" + _ "unsafe" +) + +func main() { + for i := 0; i < 100; i++ { + f() + } +} + +const NB = 4 + +func f() { + // Make a map with NB buckets, at max capacity. + // 6.5 entries/bucket. + ne := NB * 13 / 2 + m := map[int]int{} + for i := 0; i < ne; i++ { + m[i] = i + } + + // delete/insert a lot, to hopefully get lots of overflow buckets + // and trigger a same-size grow. + ssg := false + for i := ne; i < ne+1000; i++ { + delete(m, i-ne) + m[i] = i + if sameSizeGrow(m) { + ssg = true + break + } + } + if !ssg { + return + } + + // Insert 1 more entry, which would ordinarily trigger a growth. + // We can't grow while growing, so we instead go over our + // target capacity. + m[-1] = -1 + + // Cloning in this state will make a map with a destination bucket + // array twice the size of the source. + _ = maps.Clone(m) +} + +//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test +func sameSizeGrow(m map[int]int) bool diff --git a/test/fixedbugs/issue69434.go b/test/fixedbugs/issue69434.go new file mode 100644 index 00000000000000..682046601960da --- /dev/null +++ b/test/fixedbugs/issue69434.go @@ -0,0 +1,173 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bufio" + "fmt" + "io" + "iter" + "math/rand" + "os" + "strings" + "unicode" +) + +// WordReader is the struct that implements io.Reader +type WordReader struct { + scanner *bufio.Scanner +} + +// NewWordReader creates a new WordReader from an io.Reader +func NewWordReader(r io.Reader) *WordReader { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanWords) + return &WordReader{ + scanner: scanner, + } +} + +// Read reads data from the input stream and returns a single lowercase word at a time +func (wr *WordReader) Read(p []byte) (n int, err error) { + if !wr.scanner.Scan() { + if err := wr.scanner.Err(); err != nil { + return 0, err + } + return 0, io.EOF + } + word := wr.scanner.Text() + cleanedWord := removeNonAlphabetic(word) + if len(cleanedWord) == 0 { + return wr.Read(p) + } + n = copy(p, []byte(cleanedWord)) + return n, nil +} + +// All returns an iterator allowing the caller to iterate over the WordReader using for/range. +func (wr *WordReader) All() iter.Seq[string] { + word := make([]byte, 1024) + return func(yield func(string) bool) { + var err error + var n int + for n, err = wr.Read(word); err == nil; n, err = wr.Read(word) { + if !yield(string(word[:n])) { + return + } + } + if err != io.EOF { + fmt.Fprintf(os.Stderr, "error reading word: %v\n", err) + } + } +} + +// removeNonAlphabetic removes non-alphabetic characters from a word using strings.Map +func removeNonAlphabetic(word string) string { + return strings.Map(func(r rune) rune { + if unicode.IsLetter(r) { + return unicode.ToLower(r) + } + return -1 + }, word) +} + +// ProbabilisticSkipper determines if an item should be retained with probability 1/(1<>= 1 + pr.counter-- + if pr.counter == 0 { + pr.refreshCounter() + } + return remove +} + +// EstimateUniqueWordsIter estimates the number of unique words using a probabilistic counting method +func EstimateUniqueWordsIter(reader io.Reader, memorySize int) int { + wordReader := NewWordReader(reader) + words := make(map[string]struct{}, memorySize) + + rounds := 0 + roundRemover := NewProbabilisticSkipper(1) + wordSkipper := NewProbabilisticSkipper(rounds) + wordSkipper.check(rounds) + + for word := range wordReader.All() { + wordSkipper.check(rounds) + if wordSkipper.ShouldSkip() { + delete(words, word) + } else { + words[word] = struct{}{} + + if len(words) >= memorySize { + rounds++ + + wordSkipper = NewProbabilisticSkipper(rounds) + for w := range words { + if roundRemover.ShouldSkip() { + delete(words, w) + } + } + } + } + wordSkipper.check(rounds) + } + + if len(words) == 0 { + return 0 + } + + invProbability := 1 << rounds + estimatedUniqueWords := len(words) * invProbability + return estimatedUniqueWords +} + +func main() { + input := "Hello, world! This is a test. Hello, world, hello!" + expectedUniqueWords := 6 // "hello", "world", "this", "is", "a", "test" (but "hello" and "world" are repeated) + memorySize := 6 + + reader := strings.NewReader(input) + estimatedUniqueWords := EstimateUniqueWordsIter(reader, memorySize) + if estimatedUniqueWords != expectedUniqueWords { + // ... + } +} diff --git a/test/fixedbugs/issue69507.go b/test/fixedbugs/issue69507.go new file mode 100644 index 00000000000000..fc300c848ee62f --- /dev/null +++ b/test/fixedbugs/issue69507.go @@ -0,0 +1,133 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package main
+
+func main() {
+	err := run()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func run() error {
+	methods := "AB"
+
+	type node struct {
+		tag     string
+		choices []string
+	}
+	all := []node{
+		{"000", permutations(methods)},
+	}
+
+	next := 1
+	for len(all) > 0 {
+		cur := all[0]
+		k := copy(all, all[1:])
+		all = all[:k]
+
+		if len(cur.choices) == 1 {
+			continue
+		}
+
+		var bestM map[byte][]string
+		bMax := len(cur.choices) + 1
+		bMin := -1
+		for sel := range selections(methods) {
+			m := make(map[byte][]string)
+			for _, order := range cur.choices {
+				x := findFirstMatch(order, sel)
+				m[x] = append(m[x], order)
+			}
+
+			min := len(cur.choices) + 1
+			max := -1
+			for _, v := range m {
+				if len(v) < min {
+					min = len(v)
+				}
+				if len(v) > max {
+					max = len(v)
+				}
+			}
+			if max < bMax || (max == bMax && min > bMin) {
+				bestM = m
+				bMin = min
+				bMax = max
+			}
+		}
+
+		if bMax == len(cur.choices) {
+			continue
+		}
+
+		cc := Keys(bestM)
+		for c := range cc {
+			choices := bestM[c]
+			next++
+
+			switch c {
+			case 'A':
+			case 'B':
+			default:
+				panic("unexpected selector type " + string(c))
+			}
+			all = append(all, node{"", choices})
+		}
+	}
+	return nil
+}
+
+func permutations(s string) []string {
+	if len(s) <= 1 {
+		return []string{s}
+	}
+
+	var result []string
+	for i, char := range s {
+		rest := s[:i] + s[i+1:]
+		for _, perm := range permutations(rest) {
+			result = append(result, string(char)+perm)
+		}
+	}
+	return result
+}
+
+type Seq[V any] func(yield func(V) bool)
+
+func selections(s string) Seq[string] {
+	return func(yield func(string) bool) {
+		for bits := 1; bits < 1<<len(s); bits++ {