diff --git a/VERSION b/VERSION index c284f6441bc49d..74f07e53c7161b 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.21.3 -time 2023-10-09T17:04:35Z +go1.21.4 +time 2023-11-01T20:46:39Z diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go index cc810f9d3e2551..b140a9c61378a7 100644 --- a/src/cmd/cgo/internal/testcarchive/carchive_test.go +++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go @@ -1365,3 +1365,35 @@ func TestDeepStack(t *testing.T) { t.Error(err) } } + +func TestSharedObject(t *testing.T) { + // Test that we can put a Go c-archive into a C shared object. + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + t.Parallel() + + if !testWork { + defer func() { + os.Remove("libgo_s.a") + os.Remove("libgo_s.h") + os.Remove("libgo_s.so") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo_s.a", "./libgo") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + + ccArgs := append(cc, "-shared", "-o", "libgo_s.so", "libgo_s.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + if err != nil { + t.Fatal(err) + } +} diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index cdb346321e561c..d3af465d0e0d03 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -981,7 +981,7 @@ (ConstNil ) (ConstNil )) -(NilCheck (GetG mem) mem) => mem +(NilCheck ptr:(GetG mem) mem) => ptr (If (Not cond) yes no) => (If cond no yes) (If (ConstBool [c]) yes no) && c => (First yes no) @@ -2055,19 +2055,19 @@ && isSameCall(call.Aux, "runtime.newobject") => mem -(NilCheck (SelectN [0] call:(StaticLECall _ _)) _) +(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) && isSameCall(call.Aux, 
"runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") - => (Invalid) + => ptr -(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _) +(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) && isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") - => (Invalid) + => ptr // Addresses of globals are always non-nil. -(NilCheck (Addr {_} (SB)) _) => (Invalid) -(NilCheck (Convert (Addr {_} (SB)) _) _) => (Invalid) +(NilCheck ptr:(Addr {_} (SB)) _) => ptr +(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr // for late-expanded calls, recognize memequal applied to a single constant byte // Support is limited by 1, 2, 4, 8 byte sizes diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 53ff57f6b12e3c..aa5fb0e03e660a 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -471,7 +471,7 @@ var genericOps = []opData{ {name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0. {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0. - {name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void. + {name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified. // Pseudo-ops {name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). 
arg0=mem diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index f34b9074197bfc..bbfdaceaad90b0 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -317,7 +317,28 @@ func checkFunc(f *Func) { if !v.Aux.(*ir.Name).Type().HasPointers() { f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String()) } - + case OpNilCheck: + // nil checks have pointer type before scheduling, and + // void type after scheduling. + if f.scheduled { + if v.Uses != 0 { + f.Fatalf("nilcheck must have 0 uses %s", v.Uses) + } + if !v.Type.IsVoid() { + f.Fatalf("nilcheck must have void type %s", v.Type.String()) + } + } else { + if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() { + f.Fatalf("nilcheck must have pointer type %s", v.Type.String()) + } + } + if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() { + f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String()) + } + if !v.Args[1].Type.IsMemory() { + f.Fatalf("bad arg 1 type to %s: want mem, have %s", + v.Op, v.Args[1].Type.String()) + } } // TODO: check for cycles in values diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go index 52cc7f2ca74d84..ae9fd2ef242681 100644 --- a/src/cmd/compile/internal/ssa/deadcode.go +++ b/src/cmd/compile/internal/ssa/deadcode.go @@ -110,16 +110,15 @@ func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value } } for _, v := range b.Values { - if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] { + if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] { live[v.ID] = true q = append(q, v) if v.Pos.IsStmt() != src.PosNotStmt { liveOrderStmts = append(liveOrderStmts, v) } } - if v.Type.IsVoid() && !live[v.ID] { - // The only Void ops are nil checks and inline marks. We must keep these. 
- if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] { + if v.Op == OpInlMark { + if !liveInlIdx[int(v.AuxInt)] { // We don't need marks for bodies that // have been completely optimized away. // TODO: save marks only for bodies which diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 648b68af78b14c..cb3427103c501b 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -73,9 +73,9 @@ func dse(f *Func) { } // Walk backwards looking for dead stores. Keep track of shadowed addresses. - // A "shadowed address" is a pointer and a size describing a memory region that - // is known to be written. We keep track of shadowed addresses in the shadowed - // map, mapping the ID of the address to the size of the shadowed region. + // A "shadowed address" is a pointer, offset, and size describing a memory region that + // is known to be written. We keep track of shadowed addresses in the shadowed map, + // mapping the ID of the address to a shadowRange where future writes will happen. // Since we're walking backwards, writes to a shadowed region are useless, // as they will be immediately overwritten. shadowed.clear() @@ -88,13 +88,20 @@ func dse(f *Func) { shadowed.clear() } if v.Op == OpStore || v.Op == OpZero { + ptr := v.Args[0] + var off int64 + for ptr.Op == OpOffPtr { // Walk to base pointer + off += ptr.AuxInt + ptr = ptr.Args[0] + } var sz int64 if v.Op == OpStore { sz = v.Aux.(*types.Type).Size() } else { // OpZero sz = v.AuxInt } - if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz { + sr := shadowRange(shadowed.get(ptr.ID)) + if sr.contains(off, off+sz) { // Modify the store/zero into a copy of the memory state, // effectively eliding the store operation. 
if v.Op == OpStore { @@ -108,10 +115,8 @@ func dse(f *Func) { v.AuxInt = 0 v.Op = OpCopy } else { - if sz > 0x7fffffff { // work around sparseMap's int32 value type - sz = 0x7fffffff - } - shadowed.set(v.Args[0].ID, int32(sz)) + // Extend shadowed region. + shadowed.set(ptr.ID, int32(sr.merge(off, off+sz))) } } // walk to previous store @@ -131,6 +136,49 @@ func dse(f *Func) { } } +// A shadowRange encodes a set of byte offsets [lo():hi()] from +// a given pointer that will be written to later in the block. +// A zero shadowRange encodes an empty shadowed range (and so +// does a -1 shadowRange, which is what sparsemap.get returns +// on a failed lookup). +type shadowRange int32 + +func (sr shadowRange) lo() int64 { + return int64(sr & 0xffff) +} +func (sr shadowRange) hi() int64 { + return int64((sr >> 16) & 0xffff) +} + +// contains reports whether [lo:hi] is completely within sr. +func (sr shadowRange) contains(lo, hi int64) bool { + return lo >= sr.lo() && hi <= sr.hi() +} + +// merge returns the union of sr and [lo:hi]. +// merge is allowed to return something smaller than the union. +func (sr shadowRange) merge(lo, hi int64) shadowRange { + if lo < 0 || hi > 0xffff { + // Ignore offsets that are too large or small. + return sr + } + if sr.lo() == sr.hi() { + // Old range is empty - use new one. + return shadowRange(lo + hi<<16) + } + if hi < sr.lo() || lo > sr.hi() { + // The two regions don't overlap or abut, so we would + // have to keep track of multiple disjoint ranges. + // Because we can only keep one, keep the larger one. + if sr.hi()-sr.lo() >= hi-lo { + return sr + } + return shadowRange(lo + hi<<16) + } + // Regions overlap or abut - compute the union. + return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16) +} + // elimDeadAutosGeneric deletes autos that are never accessed. To achieve this // we track the operations that the address of each auto reaches and if it only // reaches stores then we delete all the stores. 
The other operations will then @@ -201,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) { } if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 { - // Nil check has no use, but we need to keep it. + // We need to keep nil checks even if they have no use. // Also keep calls and values that have side effects. return } diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go index 6d3fb70780591c..68defde7b4b956 100644 --- a/src/cmd/compile/internal/ssa/fuse.go +++ b/src/cmd/compile/internal/ssa/fuse.go @@ -169,7 +169,7 @@ func fuseBlockIf(b *Block) bool { // There may be false positives. func isEmpty(b *Block) bool { for _, v := range b.Values { - if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() { + if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck { return false } } diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go index fa7921a18f6f16..2f89938d1d9233 100644 --- a/src/cmd/compile/internal/ssa/fuse_test.go +++ b/src/cmd/compile/internal/ssa/fuse_test.go @@ -254,7 +254,7 @@ func TestFuseSideEffects(t *testing.T) { Valu("p", OpArg, c.config.Types.IntPtr, 0, nil), If("c1", "z0", "exit")), Bloc("z0", - Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"), + Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"), Goto("exit")), Bloc("exit", Exit("mem"), diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 4f797a473f71ce..c69cd8c32ed394 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -38,11 +38,14 @@ func nilcheckelim(f *Func) { work := make([]bp, 0, 256) work = append(work, bp{block: f.Entry}) - // map from value ID to bool indicating if value is known to be non-nil - // in the current dominator path being walked. 
This slice is updated by + // map from value ID to known non-nil version of that value ID + // (in the current dominator path being walked). This slice is updated by // walkStates to maintain the known non-nil values. - nonNilValues := f.Cache.allocBoolSlice(f.NumValues()) - defer f.Cache.freeBoolSlice(nonNilValues) + // If there is extrinsic information about non-nil-ness, this map + // points a value to itself. If a value is known non-nil because we + // already did a nil check on it, it points to the nil check operation. + nonNilValues := f.Cache.allocValueSlice(f.NumValues()) + defer f.Cache.freeValueSlice(nonNilValues) // make an initial pass identifying any non-nil values for _, b := range f.Blocks { @@ -54,7 +57,7 @@ func nilcheckelim(f *Func) { // We assume that SlicePtr is non-nil because we do a bounds check // before the slice access (and all cap>0 slices have a non-nil ptr). See #30366. if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr { - nonNilValues[v.ID] = true + nonNilValues[v.ID] = v } } } @@ -68,16 +71,16 @@ func nilcheckelim(f *Func) { if v.Op == OpPhi { argsNonNil := true for _, a := range v.Args { - if !nonNilValues[a.ID] { + if nonNilValues[a.ID] == nil { argsNonNil = false break } } if argsNonNil { - if !nonNilValues[v.ID] { + if nonNilValues[v.ID] == nil { changed = true } - nonNilValues[v.ID] = true + nonNilValues[v.ID] = v } } } @@ -103,8 +106,8 @@ func nilcheckelim(f *Func) { if len(b.Preds) == 1 { p := b.Preds[0].b if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b { - if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] { - nonNilValues[ptr.ID] = true + if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil { + nonNilValues[ptr.ID] = ptr work = append(work, bp{op: ClearPtr, ptr: ptr}) } } @@ -117,14 +120,11 @@ func nilcheckelim(f *Func) { pendingLines.clear() // Next, process 
values in the block. - i := 0 for _, v := range b.Values { - b.Values[i] = v - i++ switch v.Op { case OpIsNonNil: ptr := v.Args[0] - if nonNilValues[ptr.ID] { + if nonNilValues[ptr.ID] != nil { if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary. pendingLines.add(v.Pos) v.Pos = v.Pos.WithNotStmt() @@ -135,7 +135,7 @@ func nilcheckelim(f *Func) { } case OpNilCheck: ptr := v.Args[0] - if nonNilValues[ptr.ID] { + if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil { // This is a redundant implicit nil check. // Logging in the style of the former compiler -- and omit line 1, // which is usually in generated code. @@ -145,14 +145,13 @@ func nilcheckelim(f *Func) { if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary pendingLines.add(v.Pos) } - v.reset(OpUnknown) - f.freeValue(v) - i-- + v.Op = OpCopy + v.SetArgs1(nilCheck) continue } // Record the fact that we know ptr is non nil, and remember to // undo that information when this dominator subtree is done. - nonNilValues[ptr.ID] = true + nonNilValues[ptr.ID] = v work = append(work, bp{op: ClearPtr, ptr: ptr}) fallthrough // a non-eliminated nil check might be a good place for a statement boundary. default: @@ -163,7 +162,7 @@ func nilcheckelim(f *Func) { } } // This reduces the lost statement count in "go" by 5 (out of 500 total). - for j := 0; j < i; j++ { // is this an ordering problem? + for j := range b.Values { // is this an ordering problem? v := b.Values[j] if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) { v.Pos = v.Pos.WithIsStmt() @@ -174,7 +173,6 @@ func nilcheckelim(f *Func) { b.Pos = b.Pos.WithIsStmt() pendingLines.remove(b.Pos) } - b.truncateValues(i) // Add all dominated blocks to the work list. 
for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling { @@ -182,7 +180,7 @@ func nilcheckelim(f *Func) { } case ClearPtr: - nonNilValues[node.ptr.ID] = false + nonNilValues[node.ptr.ID] = nil continue } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1480fcf45bfd76..e7caf9050c15fe 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -39373,9 +39373,10 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "NilCheck", - argLen: 2, - generic: true, + name: "NilCheck", + argLen: 2, + nilCheck: true, + generic: true, }, { name: "GetG", diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 43843bda553629..1cfdc3e10d104b 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -859,6 +859,9 @@ func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool { offset += base.AuxInt base = base.Args[0] } + if opcodeTable[base.Op].nilCheck { + base = base.Args[0] + } return base, offset } p1, off1 := baseAndOffset(p1) @@ -1183,6 +1186,12 @@ func min(x, y int64) int64 { } return y } +func max(x, y int64) int64 { + if x > y { + return x + } + return y +} func isConstZero(v *Value) bool { switch v.Op { diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index e5bd8bc36f7d61..574ac7a1a3ab2b 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -18967,79 +18967,84 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v_0 := v.Args[0] b := v.Block fe := b.Func.fe - // match: (NilCheck (GetG mem) mem) - // result: mem + // match: (NilCheck ptr:(GetG mem) mem) + // result: ptr for { - if v_0.Op != OpGetG { + ptr := v_0 + if ptr.Op != OpGetG { break } - mem := v_0.Args[0] + mem := ptr.Args[0] if mem != v_1 { break } - v.copyOf(mem) + v.copyOf(ptr) return true } 
- // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _) + // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") - // result: (Invalid) + // result: ptr for { - if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { + ptr := v_0 + if ptr.Op != OpSelectN || auxIntToInt64(ptr.AuxInt) != 0 { break } - call := v_0.Args[0] + call := ptr.Args[0] if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } - v.reset(OpInvalid) + v.copyOf(ptr) return true } - // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _) + // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") - // result: (Invalid) + // result: ptr for { - if v_0.Op != OpOffPtr { + ptr := v_0 + if ptr.Op != OpOffPtr { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 { + ptr_0 := ptr.Args[0] + if ptr_0.Op != OpSelectN || auxIntToInt64(ptr_0.AuxInt) != 0 { break } - call := v_0_0.Args[0] + call := ptr_0.Args[0] if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } - v.reset(OpInvalid) + v.copyOf(ptr) return true } - // match: (NilCheck (Addr {_} (SB)) _) - // result: (Invalid) + // match: (NilCheck ptr:(Addr {_} (SB)) _) + // result: ptr for { - if v_0.Op != OpAddr { + ptr := v_0 + if ptr.Op != OpAddr { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSB { + ptr_0 := ptr.Args[0] + if ptr_0.Op != OpSB { break } - v.reset(OpInvalid) + v.copyOf(ptr) return true } - // match: (NilCheck (Convert (Addr {_} (SB)) _) _) - // result: (Invalid) + // match: (NilCheck ptr:(Convert (Addr {_} (SB)) _) 
_) + // result: ptr for { - if v_0.Op != OpConvert { + ptr := v_0 + if ptr.Op != OpConvert { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { + ptr_0 := ptr.Args[0] + if ptr_0.Op != OpAddr { break } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSB { + ptr_0_0 := ptr_0.Args[0] + if ptr_0_0.Op != OpSB { break } - v.reset(OpInvalid) + v.copyOf(ptr) return true } return false diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go index 19b98cc4b83b68..0a7425c017291e 100644 --- a/src/cmd/compile/internal/ssa/schedule.go +++ b/src/cmd/compile/internal/ssa/schedule.go @@ -312,14 +312,21 @@ func schedule(f *Func) { } // Remove SPanchored now that we've scheduled. + // Also unlink nil checks now that ordering is assured + // between the nil check and the uses of the nil-checked pointer. for _, b := range f.Blocks { for _, v := range b.Values { for i, a := range v.Args { - if a.Op == OpSPanchored { + if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck { v.SetArg(i, a.Args[0]) } } } + for i, c := range b.ControlValues() { + if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck { + b.ReplaceControl(i, c.Args[0]) + } + } } for _, b := range f.Blocks { i := 0 @@ -332,6 +339,15 @@ func schedule(f *Func) { v.resetArgs() f.freeValue(v) } else { + if opcodeTable[v.Op].nilCheck { + if v.Uses != 0 { + base.Fatalf("nilcheck still has %d uses", v.Uses) + } + // We can't delete the nil check, but we mark + // it as having void type so regalloc won't + // try to allocate a register for it. + v.Type = types.TypeVoid + } b.Values[i] = v i++ } diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index e89024b3c665db..9b52da1c58a978 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -552,7 +552,11 @@ func (v *Value) LackingPos() bool { // if its use count drops to 0. 
func (v *Value) removeable() bool { if v.Type.IsVoid() { - // Void ops, like nil pointer checks, must stay. + // Void ops (inline marks), must stay. + return false + } + if opcodeTable[v.Op].nilCheck { + // Nil pointer checks must stay. return false } if v.Type.IsMemory() { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 597a196ba8c45b..e994577c641dea 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1991,7 +1991,8 @@ func (s *state) stmt(n ir.Node) { case ir.OCHECKNIL: n := n.(*ir.UnaryExpr) p := s.expr(n.X) - s.nilCheck(p) + _ = s.nilCheck(p) + // TODO: check that throwing away the nilcheck result is ok. case ir.OINLMARK: n := n.(*ir.InlineMarkStmt) @@ -5621,18 +5622,20 @@ func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value { } return p } - s.nilCheck(p) + p = s.nilCheck(p) return p } // nilCheck generates nil pointer checking code. // Used only for automatically inserted nil checks, // not for user code like 'x != nil'. -func (s *state) nilCheck(ptr *ssa.Value) { +// Returns a "definitely not nil" copy of x to ensure proper ordering +// of the uses of the post-nilcheck pointer. +func (s *state) nilCheck(ptr *ssa.Value) *ssa.Value { if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() { - return + return ptr } - s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) + return s.newValue2(ssa.OpNilCheck, ptr.Type, ptr, s.mem()) } // boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not. 
@@ -5984,8 +5987,8 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) if !t.Elem().IsArray() { s.Fatalf("bad ptr to array in slice %v\n", t) } - s.nilCheck(v) - ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v) + nv := s.nilCheck(v) + ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), nv) len = s.constInt(types.Types[types.TINT], t.Elem().NumElem()) cap = len default: diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 1d1de5bf942c68..47d6c1e09a010f 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -94,40 +94,24 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type { // and has one float64 argument and no results, // the generated code looks like: // - // clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s} + // clos = &struct{F uintptr; X0 *int; X1 *string}{func.1, &i, &s} // // The use of the struct provides type information to the garbage - // collector so that it can walk the closure. We could use (in this case) - // [3]unsafe.Pointer instead, but that would leave the gc in the dark. - // The information appears in the binary in the form of type descriptors; - // the struct is unnamed so that closures in multiple packages with the - // same struct type can share the descriptor. - - // Make sure the .F field is in the same package as the rest of the - // fields. This deals with closures in instantiated functions, which are - // compiled as if from the source package of the generic function. 
- var pkg *types.Pkg - if len(clo.Func.ClosureVars) == 0 { - pkg = types.LocalPkg - } else { - for _, v := range clo.Func.ClosureVars { - if pkg == nil { - pkg = v.Sym().Pkg - } else if pkg != v.Sym().Pkg { - base.Fatalf("Closure variables from multiple packages: %+v", clo) - } - } - } - - fields := []*types.Field{ - types.NewField(base.Pos, pkg.Lookup(".F"), types.Types[types.TUINTPTR]), - } - for _, v := range clo.Func.ClosureVars { + // collector so that it can walk the closure. We could use (in this + // case) [3]unsafe.Pointer instead, but that would leave the gc in + // the dark. The information appears in the binary in the form of + // type descriptors; the struct is unnamed and uses exported field + // names so that closures in multiple packages with the same struct + // type can share the descriptor. + + fields := make([]*types.Field, 1+len(clo.Func.ClosureVars)) + fields[0] = types.NewField(base.AutogeneratedPos, types.LocalPkg.Lookup("F"), types.Types[types.TUINTPTR]) + for i, v := range clo.Func.ClosureVars { typ := v.Type() if !v.Byval() { typ = types.NewPtr(typ) } - fields = append(fields, types.NewField(base.Pos, v.Sym(), typ)) + fields[1+i] = types.NewField(base.AutogeneratedPos, types.LocalPkg.LookupNum("X", i), typ) } typ := types.NewStruct(fields) typ.SetNoalg(true) diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go index 0016fb96062014..c5d9941cfd7d77 100644 --- a/src/cmd/compile/internal/types/fmt.go +++ b/src/cmd/compile/internal/types/fmt.go @@ -642,9 +642,6 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty name = fmt.Sprint(f.Nname) } else if verb == 'L' { name = s.Name - if name == ".F" { - name = "F" // Hack for toolstash -cmp. 
- } if !IsExported(name) && mode != fmtTypeIDName { name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) } diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go index f7a8a8dfcd3c04..24f54c36cf37c2 100644 --- a/src/cmd/compile/internal/types2/call.go +++ b/src/cmd/compile/internal/types2/call.go @@ -569,6 +569,14 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T for i, arg := range args { // generic arguments cannot have a defined (*Named) type - no need for underlying type below if asig, _ := arg.typ.(*Signature); asig != nil && asig.TypeParams().Len() > 0 { + // The argument type is a generic function signature. This type is + // pointer-identical with (it's copied from) the type of the generic + // function argument and thus the function object. + // Before we change the type (type parameter renaming, below), make + // a clone of it as otherwise we implicitly modify the object's type + // (go.dev/issues/63260). + clone := *asig + asig = &clone // Rename type parameters for cases like f(g, g); this gives each // generic function argument a unique type identity (go.dev/issues/59956). 
// TODO(gri) Consider only doing this if a function argument appears diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go index 9f67ad09025116..3ac345729b9a51 100644 --- a/src/cmd/compile/internal/types2/issues_test.go +++ b/src/cmd/compile/internal/types2/issues_test.go @@ -920,3 +920,47 @@ func _() { var conf Config conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic } + +func TestIssue63260(t *testing.T) { + const src = ` +package p + +func _() { + use(f[*string]) +} + +func use(func()) {} + +func f[I *T, T any]() { + var v T + _ = v +}` + + info := Info{ + Defs: make(map[*syntax.Name]Object), + } + pkg := mustTypecheck(src, nil, &info) + + // get type parameter T in signature of f + T := pkg.Scope().Lookup("f").Type().(*Signature).TypeParams().At(1) + if T.Obj().Name() != "T" { + t.Fatalf("got type parameter %s, want T", T) + } + + // get type of variable v in body of f + var v Object + for name, obj := range info.Defs { + if name.Value == "v" { + v = obj + break + } + } + if v == nil { + t.Fatal("variable v not found") + } + + // type of v and T must be pointer-identical + if v.Type() != T { + t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T) + } +} diff --git a/src/cmd/internal/moddeps/moddeps_test.go b/src/cmd/internal/moddeps/moddeps_test.go index f28e0bc077afa2..ae890b66cb479c 100644 --- a/src/cmd/internal/moddeps/moddeps_test.go +++ b/src/cmd/internal/moddeps/moddeps_test.go @@ -33,8 +33,6 @@ import ( // See issues 36852, 41409, and 43687. // (Also see golang.org/issue/27348.) 
func TestAllDependencies(t *testing.T) { - t.Skip("TODO(#63427): 1.21.3 contains unreleased changes from vendored modules") - goBin := testenv.GoToolPath(t) // Ensure that all packages imported within GOROOT diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 21b2e9a9d4896b..0550f07d5c04a6 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -2599,15 +2599,22 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64 // Return whether we may need to split text sections. // -// On PPC64x whem external linking a text section should not be larger than 2^25 bytes -// due to the size of call target offset field in the bl instruction. Splitting into -// smaller text sections smaller than this limit allows the system linker to modify the long -// calls appropriately. The limit allows for the space needed for tables inserted by the -// linker. +// On PPC64x, when external linking, a text section should not be +// larger than 2^25 bytes due to the size of call target offset field +// in the 'bl' instruction. Splitting into smaller text sections +// smaller than this limit allows the system linker to modify the long +// calls appropriately. The limit allows for the space needed for +// tables inserted by the linker. // // The same applies to Darwin/ARM64, with 2^27 byte threshold. +// +// Similarly for ARM, we split sections (at 2^25 bytes) to avoid +// inconsistencies between the Go linker's reachability calculations +// (e.g. will direct call from X to Y need a trampoline) and similar +// machinery in the external linker; see #58425 for more on the +// history here. 
func splitTextSections(ctxt *Link) bool { - return (ctxt.IsPPC64() || (ctxt.IsARM64() && ctxt.IsDarwin())) && ctxt.IsExternal() + return (ctxt.IsARM() || ctxt.IsPPC64() || (ctxt.IsARM64() && ctxt.IsDarwin())) && ctxt.IsExternal() } // On Wasm, we reserve 4096 bytes for zero page, then 8192 bytes for wasm_exec.js diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go index aef880d5341c21..a7a6082f54b922 100644 --- a/src/cmd/link/internal/ld/ld_test.go +++ b/src/cmd/link/internal/ld/ld_test.go @@ -136,7 +136,7 @@ func TestArchiveBuildInvokeWithExec(t *testing.T) { func TestLargeTextSectionSplitting(t *testing.T) { switch runtime.GOARCH { - case "ppc64", "ppc64le": + case "ppc64", "ppc64le", "arm": case "arm64": if runtime.GOOS == "darwin" { break diff --git a/src/go.mod b/src/go.mod index 25829e17f2dd9e..3b24053b94da17 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d - golang.org/x/net v0.12.1-0.20230712162946-57553cbff163 + golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c ) require ( diff --git a/src/go.sum b/src/go.sum index e474b8be318c84..caf8ff010daafd 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,7 +1,7 @@ golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d h1:LiA25/KWKuXfIq5pMIBq1s5hz3HQxhJJSu/SUGlD+SM= golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/net v0.12.1-0.20230712162946-57553cbff163 h1:1EDKNuaCsog7zGLEml1qRuO4gt23jORUQX2f0IKZ860= -golang.org/x/net v0.12.1-0.20230712162946-57553cbff163/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c h1:d+VvAxu4S13DWtf73R5eY//VaCk3aUcVdyYjM1SX7zw= +golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys 
v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index bea398fa9566e8..592f2fd72ad47b 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -157,7 +157,7 @@ var depsRules = ` unicode, fmt !< net, os, os/signal; - os/signal, STR + os/signal, internal/safefilepath, STR < path/filepath < io/ioutil; diff --git a/src/go/types/call.go b/src/go/types/call.go index 8a3cec7309eae5..f00290a74f4ea7 100644 --- a/src/go/types/call.go +++ b/src/go/types/call.go @@ -571,6 +571,14 @@ func (check *Checker) arguments(call *ast.CallExpr, sig *Signature, targs []Type for i, arg := range args { // generic arguments cannot have a defined (*Named) type - no need for underlying type below if asig, _ := arg.typ.(*Signature); asig != nil && asig.TypeParams().Len() > 0 { + // The argument type is a generic function signature. This type is + // pointer-identical with (it's copied from) the type of the generic + // function argument and thus the function object. + // Before we change the type (type parameter renaming, below), make + // a clone of it as otherwise we implicitly modify the object's type + // (go.dev/issues/63260). + clone := *asig + asig = &clone // Rename type parameters for cases like f(g, g); this gives each // generic function argument a unique type identity (go.dev/issues/59956). 
// TODO(gri) Consider only doing this if a function argument appears diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go index 64e1c20d7eb1c3..4a559cbab31581 100644 --- a/src/go/types/issues_test.go +++ b/src/go/types/issues_test.go @@ -930,3 +930,47 @@ func _() { var conf Config conf.Check(f.Name.Name, fset, []*ast.File{f}, nil) // must not panic } + +func TestIssue63260(t *testing.T) { + const src = ` +package p + +func _() { + use(f[*string]) +} + +func use(func()) {} + +func f[I *T, T any]() { + var v T + _ = v +}` + + info := Info{ + Defs: make(map[*ast.Ident]Object), + } + pkg := mustTypecheck(src, nil, &info) + + // get type parameter T in signature of f + T := pkg.Scope().Lookup("f").Type().(*Signature).TypeParams().At(1) + if T.Obj().Name() != "T" { + t.Fatalf("got type parameter %s, want T", T) + } + + // get type of variable v in body of f + var v Object + for name, obj := range info.Defs { + if name.Name == "v" { + v = obj + break + } + } + if v == nil { + t.Fatal("variable v not found") + } + + // type of v and T must be pointer-identical + if v.Type() != T { + t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T) + } +} diff --git a/src/internal/poll/splice_linux.go b/src/internal/poll/splice_linux.go index 9505c5dcfc1eb1..72cca34fe4ae5e 100644 --- a/src/internal/poll/splice_linux.go +++ b/src/internal/poll/splice_linux.go @@ -13,6 +13,12 @@ import ( ) const ( + // spliceNonblock doesn't make the splice itself necessarily nonblocking + // (because the actual file descriptors that are spliced from/to may block + // unless they have the O_NONBLOCK flag set), but it makes the splice pipe + // operations nonblocking. + spliceNonblock = 0x2 + // maxSpliceSize is the maximum amount of data Splice asks // the kernel to move in a single call to splice(2). 
// We use 1MB as Splice writes data through a pipe, and 1MB is the default maximum pipe buffer size, @@ -89,7 +95,11 @@ func spliceDrain(pipefd int, sock *FD, max int) (int, error) { return 0, err } for { - n, err := splice(pipefd, sock.Sysfd, max, 0) + // In theory calling splice(2) with SPLICE_F_NONBLOCK could end up an infinite loop here, + // because it could return EAGAIN ceaselessly when the write end of the pipe is full, + // but this shouldn't be a concern here, since the pipe buffer must be sufficient for + // this data transmission on the basis of the workflow in Splice. + n, err := splice(pipefd, sock.Sysfd, max, spliceNonblock) if err == syscall.EINTR { continue } @@ -127,7 +137,14 @@ func splicePump(sock *FD, pipefd int, inPipe int) (int, error) { } written := 0 for inPipe > 0 { - n, err := splice(sock.Sysfd, pipefd, inPipe, 0) + // In theory calling splice(2) with SPLICE_F_NONBLOCK could end up an infinite loop here, + // because it could return EAGAIN ceaselessly when the read end of the pipe is empty, + // but this shouldn't be a concern here, since the pipe buffer must contain inPipe size of + // data on the basis of the workflow in Splice. + n, err := splice(sock.Sysfd, pipefd, inPipe, spliceNonblock) + if err == syscall.EINTR { + continue + } // Here, the condition n == 0 && err == nil should never be // observed, since Splice controls the write side of the pipe. if n > 0 { diff --git a/src/internal/safefilepath/path_windows.go b/src/internal/safefilepath/path_windows.go index 909c150edc87a2..7cfd6ce2eac262 100644 --- a/src/internal/safefilepath/path_windows.go +++ b/src/internal/safefilepath/path_windows.go @@ -20,15 +20,10 @@ func fromFS(path string) (string, error) { for p := path; p != ""; { // Find the next path element. 
i := 0 - dot := -1 for i < len(p) && p[i] != '/' { switch p[i] { case 0, '\\', ':': return "", errInvalidPath - case '.': - if dot < 0 { - dot = i - } } i++ } @@ -39,22 +34,8 @@ func fromFS(path string) (string, error) { } else { p = "" } - // Trim the extension and look for a reserved name. - base := part - if dot >= 0 { - base = part[:dot] - } - if isReservedName(base) { - if dot < 0 { - return "", errInvalidPath - } - // The path element is a reserved name with an extension. - // Some Windows versions consider this a reserved name, - // while others do not. Use FullPath to see if the name is - // reserved. - if p, _ := syscall.FullPath(part); len(p) >= 4 && p[:4] == `\\.\` { - return "", errInvalidPath - } + if IsReservedName(part) { + return "", errInvalidPath } } if containsSlash { @@ -70,23 +51,88 @@ func fromFS(path string) (string, error) { return path, nil } -// isReservedName reports if name is a Windows reserved device name. +// IsReservedName reports if name is a Windows reserved device name. // It does not detect names with an extension, which are also reserved on some Windows versions. // // For details, search for PRN in // https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file. -func isReservedName(name string) bool { - if 3 <= len(name) && len(name) <= 4 { +func IsReservedName(name string) bool { + // Device names can have arbitrary trailing characters following a dot or colon. + base := name + for i := 0; i < len(base); i++ { + switch base[i] { + case ':', '.': + base = base[:i] + } + } + // Trailing spaces in the last path element are ignored. + for len(base) > 0 && base[len(base)-1] == ' ' { + base = base[:len(base)-1] + } + if !isReservedBaseName(base) { + return false + } + if len(base) == len(name) { + return true + } + // The path element is a reserved name with an extension. + // Some Windows versions consider this a reserved name, + // while others do not. Use FullPath to see if the name is + // reserved. 
+ if p, _ := syscall.FullPath(name); len(p) >= 4 && p[:4] == `\\.\` { + return true + } + return false +} + +func isReservedBaseName(name string) bool { + if len(name) == 3 { switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) { case "CON", "PRN", "AUX", "NUL": - return len(name) == 3 + return true + } + } + if len(name) >= 4 { + switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) { case "COM", "LPT": - return len(name) == 4 && '1' <= name[3] && name[3] <= '9' + if len(name) == 4 && '1' <= name[3] && name[3] <= '9' { + return true + } + // Superscript ¹, ², and ³ are considered numbers as well. + switch name[3:] { + case "\u00b2", "\u00b3", "\u00b9": + return true + } + return false } } + + // Passing CONIN$ or CONOUT$ to CreateFile opens a console handle. + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#consoles + // + // While CONIN$ and CONOUT$ aren't documented as being files, + // they behave the same as CON. For example, ./CONIN$ also opens the console input. 
+ if len(name) == 6 && name[5] == '$' && equalFold(name, "CONIN$") { + return true + } + if len(name) == 7 && name[6] == '$' && equalFold(name, "CONOUT$") { + return true + } return false } +func equalFold(a, b string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if toUpper(a[i]) != toUpper(b[i]) { + return false + } + } + return true +} + func toUpper(c byte) byte { if 'a' <= c && c <= 'z' { return c - ('a' - 'A') diff --git a/src/internal/syscall/windows/reparse_windows.go b/src/internal/syscall/windows/reparse_windows.go index 6e111392f09169..6caf47e8676c9c 100644 --- a/src/internal/syscall/windows/reparse_windows.go +++ b/src/internal/syscall/windows/reparse_windows.go @@ -12,6 +12,7 @@ import ( const ( FSCTL_SET_REPARSE_POINT = 0x000900A4 IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_DEDUP = 0x80000013 SYMLINK_FLAG_RELATIVE = 1 ) diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 9cd6a3490fcd10..dd59e1f4f2ef43 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -7012,6 +7012,7 @@ func (sc *http2serverConn) startPush(msg *http2startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/src/os/types_windows.go b/src/os/types_windows.go index 9a3d508783940a..effb0148dfd5ed 100644 --- a/src/os/types_windows.go +++ b/src/os/types_windows.go @@ -141,7 +141,23 @@ func (fs *fileStat) Mode() (m FileMode) { m |= ModeDevice | ModeCharDevice } if fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 && m&ModeType == 0 { - m |= ModeIrregular + if fs.ReparseTag == windows.IO_REPARSE_TAG_DEDUP { + // If the Data Deduplication service is enabled on Windows Server, its + // Optimization job may convert regular files to IO_REPARSE_TAG_DEDUP + // whenever that job runs. 
+ // + // However, DEDUP reparse points remain similar in most respects to + // regular files: they continue to support random-access reads and writes + // of persistent data, and they shouldn't add unexpected latency or + // unavailability in the way that a network filesystem might. + // + // Go programs may use ModeIrregular to filter out unusual files (such as + // raw device files on Linux, POSIX FIFO special files, and so on), so + // to avoid files changing unpredictably from regular to irregular we will + // consider DEDUP files to be close enough to regular to treat as such. + } else { + m |= ModeIrregular + } } return m } diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go index ca1d8b31164538..3bf3ff6b89887e 100644 --- a/src/path/filepath/path.go +++ b/src/path/filepath/path.go @@ -15,7 +15,6 @@ import ( "errors" "io/fs" "os" - "runtime" "sort" "strings" ) @@ -167,21 +166,7 @@ func Clean(path string) string { out.append('.') } - if runtime.GOOS == "windows" && out.volLen == 0 && out.buf != nil { - // If a ':' appears in the path element at the start of a Windows path, - // insert a .\ at the beginning to avoid converting relative paths - // like a/../c: into c:. - for _, c := range out.buf { - if os.IsPathSeparator(c) { - break - } - if c == ':' { - out.prepend('.', Separator) - break - } - } - } - + postClean(&out) // avoid creating absolute paths on Windows return FromSlash(out.string()) } diff --git a/src/path/filepath/path_nonwindows.go b/src/path/filepath/path_nonwindows.go new file mode 100644 index 00000000000000..db69f0228b0d3b --- /dev/null +++ b/src/path/filepath/path_nonwindows.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !windows + +package filepath + +func postClean(out *lazybuf) {} diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index 621208d31e848f..a6466183b4b964 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -116,6 +116,9 @@ var wincleantests = []PathTest{ {`a/../c:/a`, `.\c:\a`}, {`a/../../c:`, `..\c:`}, {`foo:bar`, `foo:bar`}, + + // Don't allow cleaning to create a Root Local Device path like \??\a. + {`/a/../??/a`, `\.\??\a`}, } func TestClean(t *testing.T) { @@ -177,8 +180,28 @@ var islocaltests = []IsLocalTest{ var winislocaltests = []IsLocalTest{ {"NUL", false}, {"nul", false}, + {"nul ", false}, {"nul.", false}, + {"a/nul:", false}, + {"a/nul : a", false}, + {"com0", true}, {"com1", false}, + {"com2", false}, + {"com3", false}, + {"com4", false}, + {"com5", false}, + {"com6", false}, + {"com7", false}, + {"com8", false}, + {"com9", false}, + {"com¹", false}, + {"com²", false}, + {"com³", false}, + {"com¹ : a", false}, + {"cOm1", false}, + {"lpt1", false}, + {"LPT1", false}, + {"lpt³", false}, {"./nul", false}, {`\`, false}, {`\a`, false}, @@ -384,6 +407,7 @@ var winjointests = []JoinTest{ {[]string{`\\a\`, `b`, `c`}, `\\a\b\c`}, {[]string{`//`, `a`}, `\\a`}, {[]string{`a:\b\c`, `x\..\y:\..\..\z`}, `a:\b\z`}, + {[]string{`\`, `??\a`}, `\.\??\a`}, } func TestJoin(t *testing.T) { @@ -1060,6 +1084,8 @@ var winisabstests = []IsAbsTest{ {`\\host\share\`, true}, {`\\host\share\foo`, true}, {`//host/share/foo/bar`, true}, + {`\\?\a\b\c`, true}, + {`\??\a\b\c`, true}, } func TestIsAbs(t *testing.T) { @@ -1560,7 +1586,8 @@ type VolumeNameTest struct { var volumenametests = []VolumeNameTest{ {`c:/foo/bar`, `c:`}, {`c:`, `c:`}, - {`2:`, ``}, + {`c:\`, `c:`}, + {`2:`, `2:`}, {``, ``}, {`\\\host`, `\\\host`}, {`\\\host\`, `\\\host`}, @@ -1580,12 +1607,23 @@ var volumenametests = []VolumeNameTest{ {`//host/share//foo///bar////baz`, `\\host\share`}, {`\\host\share\foo\..\bar`, `\\host\share`}, 
{`//host/share/foo/../bar`, `\\host\share`}, + {`//.`, `\\.`}, + {`//./`, `\\.\`}, {`//./NUL`, `\\.\NUL`}, - {`//?/NUL`, `\\?\NUL`}, + {`//?/`, `\\?`}, + {`//./a/b`, `\\.\a`}, + {`//?/`, `\\?`}, + {`//?/`, `\\?`}, {`//./C:`, `\\.\C:`}, + {`//./C:/`, `\\.\C:`}, {`//./C:/a/b/c`, `\\.\C:`}, {`//./UNC/host/share/a/b/c`, `\\.\UNC\host\share`}, {`//./UNC/host`, `\\.\UNC\host`}, + {`//./UNC/host\`, `\\.\UNC\host\`}, + {`//./UNC`, `\\.\UNC`}, + {`//./UNC/`, `\\.\UNC\`}, + {`\\?\x`, `\\?`}, + {`\??\x`, `\??`}, } func TestVolumeName(t *testing.T) { @@ -1855,3 +1893,28 @@ func TestIssue51617(t *testing.T) { t.Errorf("got directories %v, want %v", saw, want) } } + +func TestEscaping(t *testing.T) { + dir1 := t.TempDir() + dir2 := t.TempDir() + chdir(t, dir1) + + for _, p := range []string{ + filepath.Join(dir2, "x"), + } { + if !filepath.IsLocal(p) { + continue + } + f, err := os.Create(p) + if err != nil { + f.Close() + } + ents, err := os.ReadDir(dir2) + if err != nil { + t.Fatal(err) + } + for _, e := range ents { + t.Fatalf("found: %v", e.Name()) + } + } +} diff --git a/src/path/filepath/path_windows.go b/src/path/filepath/path_windows.go index 4dca9e0f55ff40..c490424f20c968 100644 --- a/src/path/filepath/path_windows.go +++ b/src/path/filepath/path_windows.go @@ -5,6 +5,8 @@ package filepath import ( + "internal/safefilepath" + "os" "strings" "syscall" ) @@ -20,34 +22,6 @@ func toUpper(c byte) byte { return c } -// isReservedName reports if name is a Windows reserved device name or a console handle. -// It does not detect names with an extension, which are also reserved on some Windows versions. -// -// For details, search for PRN in -// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file. 
-func isReservedName(name string) bool { - if 3 <= len(name) && len(name) <= 4 { - switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) { - case "CON", "PRN", "AUX", "NUL": - return len(name) == 3 - case "COM", "LPT": - return len(name) == 4 && '1' <= name[3] && name[3] <= '9' - } - } - // Passing CONIN$ or CONOUT$ to CreateFile opens a console handle. - // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#consoles - // - // While CONIN$ and CONOUT$ aren't documented as being files, - // they behave the same as CON. For example, ./CONIN$ also opens the console input. - if len(name) == 6 && name[5] == '$' && strings.EqualFold(name, "CONIN$") { - return true - } - if len(name) == 7 && name[6] == '$' && strings.EqualFold(name, "CONOUT$") { - return true - } - return false -} - func isLocal(path string) bool { if path == "" { return false @@ -68,25 +42,8 @@ func isLocal(path string) bool { if part == "." || part == ".." { hasDots = true } - // Trim the extension and look for a reserved name. - base, _, hasExt := strings.Cut(part, ".") - if isReservedName(base) { - if !hasExt { - return false - } - // The path element is a reserved name with an extension. Some Windows - // versions consider this a reserved name, while others do not. Use - // FullPath to see if the name is reserved. - // - // FullPath will convert references to reserved device names to their - // canonical form: \\.\${DEVICE_NAME} - // - // FullPath does not perform this conversion for paths which contain - // a reserved device name anywhere other than in the last element, - // so check the part rather than the full path. - if p, _ := syscall.FullPath(part); len(p) >= 4 && p[:4] == `\\.\` { - return false - } + if safefilepath.IsReservedName(part) { + return false } } if hasDots { @@ -118,40 +75,99 @@ func IsAbs(path string) (b bool) { // volumeNameLen returns length of the leading volume name on Windows. // It returns 0 elsewhere. 
// -// See: https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats +// See: +// https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats +// https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html func volumeNameLen(path string) int { - if len(path) < 2 { - return 0 - } - // with drive letter - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + switch { + case len(path) >= 2 && path[1] == ':': + // Path starts with a drive letter. + // + // Not all Windows functions necessarily enforce the requirement that + // drive letters be in the set A-Z, and we don't try to here. + // + // We don't handle the case of a path starting with a non-ASCII character, + // in which case the "drive letter" might be multiple bytes long. return 2 - } - // UNC and DOS device paths start with two slashes. - if !isSlash(path[0]) || !isSlash(path[1]) { + + case len(path) == 0 || !isSlash(path[0]): + // Path does not have a volume component. return 0 + + case pathHasPrefixFold(path, `\\.\UNC`): + // We're going to treat the UNC host and share as part of the volume + // prefix for historical reasons, but this isn't really principled; + // Windows's own GetFullPathName will happily remove the first + // component of the path in this space, converting + // \\.\unc\a\b\..\c into \\.\unc\a\c. + return uncLen(path, len(`\\.\UNC\`)) + + case pathHasPrefixFold(path, `\\.`): + // Path starts with \\., and is a Local Device path. + // + // We currently treat the next component after the \\.\ prefix + // as part of the volume name, although there doesn't seem to be + // a principled reason to do this. + if len(path) == 3 { + return 3 // exactly \\. + } + _, rest, ok := cutPath(path[4:]) + if !ok { + return len(path) + } + return len(path) - len(rest) - 1 + + case pathHasPrefixFold(path, `\\?`) || pathHasPrefixFold(path, `\??`): + // Path starts with \\?\ or \??\, and is a Root Local Device path. 
+ // + // While Windows usually treats / and \ as equivalent, + // /??/ does not seem to be recognized as a Root Local Device path. + // We treat it as one anyway here to be safe. + return 3 + + case len(path) >= 2 && isSlash(path[1]): + // Path starts with \\, and is a UNC path. + return uncLen(path, 2) } - rest := path[2:] - p1, rest, _ := cutPath(rest) - p2, rest, ok := cutPath(rest) - if !ok { - return len(path) + return 0 +} + +// pathHasPrefixFold tests whether the path s begins with prefix, +// ignoring case and treating all path separators as equivalent. +// If s is longer than prefix, then s[len(prefix)] must be a path separator. +func pathHasPrefixFold(s, prefix string) bool { + if len(s) < len(prefix) { + return false } - if p1 != "." && p1 != "?" { - // This is a UNC path: \\${HOST}\${SHARE}\ - return len(path) - len(rest) - 1 + for i := 0; i < len(prefix); i++ { + if isSlash(prefix[i]) { + if !isSlash(s[i]) { + return false + } + } else if toUpper(prefix[i]) != toUpper(s[i]) { + return false + } } - // This is a DOS device path. - if len(p2) == 3 && toUpper(p2[0]) == 'U' && toUpper(p2[1]) == 'N' && toUpper(p2[2]) == 'C' { - // This is a DOS device path that links to a UNC: \\.\UNC\${HOST}\${SHARE}\ - _, rest, _ = cutPath(rest) // host - _, rest, ok = cutPath(rest) // share - if !ok { - return len(path) + if len(s) > len(prefix) && !isSlash(s[len(prefix)]) { + return false + } + return true +} + +// uncLen returns the length of the volume prefix of a UNC path. +// prefixLen is the prefix prior to the start of the UNC host; +// for example, for "//host/share", the prefixLen is len("//")==2. +func uncLen(path string, prefixLen int) int { + count := 0 + for i := prefixLen; i < len(path); i++ { + if isSlash(path[i]) { + count++ + if count == 2 { + return i + } } } - return len(path) - len(rest) - 1 + return len(path) } // cutPath slices path around the first path separator. 
@@ -238,6 +254,12 @@ func join(elem []string) string { for len(e) > 0 && isSlash(e[0]) { e = e[1:] } + // If the path is \ and the next path element is ??, + // add an extra .\ to create \.\?? rather than \??\ + // (a Root Local Device path). + if b.Len() == 1 && pathHasPrefixFold(e, "??") { + b.WriteString(`.\`) + } case lastChar == ':': // If the path ends in a colon, keep the path relative to the current directory // on a drive and don't add a separator. Preserve leading slashes in the next @@ -304,3 +326,29 @@ func isUNC(path string) bool { func sameWord(a, b string) bool { return strings.EqualFold(a, b) } + +// postClean adjusts the results of Clean to avoid turning a relative path +// into an absolute or rooted one. +func postClean(out *lazybuf) { + if out.volLen != 0 || out.buf == nil { + return + } + // If a ':' appears in the path element at the start of a path, + // insert a .\ at the beginning to avoid converting relative paths + // like a/../c: into c:. + for _, c := range out.buf { + if os.IsPathSeparator(c) { + break + } + if c == ':' { + out.prepend('.', Separator) + return + } + } + // If a path begins with \??\, insert a \. at the beginning + // to avoid converting paths like \a\..\??\c:\x into \??\c:\x + // (equivalent to c:\x). + if len(out.buf) >= 3 && os.IsPathSeparator(out.buf[0]) && out.buf[1] == '?' && out.buf[2] == '?' { + out.prepend(Separator, '.') + } +} diff --git a/src/runtime/cgo/asm_386.s b/src/runtime/cgo/asm_386.s index 086e20b02f50c3..f9a662aa88f485 100644 --- a/src/runtime/cgo/asm_386.s +++ b/src/runtime/cgo/asm_386.s @@ -6,12 +6,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. 
TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVL _crosscall2_ptr(SB), AX - MOVL $crosscall2(SB), BX + MOVL $crosscall2_trampoline<>(SB), BX MOVL BX, (AX) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/cgo/asm_amd64.s b/src/runtime/cgo/asm_amd64.s index f254622f231f50..e319094a456ba4 100644 --- a/src/runtime/cgo/asm_amd64.s +++ b/src/runtime/cgo/asm_amd64.s @@ -7,12 +7,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVQ _crosscall2_ptr(SB), AX - MOVQ $crosscall2(SB), BX + MOVQ $crosscall2_trampoline<>(SB), BX MOVQ BX, (AX) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/cgo/asm_arm.s b/src/runtime/cgo/asm_arm.s index f7f99772a6ce88..095e9c06c9bf0a 100644 --- a/src/runtime/cgo/asm_arm.s +++ b/src/runtime/cgo/asm_arm.s @@ -6,12 +6,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVW _crosscall2_ptr(SB), R1 - MOVW $crosscall2(SB), R2 + MOVW $crosscall2_trampoline<>(SB), R2 MOVW R2, (R1) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. 
// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/cgo/asm_arm64.s b/src/runtime/cgo/asm_arm64.s index ce8909b49273e5..5492dc142c842a 100644 --- a/src/runtime/cgo/asm_arm64.s +++ b/src/runtime/cgo/asm_arm64.s @@ -7,12 +7,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVD _crosscall2_ptr(SB), R1 - MOVD $crosscall2(SB), R2 + MOVD $crosscall2_trampoline<>(SB), R2 MOVD R2, (R1) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/cgo/asm_loong64.s b/src/runtime/cgo/asm_loong64.s index 3b514ffc4a1953..19c8d743341a0b 100644 --- a/src/runtime/cgo/asm_loong64.s +++ b/src/runtime/cgo/asm_loong64.s @@ -7,12 +7,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVV _crosscall2_ptr(SB), R5 - MOVV $crosscall2(SB), R6 + MOVV $crosscall2_trampoline<>(SB), R6 MOVV R6, (R5) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. 
diff --git a/src/runtime/cgo/asm_mips64x.s b/src/runtime/cgo/asm_mips64x.s index 0a8fbbbef05d11..af817d5ea6ed3b 100644 --- a/src/runtime/cgo/asm_mips64x.s +++ b/src/runtime/cgo/asm_mips64x.s @@ -8,12 +8,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVV _crosscall2_ptr(SB), R5 - MOVV $crosscall2(SB), R6 + MOVV $crosscall2_trampoline<>(SB), R6 MOVV R6, (R5) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/cgo/asm_mipsx.s b/src/runtime/cgo/asm_mipsx.s index a57ae97d7e96a1..198c59a33e7a0c 100644 --- a/src/runtime/cgo/asm_mipsx.s +++ b/src/runtime/cgo/asm_mipsx.s @@ -8,12 +8,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVW _crosscall2_ptr(SB), R5 - MOVW $crosscall2(SB), R6 + MOVW $crosscall2_trampoline<>(SB), R6 MOVW R6, (R5) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. 
diff --git a/src/runtime/cgo/asm_ppc64x.s b/src/runtime/cgo/asm_ppc64x.s index c258c7c2a080fd..a3897459f5047e 100644 --- a/src/runtime/cgo/asm_ppc64x.s +++ b/src/runtime/cgo/asm_ppc64x.s @@ -16,7 +16,9 @@ DEFINE_PPC64X_FUNCDESC(_crosscall2<>, crosscall2) #define CROSSCALL2_FPTR $_crosscall2<>(SB) #else -#define CROSSCALL2_FPTR $crosscall2(SB) +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. +#define CROSSCALL2_FPTR $crosscall2_trampoline<>(SB) #endif // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. @@ -27,6 +29,9 @@ TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVD R6, (R5) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/cgo/asm_riscv64.s b/src/runtime/cgo/asm_riscv64.s index 08c4ed846671da..d75a5438429e9d 100644 --- a/src/runtime/cgo/asm_riscv64.s +++ b/src/runtime/cgo/asm_riscv64.s @@ -6,12 +6,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOV _crosscall2_ptr(SB), X7 - MOV $crosscall2(SB), X8 + MOV $crosscall2_trampoline<>(SB), X8 MOV X8, (X7) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. 
diff --git a/src/runtime/cgo/asm_s390x.s b/src/runtime/cgo/asm_s390x.s index bb0dfc1e313d9a..8f74fd5fe7810e 100644 --- a/src/runtime/cgo/asm_s390x.s +++ b/src/runtime/cgo/asm_s390x.s @@ -6,12 +6,17 @@ // Set the x_crosscall2_ptr C function pointer variable point to crosscall2. // It's such a pointer chain: _crosscall2_ptr -> x_crosscall2_ptr -> crosscall2 +// Use a local trampoline, to avoid taking the address of a dynamically exported +// function. TEXT ·set_crosscall2(SB),NOSPLIT,$0-0 MOVD _crosscall2_ptr(SB), R1 - MOVD $crosscall2(SB), R2 + MOVD $crosscall2_trampoline<>(SB), R2 MOVD R2, (R1) RET +TEXT crosscall2_trampoline<>(SB),NOSPLIT,$0-0 + JMP crosscall2(SB) + // Called by C code generated by cmd/cgo. // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls cgocallback with three arguments. diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go index 4c6d6be4f04b75..659ca8df2ee9bf 100644 --- a/src/runtime/mgcscavenge.go +++ b/src/runtime/mgcscavenge.go @@ -1145,21 +1145,11 @@ func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) { // Mark that we're considering this chunk as backed by huge pages. sc.setHugePage() - // Collapse dense chunks into huge pages and mark that - // we did that, but only if we're not allocating to - // use the entire chunk. If we're allocating an entire chunk, - // this is likely part of a much bigger allocation. For - // instance, if the caller is allocating a 1 GiB slice of bytes, we - // don't want to go and manually collapse all those pages; we want - // them to be demand-paged. If the caller is actually going to use - // all that memory, it'll naturally get backed by huge pages later. - // - // This also avoids having sysHugePageCollapse fail. On Linux, - // the call requires that some part of the huge page being collapsed - // is already paged in. 
- if !s.test && npages < pallocChunkPages { - sysHugePageCollapse(unsafe.Pointer(chunkBase(ci)), pallocChunkBytes) - } + // TODO(mknyszek): Consider eagerly backing memory with huge pages + // here. In the past we've attempted to use sysHugePageCollapse + // (which uses MADV_COLLAPSE on Linux, and is unsupported elsewhere) + // for this purpose, but that caused performance issues in production + // environments. } s.chunks[ci].store(sc) } diff --git a/src/syscall/rlimit_test.go b/src/syscall/rlimit_test.go index e48f45e3aacb75..764694fe2dcd96 100644 --- a/src/syscall/rlimit_test.go +++ b/src/syscall/rlimit_test.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build unix + package syscall_test import ( diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index 2b5f965f8f890b..4de656b0e81f82 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -7,7 +7,7 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -# golang.org/x/net v0.12.1-0.20230712162946-57553cbff163 +# golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c ## explicit; go 1.17 golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts diff --git a/test/fixedbugs/issue62498.dir/a.go b/test/fixedbugs/issue62498.dir/a.go new file mode 100644 index 00000000000000..68f97475ab62c5 --- /dev/null +++ b/test/fixedbugs/issue62498.dir/a.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package a + +func One(L any) { + func() { + defer F(L) + }() +} + +func F(any) {} diff --git a/test/fixedbugs/issue62498.dir/main.go b/test/fixedbugs/issue62498.dir/main.go new file mode 100644 index 00000000000000..e55a24fb3a081e --- /dev/null +++ b/test/fixedbugs/issue62498.dir/main.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "./a" + +func main() { + a.One(nil) + Two(nil) +} + +func Two(L any) { + func() { + defer a.F(L) + }() +} diff --git a/test/fixedbugs/issue62498.go b/test/fixedbugs/issue62498.go new file mode 100644 index 00000000000000..8fe8d8783fbc8e --- /dev/null +++ b/test/fixedbugs/issue62498.go @@ -0,0 +1,7 @@ +// rundir + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ignored diff --git a/test/fixedbugs/issue63657.go b/test/fixedbugs/issue63657.go new file mode 100644 index 00000000000000..e32a4a34fbb63a --- /dev/null +++ b/test/fixedbugs/issue63657.go @@ -0,0 +1,48 @@ +// run + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure address calculations don't float up before +// the corresponding nil check. + +package main + +type T struct { + a, b int +} + +//go:noinline +func f(x *T, p *bool, n int) { + *p = n != 0 + useStack(1000) + g(&x.b) +} + +//go:noinline +func g(p *int) { +} + +func useStack(n int) { + if n == 0 { + return + } + useStack(n - 1) +} + +func main() { + mustPanic(func() { + var b bool + f(nil, &b, 3) + }) +} + +func mustPanic(f func()) { + defer func() { + if recover() == nil { + panic("expected panic, got nil") + } + }() + f() +}