commit 17119d9298e132aa309f1cc95e439f29f61214b0 Author: Jeff Carr Date: Wed Mar 19 04:40:49 2025 -0500 day1 diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..97d7a42 --- /dev/null +++ b/Makefile @@ -0,0 +1,32 @@ +# git remote add github git@github.com:wit-go/log.git + +all: vet + @#GO111MODULE=off go vet -x + @echo this go library builds ok + +vet: + @GO111MODULE=off go vet + +redomod: + rm -f go.* + GO111MODULE= go mod init + GO111MODULE= go mod tidy + +goimport: + goimports -w *.go + +github: + git push origin master + git push origin devel + git push origin --tags + git push github master + git push github devel + git push github --tags + @echo + @echo check https://github.com/wit-go/log + @echo + +init-github: + git push -u github master + git push -u github devel + git push github --tags diff --git a/abi/abi.go b/abi/abi.go new file mode 100644 index 0000000..e1c8adc --- /dev/null +++ b/abi/abi.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import ( + "internal/goarch" + "unsafe" +) + +// RegArgs is a struct that has space for each argument +// and return value register on the current architecture. +// +// Assembly code knows the layout of the first two fields +// of RegArgs. +// +// RegArgs also contains additional space to hold pointers +// when it may not be safe to keep them only in the integer +// register space otherwise. +type RegArgs struct { + // Values in these slots should be precisely the bit-by-bit + // representation of how they would appear in a register. + // + // This means that on big endian arches, integer values should + // be in the top bits of the slot. Floats are usually just + // directly represented, but some architectures treat narrow + // width floating point values specially (e.g. they're promoted + // first, or they need to be NaN-boxed). + Ints [IntArgRegs]uintptr // untyped integer registers + Floats [FloatArgRegs]uint64 // untyped float registers + + // Fields above this point are known to assembly. + + // Ptrs is a space that duplicates Ints but with pointer type, + // used to make pointers passed or returned in registers + // visible to the GC by making the type unsafe.Pointer. + Ptrs [IntArgRegs]unsafe.Pointer + + // ReturnIsPtr is a bitmap that indicates which registers + // contain or will contain pointers on the return path from + // a reflectcall. The i'th bit indicates whether the i'th + // register contains or will contain a valid Go pointer. + ReturnIsPtr IntArgRegBitmap +} + +func (r *RegArgs) Dump() { + print("Ints:") + for _, x := range r.Ints { + print(" ", x) + } + println() + print("Floats:") + for _, x := range r.Floats { + print(" ", x) + } + println() + print("Ptrs:") + for _, x := range r.Ptrs { + print(" ", x) + } + println() +} + +// IntRegArgAddr returns a pointer inside of r.Ints[reg] that is appropriately +// offset for an argument of size argSize. +// +// argSize must be non-zero, fit in a register, and a power-of-two. +// +// This method is a helper for dealing with the endianness of different CPU +// architectures, since sub-word-sized arguments in big endian architectures +// need to be "aligned" to the upper edge of the register to be interpreted +// by the CPU correctly. 
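// A worked illustration of the big-endian adjustment described above (assuming
// a 64-bit big-endian target, goarch.PtrSize == 8): a 2-byte argument stored in
// r.Ints[reg] must sit at byte offset 8-2 == 6 of the slot, its upper edge; on a
// little-endian target the offset is simply 0. Hypothetical usage:
//
//	p := r.IntRegArgAddr(0, 2)   // address for a 2-byte argument in integer register 0
//	*(*uint16)(p) = 0x1234       // stored where the CPU expects to read it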
+func (r *RegArgs) IntRegArgAddr(reg int, argSize uintptr) unsafe.Pointer { + if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 { + panic("invalid argSize") + } + offset := uintptr(0) + if goarch.BigEndian { + offset = goarch.PtrSize - argSize + } + return unsafe.Pointer(uintptr(unsafe.Pointer(&r.Ints[reg])) + offset) +} + +// IntArgRegBitmap is a bitmap large enough to hold one bit per +// integer argument/return register. +type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8 + +// Set sets the i'th bit of the bitmap to 1. +func (b *IntArgRegBitmap) Set(i int) { + b[i/8] |= uint8(1) << (i % 8) +} + +// Get returns whether the i'th bit of the bitmap is set. +// +// nosplit because it's called in extremely sensitive contexts, like +// on the reflectcall return path. +// +//go:nosplit +func (b *IntArgRegBitmap) Get(i int) bool { + return b[i/8]&(uint8(1)<<(i%8)) != 0 +} diff --git a/abi/abi_amd64.go b/abi/abi_amd64.go new file mode 100644 index 0000000..d3c5678 --- /dev/null +++ b/abi/abi_amd64.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +const ( + // See abi_generic.go. + + // RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11. + IntArgRegs = 9 + + // X0 -> X14. + FloatArgRegs = 15 + + // We use SSE2 registers which support 64-bit float operations. + EffectiveFloatRegSize = 8 +) diff --git a/abi/abi_arm64.go b/abi/abi_arm64.go new file mode 100644 index 0000000..4dc5143 --- /dev/null +++ b/abi/abi_arm64.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +const ( + // See abi_generic.go. + + // R0 - R15. + IntArgRegs = 16 + + // F0 - F15. + FloatArgRegs = 16 + + EffectiveFloatRegSize = 8 +) diff --git a/abi/abi_generic.go b/abi/abi_generic.go new file mode 100644 index 0000000..a08d320 --- /dev/null +++ b/abi/abi_generic.go @@ -0,0 +1,38 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !goexperiment.regabiargs && !amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64 + +package abi + +const ( + // ABI-related constants. + // + // In the generic case, these are all zero + // which lets them gracefully degrade to ABI0. + + // IntArgRegs is the number of registers dedicated + // to passing integer argument values. Result registers are identical + // to argument registers, so this number is used for those too. + IntArgRegs = 0 + + // FloatArgRegs is the number of registers dedicated + // to passing floating-point argument values. Result registers are + // identical to argument registers, so this number is used for + // those too. + FloatArgRegs = 0 + + // EffectiveFloatRegSize describes the width of floating point + // registers on the current platform from the ABI's perspective. + // + // Since Go only supports 32-bit and 64-bit floating point primitives, + // this number should be either 0, 4, or 8. 0 indicates no floating + // point registers for the ABI or that floating point values will be + // passed via the softfloat ABI. + // + // For platforms that support larger floating point register widths, + // such as x87's 80-bit "registers" (not that we support x87 currently), + // use 8. 
+ EffectiveFloatRegSize = 0 +) diff --git a/abi/abi_loong64.go b/abi/abi_loong64.go new file mode 100644 index 0000000..10ad898 --- /dev/null +++ b/abi/abi_loong64.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +const ( + // See abi_generic.go. + + // R4 - R19 + IntArgRegs = 16 + + // F0 - F15 + FloatArgRegs = 16 + + EffectiveFloatRegSize = 8 +) diff --git a/abi/abi_ppc64x.go b/abi/abi_ppc64x.go new file mode 100644 index 0000000..73416d7 --- /dev/null +++ b/abi/abi_ppc64x.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package abi + +const ( + // See abi_generic.go. + + // R3 - R10, R14 - R17. + IntArgRegs = 12 + + // F1 - F12. + FloatArgRegs = 12 + + EffectiveFloatRegSize = 8 +) diff --git a/abi/abi_riscv64.go b/abi/abi_riscv64.go new file mode 100644 index 0000000..2bcd9d6 --- /dev/null +++ b/abi/abi_riscv64.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +const ( + // See abi_generic.go. + + // X8 - X23 + IntArgRegs = 16 + + // F8 - F23. + FloatArgRegs = 16 + + EffectiveFloatRegSize = 8 +) diff --git a/abi/abi_test.s b/abi/abi_test.s new file mode 100644 index 0000000..93ace3e --- /dev/null +++ b/abi/abi_test.s @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +#ifdef GOARCH_386 +#define PTRSIZE 4 +#endif +#ifdef GOARCH_arm +#define PTRSIZE 4 +#endif +#ifdef GOARCH_mips +#define PTRSIZE 4 +#endif +#ifdef GOARCH_mipsle +#define PTRSIZE 4 +#endif +#ifndef PTRSIZE +#define PTRSIZE 8 +#endif + +TEXT internal∕abi·FuncPCTestFn(SB),NOSPLIT,$0-0 + RET + +GLOBL internal∕abi·FuncPCTestFnAddr(SB), NOPTR, $PTRSIZE +DATA internal∕abi·FuncPCTestFnAddr(SB)/PTRSIZE, $internal∕abi·FuncPCTestFn(SB) diff --git a/abi/compiletype.go b/abi/compiletype.go new file mode 100644 index 0000000..f00a69c --- /dev/null +++ b/abi/compiletype.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +// These functions are the build-time version of the Go type data structures. + +// Their contents must be kept in sync with their definitions. +// Because the host and target type sizes can differ, the compiler and +// linker cannot use the host information that they might get from +// either unsafe.Sizeof and Alignof, nor runtime, reflect, or reflectlite. + +// CommonSize returns sizeof(Type) for a compilation target with a given ptrSize +func CommonSize(ptrSize int) int { return 4*ptrSize + 8 + 8 } + +// StructFieldSize returns sizeof(StructField) for a compilation target with a given ptrSize +func StructFieldSize(ptrSize int) int { return 3 * ptrSize } + +// UncommonSize returns sizeof(UncommonType). This currently does not depend on ptrSize. +// This exported function is in an internal package, so it may change to depend on ptrSize in the future. 
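// A worked check of these formulas (illustrative, assuming a 64-bit target
// where ptrSize is 8): CommonSize(8) == 4*8 + 8 + 8 == 48 bytes, matching
// unsafe.Sizeof(Type{}) when built for such a target, and
// UncommonSize() == 4+2+2+4+4 == 16 bytes.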
+func UncommonSize() uint64 { return 4 + 2 + 2 + 4 + 4 } + +// TFlagOff returns the offset of Type.TFlag for a compilation target with a given ptrSize +func TFlagOff(ptrSize int) int { return 2*ptrSize + 4 } + +// ITabTypeOff returns the offset of ITab.Type for a compilation target with a given ptrSize +func ITabTypeOff(ptrSize int) int { return ptrSize } diff --git a/abi/escape.go b/abi/escape.go new file mode 100644 index 0000000..8cdae14 --- /dev/null +++ b/abi/escape.go @@ -0,0 +1,33 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import "unsafe" + +// NoEscape hides the pointer p from escape analysis, preventing it +// from escaping to the heap. It compiles down to nothing. +// +// WARNING: This is very subtle to use correctly. The caller must +// ensure that it's truly safe for p to not escape to the heap by +// maintaining runtime pointer invariants (for example, that globals +// and the heap may not generally point into a stack). +// +//go:nosplit +//go:nocheckptr +func NoEscape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +var alwaysFalse bool +var escapeSink any + +// Escape forces any pointers in x to escape to the heap. +func Escape[T any](x T) T { + if alwaysFalse { + escapeSink = x + } + return x +} diff --git a/abi/export_test.go b/abi/export_test.go new file mode 100644 index 0000000..2a87e9d --- /dev/null +++ b/abi/export_test.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +func FuncPCTestFn() + +var FuncPCTestFnAddr uintptr // address of FuncPCTestFn, directly retrieved from assembly + +//go:noinline +func FuncPCTest() uintptr { + return FuncPCABI0(FuncPCTestFn) +} diff --git a/abi/funcpc.go b/abi/funcpc.go new file mode 100644 index 0000000..e038d36 --- /dev/null +++ b/abi/funcpc.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gccgo + +package abi + +// FuncPC* intrinsics. +// +// CAREFUL: In programs with plugins, FuncPC* can return different values +// for the same function (because there are actually multiple copies of +// the same function in the address space). To be safe, don't use the +// results of this function in any == expression. It is only safe to +// use the result as an address at which to start executing code. + +// FuncPCABI0 returns the entry PC of the function f, which must be a +// direct reference of a function defined as ABI0. Otherwise it is a +// compile-time error. +// +// Implemented as a compile intrinsic. +func FuncPCABI0(f interface{}) uintptr + +// FuncPCABIInternal returns the entry PC of the function f. If f is a +// direct reference of a function, it must be defined as ABIInternal. +// Otherwise it is a compile-time error. If f is not a direct reference +// of a defined function, it assumes that f is a func value. Otherwise +// the behavior is undefined. +// +// Implemented as a compile intrinsic. +func FuncPCABIInternal(f interface{}) uintptr diff --git a/abi/funcpc_gccgo.go b/abi/funcpc_gccgo.go new file mode 100644 index 0000000..ad5fa52 --- /dev/null +++ b/abi/funcpc_gccgo.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// For bootstrapping with gccgo. + +//go:build gccgo + +package abi + +import "unsafe" + +func FuncPCABI0(f interface{}) uintptr { + words := (*[2]unsafe.Pointer)(unsafe.Pointer(&f)) + return *(*uintptr)(unsafe.Pointer(words[1])) +} + +func FuncPCABIInternal(f interface{}) uintptr { + words := (*[2]unsafe.Pointer)(unsafe.Pointer(&f)) + return *(*uintptr)(unsafe.Pointer(words[1])) +} diff --git a/abi/iface.go b/abi/iface.go new file mode 100644 index 0000000..676a27d --- /dev/null +++ b/abi/iface.go @@ -0,0 +1,27 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import "unsafe" + +// The first word of every non-empty interface type contains an *ITab. +// It records the underlying concrete type (Type), the interface type it +// is implementing (Inter), and some ancillary information. +// +// allocated in non-garbage-collected memory +type ITab struct { + Inter *InterfaceType + Type *Type + Hash uint32 // copy of Type.Hash. Used for type switches. + Fun [1]uintptr // variable sized. fun[0]==0 means Type does not implement Inter. +} + +// EmptyInterface describes the layout of a "interface{}" or a "any." +// These are represented differently than non-empty interface, as the first +// word always points to an abi.Type. +type EmptyInterface struct { + Type *Type + Data unsafe.Pointer +} diff --git a/abi/map_noswiss.go b/abi/map_noswiss.go new file mode 100644 index 0000000..ff8609e --- /dev/null +++ b/abi/map_noswiss.go @@ -0,0 +1,54 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import ( + "unsafe" +) + +// Map constants common to several packages +// runtime/runtime-gdb.py:MapTypePrinter contains its own copy +const ( + // Maximum number of key/elem pairs a bucket can hold. + OldMapBucketCountBits = 3 // log2 of number of elements in a bucket. + OldMapBucketCount = 1 << OldMapBucketCountBits + + // Maximum key or elem size to keep inline (instead of mallocing per element). + // Must fit in a uint8. + // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes). + OldMapMaxKeyBytes = 128 + OldMapMaxElemBytes = 128 // Must fit in a uint8. +) + +type OldMapType struct { + Type + Key *Type + Elem *Type + Bucket *Type // internal type representing a hash bucket + // function for hashing keys (ptr to key, seed) -> hash + Hasher func(unsafe.Pointer, uintptr) uintptr + KeySize uint8 // size of key slot + ValueSize uint8 // size of elem slot + BucketSize uint16 // size of bucket + Flags uint32 +} + +// Note: flag values must match those used in the TMAP case +// in ../cmd/compile/internal/reflectdata/reflect.go:writeType. 
+func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself + return mt.Flags&1 != 0 +} +func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself + return mt.Flags&2 != 0 +} +func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys + return mt.Flags&4 != 0 +} +func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite + return mt.Flags&8 != 0 +} +func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic + return mt.Flags&16 != 0 +} diff --git a/abi/map_select_noswiss.go b/abi/map_select_noswiss.go new file mode 100644 index 0000000..ab2b69d --- /dev/null +++ b/abi/map_select_noswiss.go @@ -0,0 +1,10 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !goexperiment.swissmap + +package abi + +// See comment in map_select_swiss.go. +type mapType = OldMapType diff --git a/abi/map_select_swiss.go b/abi/map_select_swiss.go new file mode 100644 index 0000000..88a0bb2 --- /dev/null +++ b/abi/map_select_swiss.go @@ -0,0 +1,22 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.swissmap + +package abi + +// Select the map type that this binary is built using. This is for common +// lookup methods like Type.Key to know which type to use. +// +// Note that mapType *must not be used by any functions called in the +// compiler to build a target program* because the compiler must use the map +// type determined by run-time GOEXPERIMENT, not the build tags used to build +// the compiler. +// +// TODO(prattmic): This package is rather confusing because it has many +// functions that can't be used by the compiler (e.g., Type.Uncommon depends on +// the layout of type + uncommon objects in the binary. It would be incorrect +// for an ad-hoc local Type object). It may be best to move code that isn't +// usable by the compiler out of the package. +type mapType = SwissMapType diff --git a/abi/map_swiss.go b/abi/map_swiss.go new file mode 100644 index 0000000..6c85566 --- /dev/null +++ b/abi/map_swiss.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import ( + "unsafe" +) + +// Map constants common to several packages +// runtime/runtime-gdb.py:MapTypePrinter contains its own copy +const ( + // Number of bits in the group.slot count. + SwissMapGroupSlotsBits = 3 + + // Number of slots in a group. + SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8 + + // Maximum key or elem size to keep inline (instead of mallocing per element). + // Must fit in a uint8. + SwissMapMaxKeyBytes = 128 + SwissMapMaxElemBytes = 128 + + ctrlEmpty = 0b10000000 + bitsetLSB = 0x0101010101010101 + + // Value of control word with all empty slots. 
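// A worked expansion of the constant defined below (illustrative): the
// multiplication broadcasts ctrlEmpty into every byte lane, so
// SwissMapCtrlEmpty == 0x0101010101010101 * 0x80 == 0x8080808080808080,
// that is, all eight slot-control bytes of the group word read as "empty".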
+ SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty) +) + +type SwissMapType struct { + Type + Key *Type + Elem *Type + Group *Type // internal type representing a slot group + // function for hashing keys (ptr to key, seed) -> hash + Hasher func(unsafe.Pointer, uintptr) uintptr + GroupSize uintptr // == Group.Size_ + SlotSize uintptr // size of key/elem slot + ElemOff uintptr // offset of elem in key/elem slot + Flags uint32 +} + +// Flag values +const ( + SwissMapNeedKeyUpdate = 1 << iota + SwissMapHashMightPanic + SwissMapIndirectKey + SwissMapIndirectElem +) + +func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite + return mt.Flags&SwissMapNeedKeyUpdate != 0 +} +func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic + return mt.Flags&SwissMapHashMightPanic != 0 +} +func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself + return mt.Flags&SwissMapIndirectKey != 0 +} +func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself + return mt.Flags&SwissMapIndirectElem != 0 +} diff --git a/abi/rangefuncconsts.go b/abi/rangefuncconsts.go new file mode 100644 index 0000000..4e9248d --- /dev/null +++ b/abi/rangefuncconsts.go @@ -0,0 +1,18 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +type RF_State int + +// These constants are shared between the compiler, which uses them for state functions +// and panic indicators, and the runtime, which turns them into more meaningful strings +// For best code generation, RF_DONE and RF_READY should be 0 and 1. +const ( + RF_DONE = RF_State(iota) // body of loop has exited in a non-panic way + RF_READY // body of loop has not exited yet, is not running -- this is not a panic index + RF_PANIC // body of loop is either currently running, or has panicked + RF_EXHAUSTED // iterator function return, i.e., sequence is "exhausted" + RF_MISSING_PANIC = 4 // body of loop panicked but iterator function defer-recovered it away +) diff --git a/abi/runtime.go b/abi/runtime.go new file mode 100644 index 0000000..9b91cdf --- /dev/null +++ b/abi/runtime.go @@ -0,0 +1,8 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +// ZeroValSize is the size in bytes of runtime.zeroVal. +const ZeroValSize = 1024 diff --git a/abi/stack.go b/abi/stack.go new file mode 100644 index 0000000..8e3327e --- /dev/null +++ b/abi/stack.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +const ( + // StackNosplitBase is the base maximum number of bytes that a chain of + // NOSPLIT functions can use. + // + // This value must be multiplied by the stack guard multiplier, so do not + // use it directly. See runtime/stack.go:stackNosplit and + // cmd/internal/objabi/stack.go:StackNosplit. + StackNosplitBase = 800 + + // We have three different sequences for stack bounds checks, depending on + // whether the stack frame of a function is small, big, or huge. + + // After a stack split check the SP is allowed to be StackSmall bytes below + // the stack guard. 
+ // + // Functions that need frames <= StackSmall can perform the stack check + // using a single comparison directly between the stack guard and the SP + // because we ensure that StackSmall bytes of stack space are available + // beyond the stack guard. + StackSmall = 128 + + // Functions that need frames <= StackBig can assume that neither + // SP-framesize nor stackGuard-StackSmall will underflow, and thus use a + // more efficient check. In order to ensure this, StackBig must be <= the + // size of the unmapped space at zero. + StackBig = 4096 +) diff --git a/abi/stub.s b/abi/stub.s new file mode 100644 index 0000000..5bad98d --- /dev/null +++ b/abi/stub.s @@ -0,0 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file silences errors about body-less functions +// that are provided by intrinsics in the latest version of the compiler, +// but may not be known to the bootstrap compiler. diff --git a/abi/switch.go b/abi/switch.go new file mode 100644 index 0000000..a30fdd0 --- /dev/null +++ b/abi/switch.go @@ -0,0 +1,58 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import "internal/goarch" + +type InterfaceSwitch struct { + Cache *InterfaceSwitchCache + NCases int + + // Array of NCases elements. + // Each case must be a non-empty interface type. + Cases [1]*InterfaceType +} + +type InterfaceSwitchCache struct { + Mask uintptr // mask for index. Must be a power of 2 minus 1 + Entries [1]InterfaceSwitchCacheEntry // Mask+1 entries total +} + +type InterfaceSwitchCacheEntry struct { + // type of source value (a *Type) + Typ uintptr + // case # to dispatch to + Case int + // itab to use for resulting case variable (a *runtime.itab) + Itab uintptr +} + +func UseInterfaceSwitchCache(arch goarch.ArchFamilyType) bool { + // We need an atomic load instruction to make the cache multithreaded-safe. + // (AtomicLoadPtr needs to be implemented in cmd/compile/internal/ssa/_gen/ARCH.rules.) + switch arch { + case goarch.AMD64, goarch.ARM64, goarch.LOONG64, goarch.MIPS, goarch.MIPS64, goarch.PPC64, goarch.RISCV64, goarch.S390X: + return true + default: + return false + } +} + +type TypeAssert struct { + Cache *TypeAssertCache + Inter *InterfaceType + CanFail bool +} +type TypeAssertCache struct { + Mask uintptr + Entries [1]TypeAssertCacheEntry +} +type TypeAssertCacheEntry struct { + // type of source value (a *runtime._type) + Typ uintptr + // itab to use for result (a *runtime.itab) + // nil if CanFail is set and conversion would fail. + Itab uintptr +} diff --git a/abi/symtab.go b/abi/symtab.go new file mode 100644 index 0000000..a3c9be7 --- /dev/null +++ b/abi/symtab.go @@ -0,0 +1,111 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +// A FuncFlag records bits about a function, passed to the runtime. +type FuncFlag uint8 + +const ( + // FuncFlagTopFrame indicates a function that appears at the top of its stack. + // The traceback routine stop at such a function and consider that a + // successful, complete traversal of the stack. 
+ // Examples of TopFrame functions include goexit, which appears + // at the top of a user goroutine stack, and mstart, which appears + // at the top of a system goroutine stack. + FuncFlagTopFrame FuncFlag = 1 << iota + + // FuncFlagSPWrite indicates a function that writes an arbitrary value to SP + // (any write other than adding or subtracting a constant amount). + // The traceback routines cannot encode such changes into the + // pcsp tables, so the function traceback cannot safely unwind past + // SPWrite functions. Stopping at an SPWrite function is considered + // to be an incomplete unwinding of the stack. In certain contexts + // (in particular garbage collector stack scans) that is a fatal error. + FuncFlagSPWrite + + // FuncFlagAsm indicates that a function was implemented in assembly. + FuncFlagAsm +) + +// A FuncID identifies particular functions that need to be treated +// specially by the runtime. +// Note that in some situations involving plugins, there may be multiple +// copies of a particular special runtime function. +type FuncID uint8 + +const ( + // If you add a FuncID, you probably also want to add an entry to the map in + // ../../cmd/internal/objabi/funcid.go + + FuncIDNormal FuncID = iota // not a special function + FuncID_abort + FuncID_asmcgocall + FuncID_asyncPreempt + FuncID_cgocallback + FuncID_corostart + FuncID_debugCallV2 + FuncID_gcBgMarkWorker + FuncID_goexit + FuncID_gogo + FuncID_gopanic + FuncID_handleAsyncEvent + FuncID_mcall + FuncID_morestack + FuncID_mstart + FuncID_panicwrap + FuncID_rt0_go + FuncID_runfinq + FuncID_runtime_main + FuncID_sigpanic + FuncID_systemstack + FuncID_systemstack_switch + FuncIDWrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.) +) + +// ArgsSizeUnknown is set in Func.argsize to mark all functions +// whose argument size is unknown (C vararg functions, and +// assembly code without an explicit specification). +// This value is generated by the compiler, assembler, or linker. +const ArgsSizeUnknown = -0x80000000 + +// IDs for PCDATA and FUNCDATA tables in Go binaries. +// +// These must agree with ../../../runtime/funcdata.h. +const ( + PCDATA_UnsafePoint = 0 + PCDATA_StackMapIndex = 1 + PCDATA_InlTreeIndex = 2 + PCDATA_ArgLiveIndex = 3 + + FUNCDATA_ArgsPointerMaps = 0 + FUNCDATA_LocalsPointerMaps = 1 + FUNCDATA_StackObjects = 2 + FUNCDATA_InlTree = 3 + FUNCDATA_OpenCodedDeferInfo = 4 + FUNCDATA_ArgInfo = 5 + FUNCDATA_ArgLiveInfo = 6 + FUNCDATA_WrapInfo = 7 +) + +// Special values for the PCDATA_UnsafePoint table. +const ( + UnsafePointSafe = -1 // Safe for async preemption + UnsafePointUnsafe = -2 // Unsafe for async preemption + + // UnsafePointRestart1(2) apply on a sequence of instructions, within + // which if an async preemption happens, we should back off the PC + // to the start of the sequence when resuming. + // We need two so we can distinguish the start/end of the sequence + // in case that two sequences are next to each other. + UnsafePointRestart1 = -3 + UnsafePointRestart2 = -4 + + // Like UnsafePointRestart1, but back to function entry if async preempted. + UnsafePointRestartAtEntry = -5 +) + +const MINFUNC = 16 // minimum size for a function + +const FuncTabBucketSize = 256 * MINFUNC // size of bucket in the pc->func lookup table diff --git a/abi/type.go b/abi/type.go new file mode 100644 index 0000000..4671b0d --- /dev/null +++ b/abi/type.go @@ -0,0 +1,779 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import ( + "unsafe" +) + +// Type is the runtime representation of a Go type. +// +// Be careful about accessing this type at build time, as the version +// of this type in the compiler/linker may not have the same layout +// as the version in the target binary, due to pointer width +// differences and any experiments. Use cmd/compile/internal/rttype +// or the functions in compiletype.go to access this type instead. +// (TODO: this admonition applies to every type in this package. +// Put it in some shared location?) +type Type struct { + Size_ uintptr + PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers + Hash uint32 // hash of type; avoids computation in hash tables + TFlag TFlag // extra type information flags + Align_ uint8 // alignment of variable with this type + FieldAlign_ uint8 // alignment of struct field with this type + Kind_ Kind // enumeration for C + // function for comparing objects of this type + // (ptr to object A, ptr to object B) -> ==? + Equal func(unsafe.Pointer, unsafe.Pointer) bool + // GCData stores the GC type data for the garbage collector. + // Normally, GCData points to a bitmask that describes the + // ptr/nonptr fields of the type. The bitmask will have at + // least PtrBytes/ptrSize bits. + // If the TFlagGCMaskOnDemand bit is set, GCData is instead a + // **byte and the pointer to the bitmask is one dereference away. + // The runtime will build the bitmask if needed. + // (See runtime/type.go:getGCMask.) + // Note: multiple types may have the same value of GCData, + // including when TFlagGCMaskOnDemand is set. The types will, of course, + // have the same pointer layout (but not necessarily the same size). + GCData *byte + Str NameOff // string form + PtrToThis TypeOff // type for pointer to this type, may be zero +} + +// A Kind represents the specific kind of type that a Type represents. +// The zero Kind is not a valid kind. +type Kind uint8 + +const ( + Invalid Kind = iota + Bool + Int + Int8 + Int16 + Int32 + Int64 + Uint + Uint8 + Uint16 + Uint32 + Uint64 + Uintptr + Float32 + Float64 + Complex64 + Complex128 + Array + Chan + Func + Interface + Map + Pointer + Slice + String + Struct + UnsafePointer +) + +const ( + // TODO (khr, drchase) why aren't these in TFlag? Investigate, fix if possible. + KindDirectIface Kind = 1 << 5 + KindMask Kind = (1 << 5) - 1 +) + +// TFlag is used by a Type to signal what extra type information is +// available in the memory directly following the Type value. +type TFlag uint8 + +const ( + // TFlagUncommon means that there is a data with a type, UncommonType, + // just beyond the shared-per-type common data. That is, the data + // for struct types will store their UncommonType at one offset, the + // data for interface types will store their UncommonType at a different + // offset. UncommonType is always accessed via a pointer that is computed + // using trust-us-we-are-the-implementors pointer arithmetic. + // + // For example, if t.Kind() == Struct and t.tflag&TFlagUncommon != 0, + // then t has UncommonType data and it can be accessed as: + // + // type structTypeUncommon struct { + // structType + // u UncommonType + // } + // u := &(*structTypeUncommon)(unsafe.Pointer(t)).u + TFlagUncommon TFlag = 1 << 0 + + // TFlagExtraStar means the name in the str field has an + // extraneous '*' prefix. 
This is because for most types T in + // a program, the type *T also exists and reusing the str data + // saves binary size. + TFlagExtraStar TFlag = 1 << 1 + + // TFlagNamed means the type has a name. + TFlagNamed TFlag = 1 << 2 + + // TFlagRegularMemory means that equal and hash functions can treat + // this type as a single region of t.size bytes. + TFlagRegularMemory TFlag = 1 << 3 + + // TFlagGCMaskOnDemand means that the GC pointer bitmask will be + // computed on demand at runtime instead of being precomputed at + // compile time. If this flag is set, the GCData field effectively + // has type **byte instead of *byte. The runtime will store a + // pointer to the GC pointer bitmask in *GCData. + TFlagGCMaskOnDemand TFlag = 1 << 4 +) + +// NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime. +type NameOff int32 + +// TypeOff is the offset to a type from moduledata.types. See resolveTypeOff in runtime. +type TypeOff int32 + +// TextOff is an offset from the top of a text section. See (rtype).textOff in runtime. +type TextOff int32 + +// String returns the name of k. +func (k Kind) String() string { + if int(k) < len(kindNames) { + return kindNames[k] + } + return kindNames[0] +} + +var kindNames = []string{ + Invalid: "invalid", + Bool: "bool", + Int: "int", + Int8: "int8", + Int16: "int16", + Int32: "int32", + Int64: "int64", + Uint: "uint", + Uint8: "uint8", + Uint16: "uint16", + Uint32: "uint32", + Uint64: "uint64", + Uintptr: "uintptr", + Float32: "float32", + Float64: "float64", + Complex64: "complex64", + Complex128: "complex128", + Array: "array", + Chan: "chan", + Func: "func", + Interface: "interface", + Map: "map", + Pointer: "ptr", + Slice: "slice", + String: "string", + Struct: "struct", + UnsafePointer: "unsafe.Pointer", +} + +// TypeOf returns the abi.Type of some value. +func TypeOf(a any) *Type { + eface := *(*EmptyInterface)(unsafe.Pointer(&a)) + // Types are either static (for compiler-created types) or + // heap-allocated but always reachable (for reflection-created + // types, held in the central map). So there is no need to + // escape types. noescape here help avoid unnecessary escape + // of v. + return (*Type)(NoEscape(unsafe.Pointer(eface.Type))) +} + +// TypeFor returns the abi.Type for a type parameter. +func TypeFor[T any]() *Type { + return (*PtrType)(unsafe.Pointer(TypeOf((*T)(nil)))).Elem +} + +func (t *Type) Kind() Kind { return t.Kind_ & KindMask } + +func (t *Type) HasName() bool { + return t.TFlag&TFlagNamed != 0 +} + +// Pointers reports whether t contains pointers. +func (t *Type) Pointers() bool { return t.PtrBytes != 0 } + +// IfaceIndir reports whether t is stored indirectly in an interface value. +func (t *Type) IfaceIndir() bool { + return t.Kind_&KindDirectIface == 0 +} + +// isDirectIface reports whether t is stored directly in an interface value. 
+func (t *Type) IsDirectIface() bool { + return t.Kind_&KindDirectIface != 0 +} + +func (t *Type) GcSlice(begin, end uintptr) []byte { + if t.TFlag&TFlagGCMaskOnDemand != 0 { + panic("GcSlice can't handle on-demand gcdata types") + } + return unsafe.Slice(t.GCData, int(end))[begin:] +} + +// Method on non-interface type +type Method struct { + Name NameOff // name of method + Mtyp TypeOff // method type (without receiver) + Ifn TextOff // fn used in interface call (one-word receiver) + Tfn TextOff // fn used for normal method call +} + +// UncommonType is present only for defined types or types with methods +// (if T is a defined type, the uncommonTypes for T and *T have methods). +// Using a pointer to this struct reduces the overall size required +// to describe a non-defined type with no methods. +type UncommonType struct { + PkgPath NameOff // import path; empty for built-in types like int, string + Mcount uint16 // number of methods + Xcount uint16 // number of exported methods + Moff uint32 // offset from this uncommontype to [mcount]Method + _ uint32 // unused +} + +func (t *UncommonType) Methods() []Method { + if t.Mcount == 0 { + return nil + } + return (*[1 << 16]Method)(addChecked(unsafe.Pointer(t), uintptr(t.Moff), "t.mcount > 0"))[:t.Mcount:t.Mcount] +} + +func (t *UncommonType) ExportedMethods() []Method { + if t.Xcount == 0 { + return nil + } + return (*[1 << 16]Method)(addChecked(unsafe.Pointer(t), uintptr(t.Moff), "t.xcount > 0"))[:t.Xcount:t.Xcount] +} + +// addChecked returns p+x. +// +// The whySafe string is ignored, so that the function still inlines +// as efficiently as p+x, but all call sites should use the string to +// record why the addition is safe, which is to say why the addition +// does not cause x to advance to the very end of p's allocation +// and therefore point incorrectly at the next block in memory. +func addChecked(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// Imethod represents a method on an interface type +type Imethod struct { + Name NameOff // name of method + Typ TypeOff // .(*FuncType) underneath +} + +// ArrayType represents a fixed array type. +type ArrayType struct { + Type + Elem *Type // array element type + Slice *Type // slice type + Len uintptr +} + +// Len returns the length of t if t is an array type, otherwise 0 +func (t *Type) Len() int { + if t.Kind() == Array { + return int((*ArrayType)(unsafe.Pointer(t)).Len) + } + return 0 +} + +func (t *Type) Common() *Type { + return t +} + +type ChanDir int + +const ( + RecvDir ChanDir = 1 << iota // <-chan + SendDir // chan<- + BothDir = RecvDir | SendDir // chan + InvalidDir ChanDir = 0 +) + +// ChanType represents a channel type +type ChanType struct { + Type + Elem *Type + Dir ChanDir +} + +type structTypeUncommon struct { + StructType + u UncommonType +} + +// ChanDir returns the direction of t if t is a channel type, otherwise InvalidDir (0). 
+func (t *Type) ChanDir() ChanDir { + if t.Kind() == Chan { + ch := (*ChanType)(unsafe.Pointer(t)) + return ch.Dir + } + return InvalidDir +} + +// Uncommon returns a pointer to T's "uncommon" data if there is any, otherwise nil +func (t *Type) Uncommon() *UncommonType { + if t.TFlag&TFlagUncommon == 0 { + return nil + } + switch t.Kind() { + case Struct: + return &(*structTypeUncommon)(unsafe.Pointer(t)).u + case Pointer: + type u struct { + PtrType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case Func: + type u struct { + FuncType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case Slice: + type u struct { + SliceType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case Array: + type u struct { + ArrayType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case Chan: + type u struct { + ChanType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case Map: + type u struct { + mapType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case Interface: + type u struct { + InterfaceType + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + default: + type u struct { + Type + u UncommonType + } + return &(*u)(unsafe.Pointer(t)).u + } +} + +// Elem returns the element type for t if t is an array, channel, map, pointer, or slice, otherwise nil. +func (t *Type) Elem() *Type { + switch t.Kind() { + case Array: + tt := (*ArrayType)(unsafe.Pointer(t)) + return tt.Elem + case Chan: + tt := (*ChanType)(unsafe.Pointer(t)) + return tt.Elem + case Map: + tt := (*mapType)(unsafe.Pointer(t)) + return tt.Elem + case Pointer: + tt := (*PtrType)(unsafe.Pointer(t)) + return tt.Elem + case Slice: + tt := (*SliceType)(unsafe.Pointer(t)) + return tt.Elem + } + return nil +} + +// StructType returns t cast to a *StructType, or nil if its tag does not match. +func (t *Type) StructType() *StructType { + if t.Kind() != Struct { + return nil + } + return (*StructType)(unsafe.Pointer(t)) +} + +// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match. +func (t *Type) MapType() *mapType { + if t.Kind() != Map { + return nil + } + return (*mapType)(unsafe.Pointer(t)) +} + +// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match. +func (t *Type) ArrayType() *ArrayType { + if t.Kind() != Array { + return nil + } + return (*ArrayType)(unsafe.Pointer(t)) +} + +// FuncType returns t cast to a *FuncType, or nil if its tag does not match. +func (t *Type) FuncType() *FuncType { + if t.Kind() != Func { + return nil + } + return (*FuncType)(unsafe.Pointer(t)) +} + +// InterfaceType returns t cast to a *InterfaceType, or nil if its tag does not match. +func (t *Type) InterfaceType() *InterfaceType { + if t.Kind() != Interface { + return nil + } + return (*InterfaceType)(unsafe.Pointer(t)) +} + +// Size returns the size of data with type t. +func (t *Type) Size() uintptr { return t.Size_ } + +// Align returns the alignment of data with type t. 
+func (t *Type) Align() int { return int(t.Align_) } + +func (t *Type) FieldAlign() int { return int(t.FieldAlign_) } + +type InterfaceType struct { + Type + PkgPath Name // import path + Methods []Imethod // sorted by hash +} + +func (t *Type) ExportedMethods() []Method { + ut := t.Uncommon() + if ut == nil { + return nil + } + return ut.ExportedMethods() +} + +func (t *Type) NumMethod() int { + if t.Kind() == Interface { + tt := (*InterfaceType)(unsafe.Pointer(t)) + return tt.NumMethod() + } + return len(t.ExportedMethods()) +} + +// NumMethod returns the number of interface methods in the type's method set. +func (t *InterfaceType) NumMethod() int { return len(t.Methods) } + +func (t *Type) Key() *Type { + if t.Kind() == Map { + return (*mapType)(unsafe.Pointer(t)).Key + } + return nil +} + +type SliceType struct { + Type + Elem *Type // slice element type +} + +// funcType represents a function type. +// +// A *Type for each in and out parameter is stored in an array that +// directly follows the funcType (and possibly its uncommonType). So +// a function type with one method, one input, and one output is: +// +// struct { +// funcType +// uncommonType +// [2]*rtype // [0] is in, [1] is out +// } +type FuncType struct { + Type + InCount uint16 + OutCount uint16 // top bit is set if last input parameter is ... +} + +func (t *FuncType) In(i int) *Type { + return t.InSlice()[i] +} + +func (t *FuncType) NumIn() int { + return int(t.InCount) +} + +func (t *FuncType) NumOut() int { + return int(t.OutCount & (1<<15 - 1)) +} + +func (t *FuncType) Out(i int) *Type { + return (t.OutSlice()[i]) +} + +func (t *FuncType) InSlice() []*Type { + uadd := unsafe.Sizeof(*t) + if t.TFlag&TFlagUncommon != 0 { + uadd += unsafe.Sizeof(UncommonType{}) + } + if t.InCount == 0 { + return nil + } + return (*[1 << 16]*Type)(addChecked(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.InCount:t.InCount] +} +func (t *FuncType) OutSlice() []*Type { + outCount := uint16(t.NumOut()) + if outCount == 0 { + return nil + } + uadd := unsafe.Sizeof(*t) + if t.TFlag&TFlagUncommon != 0 { + uadd += unsafe.Sizeof(UncommonType{}) + } + return (*[1 << 17]*Type)(addChecked(unsafe.Pointer(t), uadd, "outCount > 0"))[t.InCount : t.InCount+outCount : t.InCount+outCount] +} + +func (t *FuncType) IsVariadic() bool { + return t.OutCount&(1<<15) != 0 +} + +type PtrType struct { + Type + Elem *Type // pointer element (pointed at) type +} + +type StructField struct { + Name Name // name is always non-empty + Typ *Type // type of field + Offset uintptr // byte offset of field +} + +func (f *StructField) Embedded() bool { + return f.Name.IsEmbedded() +} + +type StructType struct { + Type + PkgPath Name + Fields []StructField +} + +// Name is an encoded type Name with optional extra data. +// +// The first byte is a bit field containing: +// +// 1<<0 the name is exported +// 1<<1 tag data follows the name +// 1<<2 pkgPath nameOff follows the name and tag +// 1<<3 the name is of an embedded (a.k.a. anonymous) field +// +// Following that, there is a varint-encoded length of the name, +// followed by the name itself. +// +// If tag data is present, it also has a varint-encoded length +// followed by the tag itself. +// +// If the import path follows, then 4 bytes at the end of +// the data form a nameOff. The import path is only set for concrete +// methods that are defined in a different package than their type. +// +// If a name starts with "*", then the exported bit represents +// whether the pointed to type is exported. 
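// A worked example of this encoding (illustrative; the name and tag are
// hypothetical): NewName("Foo", `json:"foo"`, true, false), defined later in
// this file, lays out the bytes as
//
//	0x03                 // bit 0: exported, bit 1: tag data follows
//	0x03 'F' 'o' 'o'     // varint name length, then the name
//	0x0a 'j' 's' 'o' 'n' ':' '"' 'f' 'o' 'o' '"'   // varint tag length, then the tag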
+// +// Note: this encoding must match here and in: +// cmd/compile/internal/reflectdata/reflect.go +// cmd/link/internal/ld/decodesym.go + +type Name struct { + Bytes *byte +} + +// DataChecked does pointer arithmetic on n's Bytes, and that arithmetic is asserted to +// be safe for the reason in whySafe (which can appear in a backtrace, etc.) +func (n Name) DataChecked(off int, whySafe string) *byte { + return (*byte)(addChecked(unsafe.Pointer(n.Bytes), uintptr(off), whySafe)) +} + +// Data does pointer arithmetic on n's Bytes, and that arithmetic is asserted to +// be safe because the runtime made the call (other packages use DataChecked) +func (n Name) Data(off int) *byte { + return (*byte)(addChecked(unsafe.Pointer(n.Bytes), uintptr(off), "the runtime doesn't need to give you a reason")) +} + +// IsExported returns "is n exported?" +func (n Name) IsExported() bool { + return (*n.Bytes)&(1<<0) != 0 +} + +// HasTag returns true iff there is tag data following this name +func (n Name) HasTag() bool { + return (*n.Bytes)&(1<<1) != 0 +} + +// IsEmbedded returns true iff n is embedded (an anonymous field). +func (n Name) IsEmbedded() bool { + return (*n.Bytes)&(1<<3) != 0 +} + +// ReadVarint parses a varint as encoded by encoding/binary. +// It returns the number of encoded bytes and the encoded value. +func (n Name) ReadVarint(off int) (int, int) { + v := 0 + for i := 0; ; i++ { + x := *n.DataChecked(off+i, "read varint") + v += int(x&0x7f) << (7 * i) + if x&0x80 == 0 { + return i + 1, v + } + } +} + +// IsBlank indicates whether n is "_". +func (n Name) IsBlank() bool { + if n.Bytes == nil { + return false + } + _, l := n.ReadVarint(1) + return l == 1 && *n.Data(2) == '_' +} + +// writeVarint writes n to buf in varint form. Returns the +// number of bytes written. n must be nonnegative. +// Writes at most 10 bytes. +func writeVarint(buf []byte, n int) int { + for i := 0; ; i++ { + b := byte(n & 0x7f) + n >>= 7 + if n == 0 { + buf[i] = b + return i + 1 + } + buf[i] = b | 0x80 + } +} + +// Name returns the tag string for n, or empty if there is none. +func (n Name) Name() string { + if n.Bytes == nil { + return "" + } + i, l := n.ReadVarint(1) + return unsafe.String(n.DataChecked(1+i, "non-empty string"), l) +} + +// Tag returns the tag string for n, or empty if there is none. +func (n Name) Tag() string { + if !n.HasTag() { + return "" + } + i, l := n.ReadVarint(1) + i2, l2 := n.ReadVarint(1 + i + l) + return unsafe.String(n.DataChecked(1+i+l+i2, "non-empty string"), l2) +} + +func NewName(n, tag string, exported, embedded bool) Name { + if len(n) >= 1<<29 { + panic("abi.NewName: name too long: " + n[:1024] + "...") + } + if len(tag) >= 1<<29 { + panic("abi.NewName: tag too long: " + tag[:1024] + "...") + } + var nameLen [10]byte + var tagLen [10]byte + nameLenLen := writeVarint(nameLen[:], len(n)) + tagLenLen := writeVarint(tagLen[:], len(tag)) + + var bits byte + l := 1 + nameLenLen + len(n) + if exported { + bits |= 1 << 0 + } + if len(tag) > 0 { + l += tagLenLen + len(tag) + bits |= 1 << 1 + } + if embedded { + bits |= 1 << 3 + } + + b := make([]byte, l) + b[0] = bits + copy(b[1:], nameLen[:nameLenLen]) + copy(b[1+nameLenLen:], n) + if len(tag) > 0 { + tb := b[1+nameLenLen+len(n):] + copy(tb, tagLen[:tagLenLen]) + copy(tb[tagLenLen:], tag) + } + + return Name{Bytes: &b[0]} +} + +const ( + TraceArgsLimit = 10 // print no more than 10 args/components + TraceArgsMaxDepth = 5 // no more than 5 layers of nesting + + // maxLen is a (conservative) upper bound of the byte stream length. 
For + // each arg/component, it has no more than 2 bytes of data (size, offset), + // and no more than one {, }, ... at each level (it cannot have both the + // data and ... unless it is the last one, just be conservative). Plus 1 + // for _endSeq. + TraceArgsMaxLen = (TraceArgsMaxDepth*3+2)*TraceArgsLimit + 1 +) + +// Populate the data. +// The data is a stream of bytes, which contains the offsets and sizes of the +// non-aggregate arguments or non-aggregate fields/elements of aggregate-typed +// arguments, along with special "operators". Specifically, +// - for each non-aggregate arg/field/element, its offset from FP (1 byte) and +// size (1 byte) +// - special operators: +// - 0xff - end of sequence +// - 0xfe - print { (at the start of an aggregate-typed argument) +// - 0xfd - print } (at the end of an aggregate-typed argument) +// - 0xfc - print ... (more args/fields/elements) +// - 0xfb - print _ (offset too large) +const ( + TraceArgsEndSeq = 0xff + TraceArgsStartAgg = 0xfe + TraceArgsEndAgg = 0xfd + TraceArgsDotdotdot = 0xfc + TraceArgsOffsetTooLarge = 0xfb + TraceArgsSpecial = 0xf0 // above this are operators, below this are ordinary offsets +) + +// MaxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, +// which holds 1-bit entries describing where pointers are in a given type. +// Above this length, the GC information is recorded as a GC program, +// which can express repetition compactly. In either form, the +// information is used by the runtime to initialize the heap bitmap, +// and for large types (like 128 or more words), they are roughly the +// same speed. GC programs are never much larger and often more +// compact. (If large arrays are involved, they can be arbitrarily +// more compact.) +// +// The cutoff must be large enough that any allocation large enough to +// use a GC program is large enough that it does not share heap bitmap +// bytes with any other objects, allowing the GC program execution to +// assume an aligned start and not use atomic operations. In the current +// runtime, this means all malloc size classes larger than the cutoff must +// be multiples of four words. On 32-bit systems that's 16 bytes, and +// all size classes >= 16 bytes are 16-byte aligned, so no real constraint. +// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed +// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated +// is 32 pointers, the bits for which fit in 4 bytes. So MaxPtrmaskBytes +// must be >= 4. +// +// We used to use 16 because the GC programs do have some constant overhead +// to get started, and processing 128 pointers seems to be enough to +// amortize that overhead well. +// +// To make sure that the runtime's chansend can call typeBitsBulkBarrier, +// we raised the limit to 2048, so that even 32-bit systems are guaranteed to +// use bitmaps for objects up to 64 kB in size. +const MaxPtrmaskBytes = 2048 diff --git a/goarch/gengoarch.go b/goarch/gengoarch.go new file mode 100644 index 0000000..a52936e --- /dev/null +++ b/goarch/gengoarch.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "strings" +) + +var goarches []string + +func main() { + data, err := os.ReadFile("../../internal/syslist/syslist.go") + if err != nil { + log.Fatal(err) + } + const goarchPrefix = `var KnownArch = map[string]bool{` + inGOARCH := false + for _, line := range strings.Split(string(data), "\n") { + if strings.HasPrefix(line, goarchPrefix) { + inGOARCH = true + } else if inGOARCH && strings.HasPrefix(line, "}") { + break + } else if inGOARCH { + goarch := strings.Fields(line)[0] + goarch = strings.TrimPrefix(goarch, `"`) + goarch = strings.TrimSuffix(goarch, `":`) + goarches = append(goarches, goarch) + } + } + + for _, target := range goarches { + if target == "amd64p32" { + continue + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.\n\n") + fmt.Fprintf(&buf, "//go:build %s\n\n", target) // must explicitly include target for bootstrapping purposes + fmt.Fprintf(&buf, "package goarch\n\n") + fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target) + for _, goarch := range goarches { + value := 0 + if goarch == target { + value = 1 + } + fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goarch), value) + } + err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666) + if err != nil { + log.Fatal(err) + } + } +} diff --git a/goarch/goarch.go b/goarch/goarch.go new file mode 100644 index 0000000..f52fe6c --- /dev/null +++ b/goarch/goarch.go @@ -0,0 +1,62 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package goarch contains GOARCH-specific constants. +package goarch + +// The next line makes 'go generate' write the zgoarch*.go files with +// per-arch information, including constants named $GOARCH for every +// GOARCH. The constant is 1 on the current system, 0 otherwise; multiplying +// by them is useful for defining GOARCH-specific constants. +// +//go:generate go run gengoarch.go + +// ArchFamilyType represents a family of one or more related architectures. +// For example, ppc64 and ppc64le are both members of the PPC64 family. +type ArchFamilyType int + +const ( + AMD64 ArchFamilyType = iota + ARM + ARM64 + I386 + LOONG64 + MIPS + MIPS64 + PPC64 + RISCV64 + S390X + WASM +) + +// PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant. +// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit). +const PtrSize = 4 << (^uintptr(0) >> 63) + +// ArchFamily is the architecture family (AMD64, ARM, ...) +const ArchFamily ArchFamilyType = _ArchFamily + +// BigEndian reports whether the architecture is big-endian. +const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1 + +// DefaultPhysPageSize is the default physical page size. +const DefaultPhysPageSize = _DefaultPhysPageSize + +// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems). +// The various PC tables record PC deltas pre-divided by PCQuantum. +const PCQuantum = _PCQuantum + +// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit). +const Int64Align = PtrSize + +// MinFrameSize is the size of the system-reserved words at the bottom +// of a frame (just above the architectural stack pointer). +// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems. 
+// On PowerPC it is larger, to cover three more reserved words: +// the compiler word, the link editor word, and the TOC save word. +const MinFrameSize = _MinFrameSize + +// StackAlign is the required alignment of the SP register. +// The stack must be at least word aligned, but some architectures require more. +const StackAlign = _StackAlign diff --git a/goarch/goarch_386.go b/goarch/goarch_386.go new file mode 100644 index 0000000..c621421 --- /dev/null +++ b/goarch/goarch_386.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = I386 + _DefaultPhysPageSize = 4096 + _PCQuantum = 1 + _MinFrameSize = 0 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_amd64.go b/goarch/goarch_amd64.go new file mode 100644 index 0000000..911e3e7 --- /dev/null +++ b/goarch/goarch_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = AMD64 + _DefaultPhysPageSize = 4096 + _PCQuantum = 1 + _MinFrameSize = 0 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_arm.go b/goarch/goarch_arm.go new file mode 100644 index 0000000..a659171 --- /dev/null +++ b/goarch/goarch_arm.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = ARM + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 4 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_arm64.go b/goarch/goarch_arm64.go new file mode 100644 index 0000000..85d0b47 --- /dev/null +++ b/goarch/goarch_arm64.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = ARM64 + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = 16 +) diff --git a/goarch/goarch_loong64.go b/goarch/goarch_loong64.go new file mode 100644 index 0000000..dae1f4d --- /dev/null +++ b/goarch/goarch_loong64.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package goarch + +const ( + _ArchFamily = LOONG64 + _DefaultPhysPageSize = 16384 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_mips.go b/goarch/goarch_mips.go new file mode 100644 index 0000000..59f3995 --- /dev/null +++ b/goarch/goarch_mips.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = MIPS + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 4 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_mips64.go b/goarch/goarch_mips64.go new file mode 100644 index 0000000..9e4f827 --- /dev/null +++ b/goarch/goarch_mips64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package goarch + +const ( + _ArchFamily = MIPS64 + _DefaultPhysPageSize = 16384 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_mips64le.go b/goarch/goarch_mips64le.go new file mode 100644 index 0000000..9e4f827 --- /dev/null +++ b/goarch/goarch_mips64le.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = MIPS64 + _DefaultPhysPageSize = 16384 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_mipsle.go b/goarch/goarch_mipsle.go new file mode 100644 index 0000000..3e6642b --- /dev/null +++ b/goarch/goarch_mipsle.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = MIPS + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 4 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_ppc64.go b/goarch/goarch_ppc64.go new file mode 100644 index 0000000..60cc846 --- /dev/null +++ b/goarch/goarch_ppc64.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = PPC64 + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 32 + _StackAlign = 16 +) diff --git a/goarch/goarch_ppc64le.go b/goarch/goarch_ppc64le.go new file mode 100644 index 0000000..60cc846 --- /dev/null +++ b/goarch/goarch_ppc64le.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = PPC64 + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 32 + _StackAlign = 16 +) diff --git a/goarch/goarch_riscv64.go b/goarch/goarch_riscv64.go new file mode 100644 index 0000000..3b6da1e --- /dev/null +++ b/goarch/goarch_riscv64.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = RISCV64 + _DefaultPhysPageSize = 4096 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_s390x.go b/goarch/goarch_s390x.go new file mode 100644 index 0000000..20c5705 --- /dev/null +++ b/goarch/goarch_s390x.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = S390X + _DefaultPhysPageSize = 4096 + _PCQuantum = 2 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/goarch/goarch_wasm.go b/goarch/goarch_wasm.go new file mode 100644 index 0000000..98618d6 --- /dev/null +++ b/goarch/goarch_wasm.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package goarch + +const ( + _ArchFamily = WASM + _DefaultPhysPageSize = 65536 + _PCQuantum = 1 + _MinFrameSize = 0 + _StackAlign = PtrSize +) diff --git a/goarch/zgoarch_386.go b/goarch/zgoarch_386.go new file mode 100644 index 0000000..4a9b0e6 --- /dev/null +++ b/goarch/zgoarch_386.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build 386 + +package goarch + +const GOARCH = `386` + +const Is386 = 1 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_amd64.go b/goarch/zgoarch_amd64.go new file mode 100644 index 0000000..7926392 --- /dev/null +++ b/goarch/zgoarch_amd64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build amd64 + +package goarch + +const GOARCH = `amd64` + +const Is386 = 0 +const IsAmd64 = 1 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_arm.go b/goarch/zgoarch_arm.go new file mode 100644 index 0000000..6c03b8b --- /dev/null +++ b/goarch/zgoarch_arm.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build arm + +package goarch + +const GOARCH = `arm` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 1 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_arm64.go b/goarch/zgoarch_arm64.go new file mode 100644 index 0000000..ad342d7 --- /dev/null +++ b/goarch/zgoarch_arm64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build arm64 + +package goarch + +const GOARCH = `arm64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 1 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_arm64be.go b/goarch/zgoarch_arm64be.go new file mode 100644 index 0000000..0f26003 --- /dev/null +++ b/goarch/zgoarch_arm64be.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build arm64be + +package goarch + +const GOARCH = `arm64be` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 1 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_armbe.go b/goarch/zgoarch_armbe.go new file mode 100644 index 0000000..6092fee --- /dev/null +++ b/goarch/zgoarch_armbe.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build armbe + +package goarch + +const GOARCH = `armbe` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 1 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_loong64.go b/goarch/zgoarch_loong64.go new file mode 100644 index 0000000..21c67e1 --- /dev/null +++ b/goarch/zgoarch_loong64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build loong64 + +package goarch + +const GOARCH = `loong64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 1 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_mips.go b/goarch/zgoarch_mips.go new file mode 100644 index 0000000..0db1974 --- /dev/null +++ b/goarch/zgoarch_mips.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips + +package goarch + +const GOARCH = `mips` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 1 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_mips64.go b/goarch/zgoarch_mips64.go new file mode 100644 index 0000000..738806f --- /dev/null +++ b/goarch/zgoarch_mips64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build mips64 + +package goarch + +const GOARCH = `mips64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 1 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_mips64le.go b/goarch/zgoarch_mips64le.go new file mode 100644 index 0000000..8de5beb --- /dev/null +++ b/goarch/zgoarch_mips64le.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips64le + +package goarch + +const GOARCH = `mips64le` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 1 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_mips64p32.go b/goarch/zgoarch_mips64p32.go new file mode 100644 index 0000000..ea461be --- /dev/null +++ b/goarch/zgoarch_mips64p32.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips64p32 + +package goarch + +const GOARCH = `mips64p32` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 1 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_mips64p32le.go b/goarch/zgoarch_mips64p32le.go new file mode 100644 index 0000000..15473ce --- /dev/null +++ b/goarch/zgoarch_mips64p32le.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips64p32le + +package goarch + +const GOARCH = `mips64p32le` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 1 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_mipsle.go b/goarch/zgoarch_mipsle.go new file mode 100644 index 0000000..4955142 --- /dev/null +++ b/goarch/zgoarch_mipsle.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build mipsle + +package goarch + +const GOARCH = `mipsle` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 1 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_ppc.go b/goarch/zgoarch_ppc.go new file mode 100644 index 0000000..ec01763 --- /dev/null +++ b/goarch/zgoarch_ppc.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build ppc + +package goarch + +const GOARCH = `ppc` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 1 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_ppc64.go b/goarch/zgoarch_ppc64.go new file mode 100644 index 0000000..39be392 --- /dev/null +++ b/goarch/zgoarch_ppc64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build ppc64 + +package goarch + +const GOARCH = `ppc64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 1 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_ppc64le.go b/goarch/zgoarch_ppc64le.go new file mode 100644 index 0000000..5f959e0 --- /dev/null +++ b/goarch/zgoarch_ppc64le.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build ppc64le + +package goarch + +const GOARCH = `ppc64le` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 1 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_riscv.go b/goarch/zgoarch_riscv.go new file mode 100644 index 0000000..8d81a14 --- /dev/null +++ b/goarch/zgoarch_riscv.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build riscv + +package goarch + +const GOARCH = `riscv` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 1 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_riscv64.go b/goarch/zgoarch_riscv64.go new file mode 100644 index 0000000..1df989c --- /dev/null +++ b/goarch/zgoarch_riscv64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build riscv64 + +package goarch + +const GOARCH = `riscv64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 1 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_s390.go b/goarch/zgoarch_s390.go new file mode 100644 index 0000000..56815b9 --- /dev/null +++ b/goarch/zgoarch_s390.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build s390 + +package goarch + +const GOARCH = `s390` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 1 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_s390x.go b/goarch/zgoarch_s390x.go new file mode 100644 index 0000000..e61e9bd --- /dev/null +++ b/goarch/zgoarch_s390x.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build s390x + +package goarch + +const GOARCH = `s390x` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 1 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_sparc.go b/goarch/zgoarch_sparc.go new file mode 100644 index 0000000..ee5b746 --- /dev/null +++ b/goarch/zgoarch_sparc.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build sparc + +package goarch + +const GOARCH = `sparc` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 1 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/goarch/zgoarch_sparc64.go b/goarch/zgoarch_sparc64.go new file mode 100644 index 0000000..519aaa1 --- /dev/null +++ b/goarch/zgoarch_sparc64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build sparc64 + +package goarch + +const GOARCH = `sparc64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 1 +const IsWasm = 0 diff --git a/goarch/zgoarch_wasm.go b/goarch/zgoarch_wasm.go new file mode 100644 index 0000000..25567a1 --- /dev/null +++ b/goarch/zgoarch_wasm.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build wasm + +package goarch + +const GOARCH = `wasm` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 1 diff --git a/iter.go b/iter.go new file mode 100644 index 0000000..e765378 --- /dev/null +++ b/iter.go @@ -0,0 +1,471 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package iter provides basic definitions and operations related to +iterators over sequences. + +# Iterators + +An iterator is a function that passes successive elements of a +sequence to a callback function, conventionally named yield. +The function stops either when the sequence is finished or +when yield returns false, indicating to stop the iteration early. +This package defines [Seq] and [Seq2] +(pronounced like seek—the first syllable of sequence) +as shorthands for iterators that pass 1 or 2 values per sequence element +to yield: + + type ( + Seq[V any] func(yield func(V) bool) + Seq2[K, V any] func(yield func(K, V) bool) + ) + +Seq2 represents a sequence of paired values, conventionally key-value +or index-value pairs. + +Yield returns true if the iterator should continue with the next +element in the sequence, false if it should stop. 
+ +For instance, [maps.Keys] returns an iterator that produces the sequence +of keys of the map m, implemented as follows: + + func Keys[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K] { + return func(yield func(K) bool) { + for k := range m { + if !yield(k) { + return + } + } + } + } + +Further examples can be found in [The Go Blog: Range Over Function Types]. + +Iterator functions are most often called by a [range loop], as in: + + func PrintAll[V any](seq iter.Seq[V]) { + for v := range seq { + fmt.Println(v) + } + } + +# Naming Conventions + +Iterator functions and methods are named for the sequence being walked: + + // All returns an iterator over all elements in s. + func (s *Set[V]) All() iter.Seq[V] + +The iterator method on a collection type is conventionally named All, +because it iterates a sequence of all the values in the collection. + +For a type containing multiple possible sequences, the iterator's name +can indicate which sequence is being provided: + + // Cities returns an iterator over the major cities in the country. + func (c *Country) Cities() iter.Seq[*City] + + // Languages returns an iterator over the official spoken languages of the country. + func (c *Country) Languages() iter.Seq[string] + +If an iterator requires additional configuration, the constructor function +can take additional configuration arguments: + + // Scan returns an iterator over key-value pairs with min ≤ key ≤ max. + func (m *Map[K, V]) Scan(min, max K) iter.Seq2[K, V] + + // Split returns an iterator over the (possibly-empty) substrings of s + // separated by sep. + func Split(s, sep string) iter.Seq[string] + +When there are multiple possible iteration orders, the method name may +indicate that order: + + // All returns an iterator over the list from head to tail. + func (l *List[V]) All() iter.Seq[V] + + // Backward returns an iterator over the list from tail to head. + func (l *List[V]) Backward() iter.Seq[V] + + // Preorder returns an iterator over all nodes of the syntax tree + // beneath (and including) the specified root, in depth-first preorder, + // visiting a parent node before its children. + func Preorder(root Node) iter.Seq[Node] + +# Single-Use Iterators + +Most iterators provide the ability to walk an entire sequence: +when called, the iterator does any setup necessary to start the +sequence, then calls yield on successive elements of the sequence, +and then cleans up before returning. Calling the iterator again +walks the sequence again. + +Some iterators break that convention, providing the ability to walk a +sequence only once. These “single-use iterators” typically report values +from a data stream that cannot be rewound to start over. +Calling the iterator again after stopping early may continue the +stream, but calling it again after the sequence is finished will yield +no values at all. Doc comments for functions or methods that return +single-use iterators should document this fact: + + // Lines returns an iterator over lines read from r. + // It returns a single-use iterator. + func (r *Reader) Lines() iter.Seq[string] + +# Pulling Values + +Functions and methods that accept or return iterators +should use the standard [Seq] or [Seq2] types, to ensure +compatibility with range loops and other iterator adapters. +The standard iterators can be thought of as “push iterators”, which +push values to the yield function. + +Sometimes a range loop is not the most natural way to consume values +of the sequence. 
In this case, [Pull] converts a standard push iterator +to a “pull iterator”, which can be called to pull one value at a time +from the sequence. [Pull] starts an iterator and returns a pair +of functions—next and stop—which return the next value from the iterator +and stop it, respectively. + +For example: + + // Pairs returns an iterator over successive pairs of values from seq. + func Pairs[V any](seq iter.Seq[V]) iter.Seq2[V, V] { + return func(yield func(V, V) bool) { + next, stop := iter.Pull(seq) + defer stop() + for { + v1, ok1 := next() + if !ok1 { + return + } + v2, ok2 := next() + // If ok2 is false, v2 should be the + // zero value; yield one last pair. + if !yield(v1, v2) { + return + } + if !ok2 { + return + } + } + } + } + +If clients do not consume the sequence to completion, they must call stop, +which allows the iterator function to finish and return. As shown in +the example, the conventional way to ensure this is to use defer. + +# Standard Library Usage + +A few packages in the standard library provide iterator-based APIs, +most notably the [maps] and [slices] packages. +For example, [maps.Keys] returns an iterator over the keys of a map, +while [slices.Sorted] collects the values of an iterator into a slice, +sorts them, and returns the slice, so to iterate over the sorted keys of a map: + + for _, key := range slices.Sorted(maps.Keys(m)) { + ... + } + +# Mutation + +Iterators provide only the values of the sequence, not any direct way +to modify it. If an iterator wishes to provide a mechanism for modifying +a sequence during iteration, the usual approach is to define a position type +with the extra operations and then provide an iterator over positions. + +For example, a tree implementation might provide: + + // Positions returns an iterator over positions in the sequence. + func (t *Tree[V]) Positions() iter.Seq[*Pos] + + // A Pos represents a position in the sequence. + // It is only valid during the yield call it is passed to. + type Pos[V any] struct { ... } + + // Pos returns the value at the cursor. + func (p *Pos[V]) Value() V + + // Delete deletes the value at this point in the iteration. + func (p *Pos[V]) Delete() + + // Set changes the value v at the cursor. + func (p *Pos[V]) Set(v V) + +And then a client could delete boring values from the tree using: + + for p := range t.Positions() { + if boring(p.Value()) { + p.Delete() + } + } + +[The Go Blog: Range Over Function Types]: https://go.dev/blog/range-functions +[range loop]: https://go.dev/ref/spec#For_range +*/ +package iter + +import ( + "internal/race" + "runtime" + "unsafe" +) + +// Seq is an iterator over sequences of individual values. +// When called as seq(yield), seq calls yield(v) for each value v in the sequence, +// stopping early if yield returns false. +// See the [iter] package documentation for more details. +type Seq[V any] func(yield func(V) bool) + +// Seq2 is an iterator over sequences of pairs of values, most commonly key-value pairs. +// When called as seq(yield), seq calls yield(k, v) for each pair (k, v) in the sequence, +// stopping early if yield returns false. +// See the [iter] package documentation for more details. +type Seq2[K, V any] func(yield func(K, V) bool) + +type coro struct{} + +//go:linkname newcoro runtime.newcoro +func newcoro(func(*coro)) *coro + +//go:linkname coroswitch runtime.coroswitch +func coroswitch(*coro) + +// Pull converts the “push-style” iterator sequence seq +// into a “pull-style” iterator accessed by the two functions +// next and stop. 
+// +// Next returns the next value in the sequence +// and a boolean indicating whether the value is valid. +// When the sequence is over, next returns the zero V and false. +// It is valid to call next after reaching the end of the sequence +// or after calling stop. These calls will continue +// to return the zero V and false. +// +// Stop ends the iteration. It must be called when the caller is +// no longer interested in next values and next has not yet +// signaled that the sequence is over (with a false boolean return). +// It is valid to call stop multiple times and when next has +// already returned false. Typically, callers should “defer stop()”. +// +// It is an error to call next or stop from multiple goroutines +// simultaneously. +// +// If the iterator panics during a call to next (or stop), +// then next (or stop) itself panics with the same value. +func Pull[V any](seq Seq[V]) (next func() (V, bool), stop func()) { + var ( + v V + ok bool + done bool + yieldNext bool + racer int + panicValue any + seqDone bool // to detect Goexit + ) + c := newcoro(func(c *coro) { + race.Acquire(unsafe.Pointer(&racer)) + if done { + race.Release(unsafe.Pointer(&racer)) + return + } + yield := func(v1 V) bool { + if done { + return false + } + if !yieldNext { + panic("iter.Pull: yield called again before next") + } + yieldNext = false + v, ok = v1, true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + return !done + } + // Recover and propagate panics from seq. + defer func() { + if p := recover(); p != nil { + panicValue = p + } else if !seqDone { + panicValue = goexitPanicValue + } + done = true // Invalidate iterator + race.Release(unsafe.Pointer(&racer)) + }() + seq(yield) + var v0 V + v, ok = v0, false + seqDone = true + }) + next = func() (v1 V, ok1 bool) { + race.Write(unsafe.Pointer(&racer)) // detect races + + if done { + return + } + if yieldNext { + panic("iter.Pull: next called again before yield") + } + yieldNext = true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + + // Propagate panics and goexits from seq. + if panicValue != nil { + if panicValue == goexitPanicValue { + // Propagate runtime.Goexit from seq. + runtime.Goexit() + } else { + panic(panicValue) + } + } + return v, ok + } + stop = func() { + race.Write(unsafe.Pointer(&racer)) // detect races + + if !done { + done = true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + + // Propagate panics and goexits from seq. + if panicValue != nil { + if panicValue == goexitPanicValue { + // Propagate runtime.Goexit from seq. + runtime.Goexit() + } else { + panic(panicValue) + } + } + } + } + return next, stop +} + +// Pull2 converts the “push-style” iterator sequence seq +// into a “pull-style” iterator accessed by the two functions +// next and stop. +// +// Next returns the next pair in the sequence +// and a boolean indicating whether the pair is valid. +// When the sequence is over, next returns a pair of zero values and false. +// It is valid to call next after reaching the end of the sequence +// or after calling stop. These calls will continue +// to return a pair of zero values and false. +// +// Stop ends the iteration. It must be called when the caller is +// no longer interested in next values and next has not yet +// signaled that the sequence is over (with a false boolean return). +// It is valid to call stop multiple times and when next has +// already returned false. 
Typically, callers should “defer stop()”. +// +// It is an error to call next or stop from multiple goroutines +// simultaneously. +// +// If the iterator panics during a call to next (or stop), +// then next (or stop) itself panics with the same value. +func Pull2[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func()) { + var ( + k K + v V + ok bool + done bool + yieldNext bool + racer int + panicValue any + seqDone bool + ) + c := newcoro(func(c *coro) { + race.Acquire(unsafe.Pointer(&racer)) + if done { + race.Release(unsafe.Pointer(&racer)) + return + } + yield := func(k1 K, v1 V) bool { + if done { + return false + } + if !yieldNext { + panic("iter.Pull2: yield called again before next") + } + yieldNext = false + k, v, ok = k1, v1, true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + return !done + } + // Recover and propagate panics from seq. + defer func() { + if p := recover(); p != nil { + panicValue = p + } else if !seqDone { + panicValue = goexitPanicValue + } + done = true // Invalidate iterator. + race.Release(unsafe.Pointer(&racer)) + }() + seq(yield) + var k0 K + var v0 V + k, v, ok = k0, v0, false + seqDone = true + }) + next = func() (k1 K, v1 V, ok1 bool) { + race.Write(unsafe.Pointer(&racer)) // detect races + + if done { + return + } + if yieldNext { + panic("iter.Pull2: next called again before yield") + } + yieldNext = true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + + // Propagate panics and goexits from seq. + if panicValue != nil { + if panicValue == goexitPanicValue { + // Propagate runtime.Goexit from seq. + runtime.Goexit() + } else { + panic(panicValue) + } + } + return k, v, ok + } + stop = func() { + race.Write(unsafe.Pointer(&racer)) // detect races + + if !done { + done = true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + + // Propagate panics and goexits from seq. + if panicValue != nil { + if panicValue == goexitPanicValue { + // Propagate runtime.Goexit from seq. + runtime.Goexit() + } else { + panic(panicValue) + } + } + } + } + return next, stop +} + +// goexitPanicValue is a sentinel value indicating that an iterator +// exited via runtime.Goexit. +var goexitPanicValue any = new(int) diff --git a/race/doc.go b/race/doc.go new file mode 100644 index 0000000..8fa44ce --- /dev/null +++ b/race/doc.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package race contains helper functions for manually instrumenting code for the race detector. + +The runtime package intentionally exports these functions only in the race build; +this package exports them unconditionally but without the "race" build tag they are no-ops. +*/ +package race diff --git a/race/norace.go b/race/norace.go new file mode 100644 index 0000000..21f9c10 --- /dev/null +++ b/race/norace.go @@ -0,0 +1,54 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !race + +package race + +import ( + "unsafe" +) + +const Enabled = false + +func Acquire(addr unsafe.Pointer) { +} + +func Release(addr unsafe.Pointer) { +} + +func ReleaseMerge(addr unsafe.Pointer) { +} + +func Disable() { +} + +func Enable() { +} + +func Read(addr unsafe.Pointer) { +} + +func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) { +} + +//func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { +//} + +func Write(addr unsafe.Pointer) { +} + +func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) { +} + +//func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { +//} + +func ReadRange(addr unsafe.Pointer, len int) { +} + +func WriteRange(addr unsafe.Pointer, len int) { +} + +func Errors() int { return 0 } diff --git a/race/race.go b/race/race.go new file mode 100644 index 0000000..bfcb24a --- /dev/null +++ b/race/race.go @@ -0,0 +1,58 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package race + +import ( + "internal/abi" + "unsafe" +) + +const Enabled = true + +// Functions below pushed from runtime. + +//go:linkname Acquire +func Acquire(addr unsafe.Pointer) + +//go:linkname Release +func Release(addr unsafe.Pointer) + +//go:linkname ReleaseMerge +func ReleaseMerge(addr unsafe.Pointer) + +//go:linkname Disable +func Disable() + +//go:linkname Enable +func Enable() + +//go:linkname Read +func Read(addr unsafe.Pointer) + +//go:linkname ReadPC +func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) + +//go:linkname ReadObjectPC +func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) + +//go:linkname Write +func Write(addr unsafe.Pointer) + +//go:linkname WritePC +func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) + +//go:linkname WriteObjectPC +func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) + +//go:linkname ReadRange +func ReadRange(addr unsafe.Pointer, len int) + +//go:linkname WriteRange +func WriteRange(addr unsafe.Pointer, len int) + +//go:linkname Errors +func Errors() int
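
The iter.go file above documents Pull, which adapts a push-style Seq into a pull-style pair of next/stop functions. As a minimal usage sketch, written against the standard library iter and slices packages (Go 1.23+) rather than this vendored copy, and using a hypothetical helper named firstN:

package main

import (
	"fmt"
	"iter"
	"slices"
)

// firstN pulls at most n values from seq using iter.Pull.
// firstN is a hypothetical helper used here only for illustration.
func firstN[V any](seq iter.Seq[V], n int) []V {
	next, stop := iter.Pull(seq)
	// stop must be called if we abandon the sequence before it is finished.
	defer stop()
	var out []V
	for len(out) < n {
		v, ok := next()
		if !ok {
			break // sequence ended before n values were produced
		}
		out = append(out, v)
	}
	return out
}

func main() {
	seq := slices.Values([]int{1, 2, 3, 4, 5}) // push-style iter.Seq[int]
	fmt.Println(firstN(seq, 3))                // prints [1 2 3]
}

Deferring stop is what allows the underlying iterator function to unwind when the caller abandons the sequence early, as the Pull doc comment above requires.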
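The goarch.go file above derives PtrSize from a constant bit expression instead of per-arch build tags. A standalone sketch (not part of the vendored package) showing how that expression evaluates:

package main

import "fmt"

func main() {
	// ^uintptr(0) has every bit set. Shifting it right by 63 leaves 1 on a
	// 64-bit platform (bit 63 exists) and 0 on a 32-bit platform (the shift
	// discards all set bits), so the constant is 8 or 4 respectively.
	const ptrSize = 4 << (^uintptr(0) >> 63)
	fmt.Println(ptrSize) // 8 on 64-bit platforms, 4 on 32-bit platforms
}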