diff --git a/go.mod b/go.mod index fc3793ed2a..28348482e0 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.1 - github.com/tetratelabs/wazero v1.2.0 + github.com/tetratelabs/wazero v1.2.1 github.com/things-go/go-socks5 v0.0.3 github.com/xlab/treeprint v1.2.0 github.com/yiya1989/sshkrb5 v0.0.0-20201110125252-a1455b75a35e diff --git a/go.sum b/go.sum index a679fa2356..479657ed55 100644 --- a/go.sum +++ b/go.sum @@ -244,8 +244,8 @@ github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tetratelabs/wazero v1.2.0 h1:I/8LMf4YkCZ3r2XaL9whhA0VMyAvF6QE+O7rco0DCeQ= -github.com/tetratelabs/wazero v1.2.0/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= +github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= +github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= github.com/thedevsaddam/gojsonq/v2 v2.5.2 h1:CoMVaYyKFsVj6TjU6APqAhAvC07hTI6IQen8PHzHYY0= github.com/thedevsaddam/gojsonq/v2 v2.5.2/go.mod h1:bv6Xa7kWy82uT0LnXPE2SzGqTj33TAEeR560MdJkiXs= github.com/things-go/go-socks5 v0.0.3 h1:QtlIhkwDuLNCwW3wnt2uTjn1mQzpyjnwct2xdPuqroI= diff --git a/vendor/github.com/tetratelabs/wazero/examples/basic/testdata/add.wasm b/vendor/github.com/tetratelabs/wazero/examples/basic/testdata/add.wasm index 3a9da803e1..259ca4f00a 100644 Binary files a/vendor/github.com/tetratelabs/wazero/examples/basic/testdata/add.wasm and b/vendor/github.com/tetratelabs/wazero/examples/basic/testdata/add.wasm differ diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go index b1b910d6c7..c87d7699f3 100644 --- a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go @@ -310,17 +310,20 @@ func fdFdstatSetFlagsFn(_ context.Context, mod api.Module, params []uint64) sysc if f, ok := fsc.LookupFile(fd); !ok { return syscall.EBADF - } else if isPreopenedStdio(fd, f) { - nonblock := wasip1.FD_NONBLOCK&wasiFlag != 0 - return f.File.SetNonblock(nonblock) - } else if _, ok := f.File.(socketapi.TCPConn); ok { - nonblock := wasip1.FD_NONBLOCK&wasiFlag != 0 - return f.File.SetNonblock(nonblock) } else { - // For normal files, proceed to apply an append flag. - append := wasip1.FD_APPEND&wasiFlag != 0 - return f.File.SetAppend(append) + nonblock := wasip1.FD_NONBLOCK&wasiFlag != 0 + errno := f.File.SetNonblock(nonblock) + if errno != 0 { + return errno + } + if stat, err := f.File.Stat(); err == 0 && stat.Mode.IsRegular() { + // For normal files, proceed to apply an append flag. + append := wasip1.FD_APPEND&wasiFlag != 0 + return f.File.SetAppend(append) + } } + + return 0 } // fdFdstatSetRights will not be implemented as rights were removed from WASI. 
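An aside on the fs.go hunk above: v1.2.1 reworks `fd_fdstat_set_flags` so that `FD_NONBLOCK` is honored for every descriptor kind (preopened stdio, TCP sockets, and regular files alike), and `FD_APPEND` is applied afterwards only when the file stats as a regular file. Below is a minimal, hypothetical sketch of that control flow; the `fdFile` interface, flag constants, and helper names are simplified stand-ins for illustration, not wazero's internal `fsapi` types.

package main

import (
	"fmt"
	"io/fs"
)

// WASI fdflags bits as used by this sketch; the real constants live in
// wazero's wasip1 package (FD_APPEND = 1<<0, FD_NONBLOCK = 1<<2).
const (
	fdAppend   uint16 = 1 << 0
	fdNonblock uint16 = 1 << 2
)

// fdFile is a stand-in for just the methods this code path needs.
type fdFile interface {
	SetNonblock(bool) error
	SetAppend(bool) error
	Mode() (fs.FileMode, error) // stands in for Stat() in the real code
}

// setFdstatFlags mirrors the patched control flow: NONBLOCK is set first for
// all file kinds, then APPEND is applied only to regular files.
func setFdstatFlags(f fdFile, flags uint16) error {
	if err := f.SetNonblock(flags&fdNonblock != 0); err != nil {
		return err
	}
	if mode, err := f.Mode(); err == nil && mode.IsRegular() {
		return f.SetAppend(flags&fdAppend != 0)
	}
	return nil
}

// fakeFile lets the sketch run without a real WASI host.
type fakeFile struct{ nonblock, appendMode bool }

func (f *fakeFile) SetNonblock(v bool) error   { f.nonblock = v; return nil }
func (f *fakeFile) SetAppend(v bool) error     { f.appendMode = v; return nil }
func (f *fakeFile) Mode() (fs.FileMode, error) { return 0, nil } // 0 = regular file

func main() {
	f := &fakeFile{}
	if err := setFdstatFlags(f, fdNonblock|fdAppend); err != nil {
		panic(err)
	}
	fmt.Printf("nonblock=%v append=%v\n", f.nonblock, f.appendMode)
}

The companion `openFlags` hunk below covers the open-time half of the same feature: an `FD_NONBLOCK` fdflag at `path_open` time now maps to `O_NONBLOCK`.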
@@ -1691,6 +1694,9 @@ func openFlags(dirflags, oflags, fdflags uint16, rights uint32) (openFlags int)
 		openFlags |= syscall.O_CREAT
 		defaultMode = syscall.O_RDWR
 	}
+	if fdflags&wasip1.FD_NONBLOCK != 0 {
+		openFlags |= syscall.O_NONBLOCK
+	}
 	if fdflags&wasip1.FD_APPEND != 0 {
 		openFlags |= syscall.O_APPEND
 		defaultMode = syscall.O_RDWR
diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/gotip/wasi.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/gotip/wasi.go
index 271e03c827..50612cb871 100644
--- a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/gotip/wasi.go
+++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/gotip/wasi.go
@@ -2,8 +2,12 @@ package main
 
 import (
 	"fmt"
+	"io"
 	"net"
+	"net/http"
 	"os"
+	"sync"
+	"syscall"
 )
 
 func main() {
@@ -12,9 +16,18 @@ func main() {
 		if err := mainSock(); err != nil {
 			panic(err)
 		}
+	case "http":
+		if err := mainHTTP(); err != nil {
+			panic(err)
+		}
+	case "nonblock":
+		if err := mainNonblock(os.Args[2], os.Args[3:]); err != nil {
+			panic(err)
+		}
 	}
 }
 
+// mainSock is an explicit test of a blocking socket.
 func mainSock() error {
 	// Get a listener from the pre-opened file descriptor.
 	// The listener is the first pre-open, with a file-descriptor of 3.
@@ -43,3 +56,107 @@ func mainSock() error {
 	fmt.Println(string(buf[:n]))
 	return nil
 }
+
+// mainHTTP implicitly tests non-blocking sockets, as they are needed for
+// middleware.
+func mainHTTP() error {
+	// Get the file representing a pre-opened TCP socket.
+	// The socket (listener) is the first pre-open, with a file-descriptor of
+	// 3 because the host didn't add any pre-opened files.
+	listenerFD := 3
+	f := os.NewFile(uintptr(listenerFD), "")
+
+	// Wasm runs similarly to GOMAXPROCS=1, so multiple goroutines cannot work
+	// in parallel. Non-blocking mode allows the poller to park the goroutine
+	// accepting connections while work is done on one.
+	if err := syscall.SetNonblock(listenerFD, true); err != nil {
+		return err
+	}
+
+	// Convert the file representing the pre-opened socket to a listener, so
+	// that we can integrate it with HTTP middleware.
+	ln, err := net.FileListener(f)
+	defer f.Close()
+	if err != nil {
+		return err
+	}
+	defer ln.Close()
+
+	// Serve middleware that echoes the request body to the response once, then quits.
+	h := &echoOnce{ch: make(chan struct{}, 1)}
+	go http.Serve(ln, h)
+	<-h.ch
+	return nil
+}
+
+type echoOnce struct {
+	ch chan struct{}
+}
+
+func (e echoOnce) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Copy up to 32 bytes from the request to the response, appending a newline.
+	// Note: the test should write: "wazero", so that's all we should read.
+	var buf [32]byte
+	if n, err := r.Body.Read(buf[:]); err != nil && err != io.EOF {
+		panic(err)
+	} else if n, err = w.Write(append(buf[:n], '\n')); err != nil {
+		panic(err)
+	}
+	// Once one request was served, close the channel.
+	close(e.ch)
+}
+
+// Adapted from nonblock.go
+// https://github.com/golang/go/blob/0fcc70ecd56e3b5c214ddaee4065ea1139ae16b5/src/runtime/internal/wasitest/testdata/nonblock.go
+func mainNonblock(mode string, files []string) error {
+	ready := make(chan struct{})
+
+	var wg sync.WaitGroup
+	for _, path := range files {
+		f, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		switch mode {
+		case "open":
+		case "create":
+			fd := f.Fd()
+			if err = syscall.SetNonblock(int(fd), true); err != nil {
+				return err
+			}
+			f = os.NewFile(fd, path)
+		default:
+			return fmt.Errorf("invalid test mode")
+		}
+
+		spawnWait := make(chan struct{})
+
+		wg.Add(1)
+		go func(f *os.File) {
+			defer f.Close()
+			defer wg.Done()
+
+			// Signal the routine has been spawned.
+			close(spawnWait)
+
+			// Wait until ready.
+			<-ready
+
+			var buf [256]byte
+
+			if n, err := f.Read(buf[:]); err != nil {
+				panic(err)
+			} else {
+				os.Stderr.Write(buf[:n])
+			}
+		}(f)
+
+		// Spawn one goroutine at a time.
+		<-spawnWait
+	}
+
+	println("waiting")
+	close(ready)
+	wg.Wait()
+	return nil
+}
diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.c b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.c
index a03e2dd5fe..818b8c330a 100644
--- a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.c
+++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.c
@@ -162,6 +162,25 @@ void main_sock() {
   }
 }
 
+void main_nonblock(char* fpath) {
+  struct timespec tim, tim2;
+  tim.tv_sec = 0;
+  tim.tv_nsec = 100 * 1000000; // 100 msec
+  int fd = open(fpath, O_RDONLY | O_NONBLOCK);
+  char buf[32];
+  ssize_t newLen = 0;
+  while (newLen == 0) {
+    newLen = read(fd, buf, sizeof(buf));
+    if (errno == EAGAIN || newLen == 0) {
+      printf(".");
+      nanosleep(&tim, &tim2);
+      continue;
+    }
+  }
+  printf("\n%s\n", buf);
+  close(fd);
+}
+
 int main(int argc, char** argv) {
   if (strcmp(argv[1],"ls")==0) {
     bool repeat = false;
@@ -193,6 +212,8 @@ int main(int argc, char** argv) {
     main_open_wronly();
   } else if (strcmp(argv[1],"sock")==0) {
     main_sock();
+  } else if (strcmp(argv[1],"nonblock")==0) {
+    main_nonblock(argv[2]);
   } else {
     fprintf(stderr, "unknown command: %s\n", argv[1]);
     return 1;
diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.wasm b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.wasm
index 3e1ec063da..255a198f09 100644
Binary files a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.wasm and b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/testdata/zig-cc/wasi.wasm differ
diff --git a/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/consts.go b/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/consts.go
index 9df4d5f6a1..47ed3cc2d4 100644
--- a/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/consts.go
+++ b/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/consts.go
@@ -506,6 +506,8 @@ const (
 	BCONDNE
 	// BCONDVS is the B.cond instruction with CondVS.
 	BCONDVS
+	// BCONDVC is the B.cond instruction with CondVC.
+	BCONDVC
 	// CLZ is the CLZ instruction.
https://developer.arm.com/documentation/dui0802/a/A64-General-Instructions/CLZ CLZ @@ -1073,6 +1075,8 @@ func InstructionName(i asm.Instruction) string { return "BCONDNE" case BCONDVS: return "BCONDVS" + case BCONDVC: + return "BCONDVC" case CLZ: return "CLZ" case CLZW: diff --git a/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/impl.go b/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/impl.go index 2d64f47ac4..00bc0d9952 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/impl.go +++ b/vendor/github.com/tetratelabs/wazero/internal/asm/arm64/impl.go @@ -832,6 +832,8 @@ func (a *AssemblerImpl) relativeBranchFinalize(code []byte, n *nodeImpl) error { condBits = 0b0001 case BCONDVS: condBits = 0b0110 + case BCONDVC: + condBits = 0b0111 } branchInstOffset := int64(n.OffsetInBinary()) @@ -872,7 +874,7 @@ func (a *AssemblerImpl) relativeBranchFinalize(code []byte, n *nodeImpl) error { func (a *AssemblerImpl) encodeRelativeBranch(buf asm.Buffer, n *nodeImpl) error { switch n.instruction { - case B, BCONDEQ, BCONDGE, BCONDGT, BCONDHI, BCONDHS, BCONDLE, BCONDLO, BCONDLS, BCONDLT, BCONDMI, BCONDNE, BCONDVS, BCONDPL: + case B, BCONDEQ, BCONDGE, BCONDGT, BCONDHI, BCONDHS, BCONDLE, BCONDLO, BCONDLS, BCONDLT, BCONDMI, BCONDNE, BCONDVS, BCONDVC, BCONDPL: default: return errorEncodingUnsupported(n) } diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_amd64.go b/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_amd64.go index f02bcaa91e..79b299ec9d 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_amd64.go +++ b/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_amd64.go @@ -93,6 +93,7 @@ type amd64Compiler struct { stackPointerCeil uint64 // assignStackPointerCeilNeeded holds an asm.Node whose AssignDestinationConstant must be called with the determined stack pointer ceiling. assignStackPointerCeilNeeded asm.Node + compiledTrapTargets [nativeCallStatusModuleClosed]asm.Node withListener bool typ *wasm.FunctionType // locationStackForEntrypoint is the initial location stack for all functions. To reuse the allocated stack, @@ -583,7 +584,7 @@ func (c *amd64Compiler) compileBrIf(o *wazeroir.UnionOperation) error { return err } // Check if the value not equals zero. - c.assembler.CompileRegisterToConst(amd64.CMPQ, cond.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, cond.register, cond.register) // Emit jump instruction which jumps when the value does not equals zero. jmpWithCond = c.assembler.CompileJump(amd64.JNE) @@ -869,13 +870,9 @@ func (c *amd64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) error { // tmp = &module.Tables[0] + Index*8 = &module.Tables[0] + sizeOf(*TableInstance)*index = module.Tables[o.TableIndex]. c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(tableIndex*8), tmp) - // Then, we need to check if the offset doesn't exceed the length of table. + // Then, we need to trap if the offset exceeds the length of table. c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, offset.register) - notLengthExceedJump := c.assembler.CompileJump(amd64.JHI) - - // If it exceeds, we return the function with nativeCallStatusCodeInvalidTableAccess. 
- c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(notLengthExceedJump) + c.compileMaybeExitFromNativeCode(amd64.JHI, nativeCallStatusCodeInvalidTableAccess) // next we check if the target's type matches the operation's one. // In order to get the type instance's address, we have to multiply the offset @@ -893,15 +890,10 @@ func (c *amd64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) error { // At this point offset.register holds the address of *code (as uintptr) at wasm.Table[offset]. // // Check if the value of table[offset] equals zero, meaning that the target is uninitialized. - c.assembler.CompileRegisterToConst(amd64.CMPQ, offset.register, 0) - - // Jump if the target is initialized element. - jumpIfInitialized := c.assembler.CompileJump(amd64.JNE) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, offset.register, offset.register) - // If not initialized, we return the function with nativeCallStatusCodeInvalidTableAccess. - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - - c.assembler.SetJumpTargetOnNext(jumpIfInitialized) + // Skipped if the target is initialized. + c.compileMaybeExitFromNativeCode(amd64.JNE, nativeCallStatusCodeInvalidTableAccess) // Next, we need to check the type matches, i.e. table[offset].source.TypeID == targetFunctionType's typeID. // @@ -911,14 +903,9 @@ func (c *amd64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) error { tmp2) c.assembler.CompileMemoryToRegister(amd64.MOVL, tmp2, int64(typeIndex)*4, tmp2) - // Jump if the type matches. + // Skipped if the type matches. c.assembler.CompileMemoryToRegister(amd64.CMPL, offset.register, functionTypeIDOffset, tmp2) - jumpIfTypeMatch := c.assembler.CompileJump(amd64.JEQ) - - // Otherwise, exit with type mismatch status. - c.compileExitFromNativeCode(nativeCallStatusCodeTypeMismatchOnIndirectCall) - - c.assembler.SetJumpTargetOnNext(jumpIfTypeMatch) + c.compileMaybeExitFromNativeCode(amd64.JEQ, nativeCallStatusCodeTypeMismatchOnIndirectCall) targetFunctionType := &c.ir.Types[typeIndex] if err = c.compileCallFunctionImpl(offset.register, targetFunctionType); err != nil { return nil @@ -947,7 +934,7 @@ func (c *amd64Compiler) compileSelectV128Impl(selectorReg asm.Register) error { } // Compare the conditional value with zero. - c.assembler.CompileRegisterToConst(amd64.CMPQ, selectorReg, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, selectorReg, selectorReg) // Set the jump if the top value is not zero. jmpIfNotZero := c.assembler.CompileJump(amd64.JNE) @@ -988,7 +975,7 @@ func (c *amd64Compiler) compileSelect(o *wazeroir.UnionOperation) error { peekedX1 := c.locationStack.peek() // Compare the conditional value with zero. - c.assembler.CompileRegisterToConst(amd64.CMPQ, cv.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, cv.register, cv.register) // Now we can use c.register as temporary location. // We alias it here for readability. @@ -1295,7 +1282,7 @@ func (c *amd64Compiler) compileClz(o *wazeroir.UnionOperation) error { // First, we have to check if the target is non-zero as BSR is undefined // on zero. See https://www.felixcloutier.com/x86/bsr. - c.assembler.CompileRegisterToConst(amd64.CMPQ, target.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, target.register, target.register) jmpIfNonZero := c.assembler.CompileJump(amd64.JNE) // If the value is zero, we just push the const value. 
@@ -1358,7 +1345,7 @@ func (c *amd64Compiler) compileCtz(o *wazeroir.UnionOperation) error { // https://www.felixcloutier.com/x86/tzcnt.html // First we compare the target with zero. - c.assembler.CompileRegisterToConst(amd64.CMPQ, target.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, target.register, target.register) jmpIfNonZero := c.assembler.CompileJump(amd64.JNE) // If the value is zero, we just push the const value. @@ -1523,18 +1510,13 @@ func (c *amd64Compiler) performDivisionOnInts(isRem, is32Bit, signed bool) error // Check if the x2 equals zero. if is32Bit { - c.assembler.CompileRegisterToConst(amd64.CMPL, x2.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTL, x2.register, x2.register) } else { - c.assembler.CompileRegisterToConst(amd64.CMPQ, x2.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, x2.register, x2.register) } - // Jump if the divisor is not zero. - jmpIfNotZero := c.assembler.CompileJump(amd64.JNE) - - // Otherwise, we return with nativeCallStatusIntegerDivisionByZero status. - c.compileExitFromNativeCode(nativeCallStatusIntegerDivisionByZero) - - c.assembler.SetJumpTargetOnNext(jmpIfNotZero) + // Skipped if the divisor is nonzero. + c.compileMaybeExitFromNativeCode(amd64.JNE, nativeCallStatusIntegerDivisionByZero) // next, we ensure that x1 is placed on AX. x1 := c.locationStack.pop() @@ -1567,29 +1549,17 @@ func (c *amd64Compiler) performDivisionOnInts(isRem, is32Bit, signed bool) error // for divisions on (-2^31) / -1 where we do not need to emit the special branches. // For detail, please refer to https://stackoverflow.com/questions/56303282/why-idiv-with-1-causes-floating-point-exception - // First we compare the division with -1. - if is32Bit { - c.assembler.CompileRegisterToConst(amd64.CMPL, x2.register, -1) - } else { - c.assembler.CompileRegisterToConst(amd64.CMPQ, x2.register, -1) - } - - // If it doesn't equal minus one, we jump to the normal case. - okJmp := c.assembler.CompileJump(amd64.JNE) - - // Otherwise, we store zero into the remainder result register (DX). + // First we store zero into the remainder result register (DX) and compare the divisor with -1. if is32Bit { c.assembler.CompileRegisterToRegister(amd64.XORL, remainderRegister, remainderRegister) + c.assembler.CompileRegisterToConst(amd64.CMPL, x2.register, -1) } else { c.assembler.CompileRegisterToRegister(amd64.XORQ, remainderRegister, remainderRegister) + c.assembler.CompileRegisterToConst(amd64.CMPQ, x2.register, -1) } - // Emit the exit jump instruction for the divisor -1 case so - // we skips the normal case. - signedRemMinusOneDivisorJmp = c.assembler.CompileJump(amd64.JMP) - - // Set the normal case's jump target. - c.assembler.SetJumpTargetOnNext(okJmp) + // If it equals minus one, we skip the normal case. + signedRemMinusOneDivisorJmp = c.assembler.CompileJump(amd64.JEQ) } else if isSignedDiv { // For signed division, we have to have branches for "math.MinInt{32,64} / -1" // case which results in the floating point exception via division error as @@ -1606,7 +1576,7 @@ func (c *amd64Compiler) performDivisionOnInts(isRem, is32Bit, signed bool) error nonMinusOneDivisorJmp := c.assembler.CompileJump(amd64.JNE) // next we check if the quotient is the most negative value for the signed integer. - // That means whether or not we try to do (math.MaxInt32 / -1) or (math.Math.Int64 / -1) respectively. + // That means whether or not we try to do (math.MinInt32 / -1) or (math.MinInt64 / -1) respectively. 
if is32Bit { if err := c.assembler.CompileRegisterToStaticConst(amd64.CMPL, x1.register, c.minimum32BitSignedInt); err != nil { return err @@ -1617,17 +1587,12 @@ func (c *amd64Compiler) performDivisionOnInts(isRem, is32Bit, signed bool) error } } - // If it doesn't equal, we jump to the normal case. - jmpOK := c.assembler.CompileJump(amd64.JNE) - - // Otherwise, we are trying to do (math.MaxInt32 / -1) or (math.Math.Int64 / -1), - // and that is the overflow in division as the result becomes 2^31 which is larger than + // Trap if we are trying to do (math.MinInt32 / -1) or (math.MinInt64 / -1), + // as that is the overflow in division as the result becomes 2^31 which is larger than // the maximum of signed 32-bit int (2^31-1). - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) - + c.compileMaybeExitFromNativeCode(amd64.JNE, nativeCallStatusIntegerOverflow) // Set the normal case's jump target. c.assembler.SetJumpTargetOnNext(nonMinusOneDivisorJmp) - c.assembler.SetJumpTargetOnNext(jmpOK) } // Now ready to emit the div instruction. @@ -2196,20 +2161,18 @@ func (c *amd64Compiler) emitUnsignedI32TruncFromFloat(isFloat32Bit, nonTrapping } // Check the parity flag (set when the value is NaN), and if it is set, we should raise an exception. - jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is not set. - var nonTrappingNaNJump asm.Node - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidFloatToIntConversion) - } else { + if nonTrapping { + jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is not set. // In non trapping case, NaN is casted as zero. // Zero out the result register by XOR itsself. c.assembler.CompileRegisterToRegister(amd64.XORL, result, result) nonTrappingNaNJump = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPC, nativeCallStatusCodeInvalidFloatToIntConversion) } - c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) - // Jump if the source float value is above or equal math.MaxInt32+1. jmpAboveOrEqualMaxIn32PlusOne := c.assembler.CompileJump(amd64.JCC) @@ -2222,20 +2185,19 @@ func (c *amd64Compiler) emitUnsignedI32TruncFromFloat(isFloat32Bit, nonTrapping // Then if the result is minus, it is invalid conversion from minus float (incl. -Inf). c.assembler.CompileRegisterToRegister(amd64.TESTL, result, result) - jmpIfNotMinusOrMinusInf := c.assembler.CompileJump(amd64.JPL) var nonTrappingMinusJump asm.Node - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) - } else { + if nonTrapping { + jmpIfNotMinusOrMinusInf := c.assembler.CompileJump(amd64.JPL) // In non trapping case, the minus value is casted as zero. // Zero out the result register by XOR itsself. c.assembler.CompileRegisterToRegister(amd64.XORL, result, result) nonTrappingMinusJump = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotMinusOrMinusInf) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPL, nativeCallStatusIntegerOverflow) } - c.assembler.SetJumpTargetOnNext(jmpIfNotMinusOrMinusInf) - // Otherwise, the values is valid. 
okJmpForLessThanMaxInt32PlusOne := c.assembler.CompileJump(amd64.JMP) @@ -2265,7 +2227,18 @@ func (c *amd64Compiler) emitUnsignedI32TruncFromFloat(isFloat32Bit, nonTrapping c.assembler.CompileRegisterToRegister(amd64.TESTL, result, result) // If the result is minus, the conversion is invalid (from NaN or +Inf) - jmpIfPlusInf := c.assembler.CompileJump(amd64.JMI) + var nonTrappingAboveOrEqualMaxInt32PlusOne asm.Node + if nonTrapping { + jmpIfNotPlusInf := c.assembler.CompileJump(amd64.JPL) + err = c.assembler.CompileStaticConstToRegister(amd64.MOVL, c.maximum32BitUnsignedInt, result) + if err != nil { + return err + } + nonTrappingAboveOrEqualMaxInt32PlusOne = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotPlusInf) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPL, nativeCallStatusIntegerOverflow) + } // Otherwise, we successfully converted the source float minus (math.MaxInt32+1) to int. // So, we retrieve the original source float value by adding the sign mask. @@ -2273,22 +2246,10 @@ func (c *amd64Compiler) emitUnsignedI32TruncFromFloat(isFloat32Bit, nonTrapping return err } - okJmpForAboveOrEqualMaxInt32PlusOne := c.assembler.CompileJump(amd64.JMP) - - c.assembler.SetJumpTargetOnNext(jmpIfPlusInf) - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) - } else { - err = c.assembler.CompileStaticConstToRegister(amd64.MOVL, c.maximum32BitUnsignedInt, result) - if err != nil { - return err - } - } - // We jump to the next instructions for valid cases. c.assembler.SetJumpTargetOnNext(okJmpForLessThanMaxInt32PlusOne) - c.assembler.SetJumpTargetOnNext(okJmpForAboveOrEqualMaxInt32PlusOne) if nonTrapping { + c.assembler.SetJumpTargetOnNext(nonTrappingAboveOrEqualMaxInt32PlusOne) c.assembler.SetJumpTargetOnNext(nonTrappingMinusJump) c.assembler.SetJumpTargetOnNext(nonTrappingNaNJump) } @@ -2323,20 +2284,18 @@ func (c *amd64Compiler) emitUnsignedI64TruncFromFloat(isFloat32Bit, nonTrapping } // Check the parity flag (set when the value is NaN), and if it is set, we should raise an exception. - jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is c.not set. - var nonTrappingNaNJump asm.Node - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidFloatToIntConversion) - } else { + if nonTrapping { + jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is c.not set. // In non trapping case, NaN is casted as zero. // Zero out the result register by XOR itsself. c.assembler.CompileRegisterToRegister(amd64.XORQ, result, result) nonTrappingNaNJump = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPC, nativeCallStatusCodeInvalidFloatToIntConversion) } - c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) - // Jump if the source float values is above or equal math.MaxInt64+1. jmpAboveOrEqualMaxIn32PlusOne := c.assembler.CompileJump(amd64.JCC) @@ -2349,20 +2308,19 @@ func (c *amd64Compiler) emitUnsignedI64TruncFromFloat(isFloat32Bit, nonTrapping // Then if the result is minus, it is invalid conversion from minus float (incl. -Inf). 
c.assembler.CompileRegisterToRegister(amd64.TESTQ, result, result) - jmpIfNotMinusOrMinusInf := c.assembler.CompileJump(amd64.JPL) var nonTrappingMinusJump asm.Node - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) - } else { + if nonTrapping { + jmpIfNotMinusOrMinusInf := c.assembler.CompileJump(amd64.JPL) // In non trapping case, the minus value is casted as zero. // Zero out the result register by XOR itsself. c.assembler.CompileRegisterToRegister(amd64.XORQ, result, result) nonTrappingMinusJump = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotMinusOrMinusInf) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPL, nativeCallStatusIntegerOverflow) } - c.assembler.SetJumpTargetOnNext(jmpIfNotMinusOrMinusInf) - // Otherwise, the values is valid. okJmpForLessThanMaxInt64PlusOne := c.assembler.CompileJump(amd64.JMP) @@ -2392,7 +2350,18 @@ func (c *amd64Compiler) emitUnsignedI64TruncFromFloat(isFloat32Bit, nonTrapping c.assembler.CompileRegisterToRegister(amd64.TESTQ, result, result) // If the result is minus, the conversion is invalid (from NaN or +Inf) - jmpIfPlusInf := c.assembler.CompileJump(amd64.JMI) + var nonTrappingAboveOrEqualMaxInt64PlusOne asm.Node + if nonTrapping { + jmpIfNotPlusInf := c.assembler.CompileJump(amd64.JPL) + err = c.assembler.CompileStaticConstToRegister(amd64.MOVQ, c.maximum64BitUnsignedInt, result) + if err != nil { + return err + } + nonTrappingAboveOrEqualMaxInt64PlusOne = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotPlusInf) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPL, nativeCallStatusIntegerOverflow) + } // Otherwise, we successfully converted the the source float minus (math.MaxInt64+1) to int. // So, we retrieve the original source float value by adding the sign mask. @@ -2400,22 +2369,10 @@ func (c *amd64Compiler) emitUnsignedI64TruncFromFloat(isFloat32Bit, nonTrapping return err } - okJmpForAboveOrEqualMaxInt64PlusOne := c.assembler.CompileJump(amd64.JMP) - - c.assembler.SetJumpTargetOnNext(jmpIfPlusInf) - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) - } else { - err = c.assembler.CompileStaticConstToRegister(amd64.MOVQ, c.maximum64BitUnsignedInt, result) - if err != nil { - return err - } - } - // We jump to the next instructions for valid cases. c.assembler.SetJumpTargetOnNext(okJmpForLessThanMaxInt64PlusOne) - c.assembler.SetJumpTargetOnNext(okJmpForAboveOrEqualMaxInt64PlusOne) if nonTrapping { + c.assembler.SetJumpTargetOnNext(nonTrappingAboveOrEqualMaxInt64PlusOne) c.assembler.SetJumpTargetOnNext(nonTrappingMinusJump) c.assembler.SetJumpTargetOnNext(nonTrappingNaNJump) } @@ -2467,22 +2424,21 @@ func (c *amd64Compiler) emitSignedI32TruncFromFloat(isFloat32Bit, nonTrapping bo } // Check the parity flag (set when the value is NaN), and if it is set, we should raise an exception. - jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is not set. - var nontrappingNanJump asm.Node - if !nonTrapping { - // If the value is NaN, we return the function with nativeCallStatusCodeInvalidFloatToIntConversion. - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidFloatToIntConversion) - } else { + if nonTrapping { + jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is not set. // In non trapping case, NaN is casted as zero. // Zero out the result register by XOR itsself. 
c.assembler.CompileRegisterToRegister(amd64.XORL, result, result) nontrappingNanJump = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) + } else { + // If the value is NaN, we return the function with nativeCallStatusCodeInvalidFloatToIntConversion. + c.compileMaybeExitFromNativeCode(amd64.JPC, nativeCallStatusCodeInvalidFloatToIntConversion) } // Check if the value is larger than or equal the minimum 32-bit integer value, // meaning that the value exceeds the lower bound of 32-bit signed integer range. - c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) if isFloat32Bit { err = c.assembler.CompileStaticConstToRegister(amd64.UCOMISS, c.float32ForMinimumSigned32bitInteger, source.register) } else { @@ -2493,12 +2449,11 @@ func (c *amd64Compiler) emitSignedI32TruncFromFloat(isFloat32Bit, nonTrapping bo } if !nonTrapping { - // Jump if the value exceeds the lower bound. - var jmpIfExceedsLowerBound asm.Node + // Trap if the value does not exceed the lower bound. if isFloat32Bit { - jmpIfExceedsLowerBound = c.assembler.CompileJump(amd64.JCS) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusIntegerOverflow) } else { - jmpIfExceedsLowerBound = c.assembler.CompileJump(amd64.JLS) + c.compileMaybeExitFromNativeCode(amd64.JHI, nativeCallStatusIntegerOverflow) } // At this point, the value is the minimum signed 32-bit int (=-2147483648.000000) or larger than 32-bit maximum. @@ -2512,14 +2467,11 @@ func (c *amd64Compiler) emitSignedI32TruncFromFloat(isFloat32Bit, nonTrapping bo return err } - jmpIfMinimumSignedInt := c.assembler.CompileJump(amd64.JCS) // jump if the value is minus (= the minimum signed 32-bit int). - - c.assembler.SetJumpTargetOnNext(jmpIfExceedsLowerBound) - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) + // Trap if the value is not minus (= the minimum signed 32-bit int). + c.compileMaybeExitFromNativeCode(amd64.JCS, nativeCallStatusIntegerOverflow) // We jump to the next instructions for valid cases. c.assembler.SetJumpTargetOnNext(okJmp) - c.assembler.SetJumpTargetOnNext(jmpIfMinimumSignedInt) } else { // Jump if the value does not exceed the lower bound. var jmpIfNotExceedsLowerBound asm.Node @@ -2605,21 +2557,20 @@ func (c *amd64Compiler) emitSignedI64TruncFromFloat(isFloat32Bit, nonTrapping bo } // Check the parity flag (set when the value is NaN), and if it is set, we should raise an exception. - jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is not set. - var nontrappingNanJump asm.Node - if !nonTrapping { - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidFloatToIntConversion) - } else { + if nonTrapping { + jmpIfNotNaN := c.assembler.CompileJump(amd64.JPC) // jump if parity is not set. // In non trapping case, NaN is casted as zero. // Zero out the result register by XOR itsself. c.assembler.CompileRegisterToRegister(amd64.XORQ, result, result) nontrappingNanJump = c.assembler.CompileJump(amd64.JMP) + c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) + } else { + c.compileMaybeExitFromNativeCode(amd64.JPC, nativeCallStatusCodeInvalidFloatToIntConversion) } // Check if the value is larger than or equal the minimum 64-bit integer value, // meaning that the value exceeds the lower bound of 64-bit signed integer range. 
- c.assembler.SetJumpTargetOnNext(jmpIfNotNaN) if isFloat32Bit { err = c.assembler.CompileStaticConstToRegister(amd64.UCOMISS, c.float32ForMinimumSigned64bitInteger, source.register) } else { @@ -2631,7 +2582,7 @@ func (c *amd64Compiler) emitSignedI64TruncFromFloat(isFloat32Bit, nonTrapping bo if !nonTrapping { // Jump if the value is -Inf. - jmpIfExceedsLowerBound := c.assembler.CompileJump(amd64.JCS) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusIntegerOverflow) // At this point, the value is the minimum signed 64-bit int (=-9223372036854775808.0) or larger than 64-bit maximum. // So, check if the value equals the minimum signed 64-bit int. @@ -2644,14 +2595,11 @@ func (c *amd64Compiler) emitSignedI64TruncFromFloat(isFloat32Bit, nonTrapping bo return err } - jmpIfMinimumSignedInt := c.assembler.CompileJump(amd64.JCS) // jump if the value is minus (= the minimum signed 64-bit int). - - c.assembler.SetJumpTargetOnNext(jmpIfExceedsLowerBound) - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) + // Trap if the value is not minus (= the minimum signed 64-bit int). + c.compileMaybeExitFromNativeCode(amd64.JCS, nativeCallStatusIntegerOverflow) // We jump to the next instructions for valid cases. c.assembler.SetJumpTargetOnNext(okJmp) - c.assembler.SetJumpTargetOnNext(jmpIfMinimumSignedInt) } else { // Jump if the value is not -Inf. jmpIfNotExceedsLowerBound := c.assembler.CompileJump(amd64.JCC) @@ -3504,13 +3452,8 @@ func (c *amd64Compiler) compileMemoryAccessCeilSetup(offsetArg uint32, targetSiz c.assembler.CompileMemoryToRegister(amd64.CMPQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextMemorySliceLenOffset, result) - // Jump if the value is within the memory length. - okJmp := c.assembler.CompileJump(amd64.JCC) - - // Otherwise, we exit the function with out-of-bounds status code. - c.compileExitFromNativeCode(nativeCallStatusCodeMemoryOutOfBounds) - - c.assembler.SetJumpTargetOnNext(okJmp) + // Trap if the value is out-of-bounds of memory length. + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusCodeMemoryOutOfBounds) c.locationStack.markRegisterUnused(result) return result, nil @@ -3664,9 +3607,7 @@ func (c *amd64Compiler) compileInitImpl(isTable bool, index, tableIndex uint32) c.assembler.CompileMemoryToRegister(amd64.CMPQ, instanceAddr, 8, // DataInstance and Element instance holds the length is stored at offset 8. sourceOffset.register) - sourceBoundOKJump := c.assembler.CompileJump(amd64.JCC) - c.compileExitFromNativeCode(outOfBoundsErrorStatus) - c.assembler.SetJumpTargetOnNext(sourceBoundOKJump) + c.compileMaybeExitFromNativeCode(amd64.JCC, outOfBoundsErrorStatus) // Check destination bounds and if exceeds the length, exit with out of bounds error. if isTable { @@ -3681,14 +3622,12 @@ func (c *amd64Compiler) compileInitImpl(isTable bool, index, tableIndex uint32) destinationOffset.register) } - destinationBoundOKJump := c.assembler.CompileJump(amd64.JCC) - c.compileExitFromNativeCode(outOfBoundsErrorStatus) - c.assembler.SetJumpTargetOnNext(destinationBoundOKJump) + c.compileMaybeExitFromNativeCode(amd64.JCC, outOfBoundsErrorStatus) // Otherwise, ready to copy the value from source to destination. // // If the copy size equal zero, we skip the entire instructions below. 
- c.assembler.CompileRegisterToConst(amd64.CMPQ, copySize.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, copySize.register, copySize.register) skipJump := c.assembler.CompileJump(amd64.JEQ) var scale int16 @@ -3782,7 +3721,7 @@ func (c *amd64Compiler) compileLoadDataInstanceAddress(dataIndex uint32, dst asm // compileCopyLoopImpl implements a REP MOVSQ memory copy for the given range with support for both directions. func (c *amd64Compiler) compileCopyLoopImpl(destinationOffset, sourceOffset, copySize *runtimeValueLocation, backwards bool, bwOffset uint8) { // skip if nothing to copy - c.assembler.CompileRegisterToConst(amd64.CMPQ, copySize.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, copySize.register, copySize.register) emptyEightGroupsJump := c.assembler.CompileJump(amd64.JEQ) // Prepare registers for swaps. There will never be more than 3 XCHGs in total. @@ -3894,23 +3833,18 @@ func (c *amd64Compiler) compileMemoryCopy() error { c.assembler.CompileRegisterToRegister(amd64.ADDQ, copySize.register, sourceOffset.register) // destinationOffset += size. c.assembler.CompileRegisterToRegister(amd64.ADDQ, copySize.register, destinationOffset.register) + // tmp = max(sourceOffset, destinationOffset). + c.assembler.CompileRegisterToRegister(amd64.CMPQ, sourceOffset.register, destinationOffset.register) + c.assembler.CompileRegisterToRegister(amd64.MOVQ, sourceOffset.register, tmp) + c.assembler.CompileRegisterToRegister(amd64.CMOVQCS, destinationOffset.register, tmp) - // Check source bounds and if exceeds the length, exit with out of bounds error. - c.assembler.CompileMemoryToRegister(amd64.CMPQ, - amd64ReservedRegisterForCallEngine, callEngineModuleContextMemorySliceLenOffset, sourceOffset.register) - sourceBoundOKJump := c.assembler.CompileJump(amd64.JCC) - c.compileExitFromNativeCode(nativeCallStatusCodeMemoryOutOfBounds) - c.assembler.SetJumpTargetOnNext(sourceBoundOKJump) - - // Check destination bounds and if exceeds the length, exit with out of bounds error. + // Check maximum bounds and if exceeds the length, exit with out of bounds error. c.assembler.CompileMemoryToRegister(amd64.CMPQ, - amd64ReservedRegisterForCallEngine, callEngineModuleContextMemorySliceLenOffset, destinationOffset.register) - destinationBoundOKJump := c.assembler.CompileJump(amd64.JCC) - c.compileExitFromNativeCode(nativeCallStatusCodeMemoryOutOfBounds) - c.assembler.SetJumpTargetOnNext(destinationBoundOKJump) + amd64ReservedRegisterForCallEngine, callEngineModuleContextMemorySliceLenOffset, tmp) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusCodeMemoryOutOfBounds) // Skip zero size. - c.assembler.CompileRegisterToConst(amd64.CMPQ, copySize.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, copySize.register, copySize.register) skipJump := c.assembler.CompileJump(amd64.JEQ) // If dest < source, we can copy forwards @@ -3943,7 +3877,7 @@ func (c *amd64Compiler) compileMemoryCopy() error { // compileFillLoopImpl implements a REP STOSQ fill loop. func (c *amd64Compiler) compileFillLoopImpl(destinationOffset, value, fillSize *runtimeValueLocation, tmp asm.Register, replicateByte bool) { // Skip if nothing to fill. 
- c.assembler.CompileRegisterToConst(amd64.CMPQ, fillSize.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, fillSize.register, fillSize.register) emptyEightGroupsJump := c.assembler.CompileJump(amd64.JEQ) if replicateByte { @@ -4028,18 +3962,16 @@ func (c *amd64Compiler) compileFillImpl(isTable bool, tableIndex uint32) error { amd64ReservedRegisterForCallEngine, callEngineModuleContextMemorySliceLenOffset, destinationOffset.register) } - destinationBoundOKJump := c.assembler.CompileJump(amd64.JCC) if isTable { - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusCodeInvalidTableAccess) } else { - c.compileExitFromNativeCode(nativeCallStatusCodeMemoryOutOfBounds) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusCodeMemoryOutOfBounds) } - c.assembler.SetJumpTargetOnNext(destinationBoundOKJump) // Otherwise, ready to copy the value from source to destination. // // If the copy size equal zero, we skip the entire instructions below. - c.assembler.CompileRegisterToConst(amd64.CMPQ, copySize.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, copySize.register, copySize.register) skipJump := c.assembler.CompileJump(amd64.JEQ) // destinationOffset -= size. @@ -4055,7 +3987,7 @@ func (c *amd64Compiler) compileFillImpl(isTable bool, tableIndex uint32) error { // destinationOffset += memory buffer's absolute address. c.assembler.CompileRegisterToRegister(amd64.ADDQ, amd64ReservedRegisterForMemory, destinationOffset.register) - // Copy first %15 bytes with simple MOVB instruction. + // Copy first % 16 bytes with simple MOVB instruction. beginCopyLoop := c.assembler.CompileStandAlone(amd64.NOP) c.assembler.CompileConstToRegister(amd64.TESTQ, 15, copySize.register) breakLoop := c.assembler.CompileJump(amd64.JEQ) @@ -4154,20 +4086,16 @@ func (c *amd64Compiler) compileTableCopy(o *wazeroir.UnionOperation) error { c.assembler.CompileMemoryToRegister(amd64.MOVQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextTablesElement0AddressOffset, tmp) c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(srcTableIndex*8), tmp) c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, sourceOffset.register) - sourceBoundOKJump := c.assembler.CompileJump(amd64.JCC) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(sourceBoundOKJump) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusCodeInvalidTableAccess) // Check destination bounds and if exceeds the length, exit with out of bounds error. c.assembler.CompileMemoryToRegister(amd64.MOVQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextTablesElement0AddressOffset, tmp) c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(dstTableIndex*8), tmp) c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, destinationOffset.register) - destinationBoundOKJump := c.assembler.CompileJump(amd64.JCC) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(destinationBoundOKJump) + c.compileMaybeExitFromNativeCode(amd64.JCC, nativeCallStatusCodeInvalidTableAccess) // Skip zero size. - c.assembler.CompileRegisterToConst(amd64.CMPQ, copySize.register, 0) + c.assembler.CompileRegisterToRegister(amd64.TESTQ, copySize.register, copySize.register) skipJump := c.assembler.CompileJump(amd64.JEQ) // If dest < source, we can copy forwards. 
@@ -4257,9 +4185,7 @@ func (c *amd64Compiler) compileTableGet(o *wazeroir.UnionOperation) error { // Out of bounds check. c.assembler.CompileMemoryToRegister(amd64.CMPQ, ref, tableInstanceTableLenOffset, offset.register) - boundOKJmp := c.assembler.CompileJump(amd64.JHI) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(boundOKJmp) + c.compileMaybeExitFromNativeCode(amd64.JHI, nativeCallStatusCodeInvalidTableAccess) // ref = [&tables[TableIndex] + tableInstanceTableOffset] = &tables[TableIndex].References[0] c.assembler.CompileMemoryToRegister(amd64.MOVQ, ref, tableInstanceTableOffset, ref) @@ -4307,9 +4233,7 @@ func (c *amd64Compiler) compileTableSet(o *wazeroir.UnionOperation) error { // Out of bounds check. c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, offset.register) - boundOKJmp := c.assembler.CompileJump(amd64.JHI) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(boundOKJmp) + c.compileMaybeExitFromNativeCode(amd64.JHI, nativeCallStatusCodeInvalidTableAccess) // tmp = [&tables[TableIndex] + tableInstanceTableOffset] = &tables[TableIndex].References[0] c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, tableInstanceTableOffset, tmp) @@ -4789,11 +4713,7 @@ func (c *amd64Compiler) compileReturnFunction() error { ) c.assembler.CompileRegisterToRegister(amd64.TESTQ, returnAddressRegister, returnAddressRegister) - jmpIfNotReturn := c.assembler.CompileJump(amd64.JNE) - c.compileExitFromNativeCode(nativeCallStatusCodeReturned) - - // Otherwise, we return to the caller. - c.assembler.SetJumpTargetOnNext(jmpIfNotReturn) + c.compileMaybeExitFromNativeCode(amd64.JNE, nativeCallStatusCodeReturned) // Alias for readability. tmpRegister := amd64CallingConventionDestinationFunctionModuleInstanceAddressRegister @@ -4896,16 +4816,52 @@ func (c *amd64Compiler) compileReleaseRegisterToStack(loc *runtimeValueLocation) } } -func (c *amd64Compiler) compileExitFromNativeCode(status nativeCallStatusCode) { - c.assembler.CompileConstToMemory(amd64.MOVB, int64(status), - amd64ReservedRegisterForCallEngine, callEngineExitContextNativeCallStatusCodeOffset) +func (c *amd64Compiler) compileMaybeExitFromNativeCode(skipCondition asm.Instruction, status nativeCallStatusCode) { + if target := c.compiledTrapTargets[status]; target != nil { + // We've already compiled this. + // Invert the return condition to jump into the appropriate target. + var returnCondition asm.Instruction + switch skipCondition { + case amd64.JHI: + returnCondition = amd64.JLS + case amd64.JLS: + returnCondition = amd64.JHI + case amd64.JNE: + returnCondition = amd64.JEQ + case amd64.JEQ: + returnCondition = amd64.JNE + case amd64.JCC: + returnCondition = amd64.JCS + case amd64.JCS: + returnCondition = amd64.JCC + case amd64.JPC: + returnCondition = amd64.JPS + case amd64.JPS: + returnCondition = amd64.JPC + case amd64.JPL: + returnCondition = amd64.JMI + case amd64.JMI: + returnCondition = amd64.JPL + default: + panic("BUG: couldn't invert condition") + } + c.assembler.CompileJump(returnCondition).AssignJumpTarget(target) + } else { + skip := c.assembler.CompileJump(skipCondition) + c.compileExitFromNativeCode(status) + c.assembler.SetJumpTargetOnNext(skip) + } +} - // Write back the cached SP to the actual eng.stackPointer. 
-	c.assembler.CompileConstToMemory(amd64.MOVQ, int64(c.locationStack.sp),
-		amd64ReservedRegisterForCallEngine, callEngineStackContextStackPointerOffset)
+func (c *amd64Compiler) compileExitFromNativeCode(status nativeCallStatusCode) {
+	if target := c.compiledTrapTargets[status]; target != nil {
+		c.assembler.CompileJump(amd64.JMP).AssignJumpTarget(target)
+		return
+	}
 
 	switch status {
 	case nativeCallStatusCodeReturned:
+		// Save the target for reuse.
+		c.compiledTrapTargets[status] = c.compileNOP()
 	case nativeCallStatusCodeCallGoHostFunction, nativeCallStatusCodeCallBuiltInFunction:
 		// Read the return address, and write it to callEngine.exitContext.returnAddress.
 		returnAddressReg, ok := c.locationStack.takeFreeRegister(registerTypeGeneralPurpose)
@@ -4916,14 +4872,27 @@ func (c *amd64Compiler) compileExitFromNativeCode(status nativeCallStatusCode) {
 		c.assembler.CompileRegisterToMemory(amd64.MOVQ,
 			returnAddressReg, amd64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset)
 	default:
-		// This case, the execution traps, so take tmpReg and store the instruction address onto callEngine.returnAddress
-		// so that the stack trace can contain the top frame's source position.
-		tmpReg := amd64.RegR15
-		c.assembler.CompileReadInstructionAddress(tmpReg, amd64.MOVQ)
-		c.assembler.CompileRegisterToMemory(amd64.MOVQ,
-			tmpReg, amd64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset)
+		if c.ir.IROperationSourceOffsetsInWasmBinary != nil {
+			// This case, the execution traps and we want the top frame's source position in the stack trace.
+			// Take RegR15 and store the instruction address onto callEngine.returnAddress.
+			returnAddressReg := amd64.RegR15
+			c.assembler.CompileReadInstructionAddress(returnAddressReg, amd64.MOVQ)
+			c.assembler.CompileRegisterToMemory(amd64.MOVQ,
+				returnAddressReg, amd64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset)
+		} else {
+			// We won't use the source position, so just save the target for reuse.
+			c.compiledTrapTargets[status] = c.compileNOP()
+		}
 	}
 
+	// Write the status to callEngine.exitContext.statusCode.
+	c.assembler.CompileConstToMemory(amd64.MOVB, int64(status),
+		amd64ReservedRegisterForCallEngine, callEngineExitContextNativeCallStatusCodeOffset)
+
+	// Write back the cached SP to the actual eng.stackPointer.
+	c.assembler.CompileConstToMemory(amd64.MOVQ, int64(c.locationStack.sp),
+		amd64ReservedRegisterForCallEngine, callEngineStackContextStackPointerOffset)
+
 	c.assembler.CompileStandAlone(amd64.RET)
 }
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_arm64.go b/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_arm64.go
index c306f07b7e..af02481885 100644
--- a/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_arm64.go
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/compiler/impl_arm64.go
@@ -27,6 +27,7 @@ type arm64Compiler struct {
 	stackPointerCeil uint64
 	// assignStackPointerCeilNeeded holds an asm.Node whose AssignDestinationConstant must be called with the determined stack pointer ceiling.
 	assignStackPointerCeilNeeded asm.Node
+	compiledTrapTargets          [nativeCallStatusModuleClosed]asm.Node
 	withListener bool
 	typ          *wasm.FunctionType
 	br           *bytes.Reader
@@ -350,13 +351,9 @@ func (c *arm64Compiler) compileReturnFunction() error {
 	c.compileLoadValueOnStackToRegister(returnAddress)
 	c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, arm64.RegRZR)
 
-	// Br if the address does not equal zero.
-	brIfNotEqual := c.assembler.CompileJump(arm64.BCONDNE)
-	// Otherwise, exit.
-	c.compileExitFromNativeCode(nativeCallStatusCodeReturned)
-
+	// Br if the address does not equal zero, otherwise, exit.
 	// If the address doesn't equal zero, return br into returnAddressRegister (caller's return address).
-	c.assembler.SetJumpTargetOnNext(brIfNotEqual)
+	c.compileMaybeExitFromNativeCode(arm64.BCONDNE, nativeCallStatusCodeReturned)
 
 	// Alias for readability.
 	tmp := arm64CallingConventionModuleInstanceAddressRegister
@@ -382,13 +379,77 @@ func (c *arm64Compiler) compileReturnFunction() error {
 	return nil
 }
 
+func (c *arm64Compiler) compileMaybeExitFromNativeCode(skipCondition asm.Instruction, status nativeCallStatusCode) {
+	if target := c.compiledTrapTargets[status]; target != nil {
+		// We've already compiled this.
+		// Invert the condition to jump into the appropriate target.
+		var trapCondition asm.Instruction
+		switch skipCondition {
+		case arm64.BCONDEQ:
+			trapCondition = arm64.BCONDNE
+		case arm64.BCONDNE:
+			trapCondition = arm64.BCONDEQ
+		case arm64.BCONDLO:
+			trapCondition = arm64.BCONDHS
+		case arm64.BCONDHS:
+			trapCondition = arm64.BCONDLO
+		case arm64.BCONDLS:
+			trapCondition = arm64.BCONDHI
+		case arm64.BCONDHI:
+			trapCondition = arm64.BCONDLS
+		case arm64.BCONDVS:
+			trapCondition = arm64.BCONDVC
+		case arm64.BCONDVC:
+			trapCondition = arm64.BCONDVS
+		default:
+			panic("BUG: couldn't invert condition")
+		}
+		c.assembler.CompileJump(trapCondition).AssignJumpTarget(target)
+	} else {
+		skip := c.assembler.CompileJump(skipCondition)
+		c.compileExitFromNativeCode(status)
+		c.assembler.SetJumpTargetOnNext(skip)
+	}
+}
+
 // compileExitFromNativeCode adds instructions to give the control back to ce.exec with the given status code.
 func (c *arm64Compiler) compileExitFromNativeCode(status nativeCallStatusCode) {
+	if target := c.compiledTrapTargets[status]; target != nil {
+		c.assembler.CompileJump(arm64.B).AssignJumpTarget(target)
+		return
+	}
+
+	switch status {
+	case nativeCallStatusCodeReturned:
+		// Save the target for reuse.
+		c.compiledTrapTargets[status] = c.compileNOP()
+	case nativeCallStatusCodeCallGoHostFunction, nativeCallStatusCodeCallBuiltInFunction:
+		// Read the return address, and write it to callEngine.exitContext.returnAddress.
+		c.assembler.CompileReadInstructionAddress(arm64ReservedRegisterForTemporary, arm64.RET)
+		c.assembler.CompileRegisterToMemory(
+			arm64.STRD, arm64ReservedRegisterForTemporary,
+			arm64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset,
+		)
+	default:
+		if c.ir.IROperationSourceOffsetsInWasmBinary != nil {
+			// This case, the execution traps, and we want the top frame's source position in the stack trace.
+			// We store the instruction address onto callEngine.returnAddress.
+			c.assembler.CompileReadInstructionAddress(arm64ReservedRegisterForTemporary, arm64.STRD)
+			c.assembler.CompileRegisterToMemory(
+				arm64.STRD, arm64ReservedRegisterForTemporary,
+				arm64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset,
+			)
+		} else {
+			// We won't use the source position, so just save the target for reuse.
+			c.compiledTrapTargets[status] = c.compileNOP()
+		}
+	}
+
 	// Write the current stack pointer to the ce.stackPointer.
c.assembler.CompileConstToRegister(arm64.MOVD, int64(c.locationStack.sp), arm64ReservedRegisterForTemporary) c.assembler.CompileRegisterToMemory(arm64.STRD, arm64ReservedRegisterForTemporary, arm64ReservedRegisterForCallEngine, callEngineStackContextStackPointerOffset) + // Write the status to callEngine.exitContext.statusCode. if status != 0 { c.assembler.CompileConstToRegister(arm64.MOVW, int64(status), arm64ReservedRegisterForTemporary) c.assembler.CompileRegisterToMemory(arm64.STRW, arm64ReservedRegisterForTemporary, @@ -399,25 +460,6 @@ func (c *arm64Compiler) compileExitFromNativeCode(status nativeCallStatusCode) { arm64ReservedRegisterForCallEngine, callEngineExitContextNativeCallStatusCodeOffset) } - switch status { - case nativeCallStatusCodeReturned: - case nativeCallStatusCodeCallGoHostFunction, nativeCallStatusCodeCallBuiltInFunction: - // Read the return address, and write it to callEngine.exitContext.returnAddress. - c.assembler.CompileReadInstructionAddress(arm64ReservedRegisterForTemporary, arm64.RET) - c.assembler.CompileRegisterToMemory( - arm64.STRD, arm64ReservedRegisterForTemporary, - arm64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset, - ) - default: - // This case, the execution traps, store the instruction address onto callEngine.returnAddress - // so that the stack trace can contain the top frame's source position. - c.assembler.CompileReadInstructionAddress(arm64ReservedRegisterForTemporary, arm64.STRD) - c.assembler.CompileRegisterToMemory( - arm64.STRD, arm64ReservedRegisterForTemporary, - arm64ReservedRegisterForCallEngine, callEngineExitContextReturnAddressOffset, - ) - } - // The return address to the Go code is stored in archContext.compilerReturnAddress which // is embedded in ce. We load the value to the tmpRegister, and then // invoke RET with that register. @@ -1163,12 +1205,9 @@ func (c *arm64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) (err err // "cmp tmp2, offset" c.assembler.CompileTwoRegistersToNone(arm64.CMP, tmp2, offsetReg) - // If it exceeds len(table), we exit the execution. - brIfOffsetOK := c.assembler.CompileJump(arm64.BCONDLO) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - + // If it exceeds len(table), we trap. + c.compileMaybeExitFromNativeCode(arm64.BCONDLO, nativeCallStatusCodeInvalidTableAccess) // Otherwise, we proceed to do function type check. - c.assembler.SetJumpTargetOnNext(brIfOffsetOK) // We need to obtain the absolute address of table element. // "tmp = &Tables[tableIndex].table[0]" @@ -1192,10 +1231,10 @@ func (c *arm64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) (err err // Check if the value of table[offset] equals zero, meaning that the target element is uninitialized. c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64.RegRZR, offsetReg) - brIfInitialized := c.assembler.CompileJump(arm64.BCONDNE) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(brIfInitialized) + // Skipped if the target is initialized. + c.compileMaybeExitFromNativeCode(arm64.BCONDNE, nativeCallStatusCodeInvalidTableAccess) + // next we check the type matches, i.e. table[offset].source.TypeID == targetFunctionType. // "tmp = table[offset].typeID" c.assembler.CompileMemoryToRegister( @@ -1211,10 +1250,8 @@ func (c *arm64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) (err err // Compare these two values, and if they equal, we are ready to make function call. 
c.assembler.CompileTwoRegistersToNone(arm64.CMPW, tmp, tmp2) - brIfTypeMatched := c.assembler.CompileJump(arm64.BCONDEQ) - c.compileExitFromNativeCode(nativeCallStatusCodeTypeMismatchOnIndirectCall) - - c.assembler.SetJumpTargetOnNext(brIfTypeMatched) + // Skipped if the type matches. + c.compileMaybeExitFromNativeCode(arm64.BCONDEQ, nativeCallStatusCodeTypeMismatchOnIndirectCall) targetFunctionType := &c.ir.Types[typeIndex] if err := c.compileCallImpl(offsetReg, targetFunctionType); err != nil { @@ -1720,11 +1757,8 @@ func (c *arm64Compiler) compileIntegerDivPrecheck(is32Bit, isSigned bool, divide c.assembler.CompileTwoRegistersToNone(cmpInst, arm64.RegRZR, divisor) // If it is zero, we exit with nativeCallStatusIntegerDivisionByZero. - brIfDivisorNonZero := c.assembler.CompileJump(arm64.BCONDNE) - c.compileExitFromNativeCode(nativeCallStatusIntegerDivisionByZero) - + c.compileMaybeExitFromNativeCode(arm64.BCONDNE, nativeCallStatusIntegerDivisionByZero) // Otherwise, we proceed. - c.assembler.SetJumpTargetOnNext(brIfDivisorNonZero) // If the operation is a signed integer div, we have to do an additional check on overflow. if isSigned { @@ -1747,13 +1781,10 @@ func (c *arm64Compiler) compileIntegerDivPrecheck(is32Bit, isSigned bool, divide c.assembler.CompileTwoRegistersToNone(cmpInst, arm64ReservedRegisterForTemporary, dividend) // If they not equal, we are safe to execute the division. - brIfDividendNotMinInt := c.assembler.CompileJump(arm64.BCONDNE) - // Otherwise, we raise overflow error. - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) + c.compileMaybeExitFromNativeCode(arm64.BCONDNE, nativeCallStatusIntegerOverflow) c.assembler.SetJumpTargetOnNext(brIfDivisorNonMinusOne) - c.assembler.SetJumpTargetOnNext(brIfDividendNotMinInt) } return nil } @@ -1802,11 +1833,8 @@ func (c *arm64Compiler) compileRem(o *wazeroir.UnionOperation) error { c.assembler.CompileTwoRegistersToNone(cmpInst, arm64.RegRZR, divisorReg) // If it is zero, we exit with nativeCallStatusIntegerDivisionByZero. - brIfDivisorNonZero := c.assembler.CompileJump(arm64.BCONDNE) - c.compileExitFromNativeCode(nativeCallStatusIntegerDivisionByZero) - + c.compileMaybeExitFromNativeCode(arm64.BCONDNE, nativeCallStatusIntegerDivisionByZero) // Otherwise, we proceed. - c.assembler.SetJumpTargetOnNext(brIfDivisorNonZero) // Temporarily mark them used to allocate a result register while keeping these values. c.markRegisterUsed(dividend.register, divisor.register) @@ -2256,13 +2284,10 @@ func (c *arm64Compiler) compileITruncFromF(o *wazeroir.UnionOperation) error { c.assembler.CompileTwoRegistersToNone(floatcmp, sourceReg, sourceReg) // VS flag is set if at least one of values for FCMP is NaN. // https://developer.arm.com/documentation/dui0801/g/Condition-Codes/Comparison-of-condition-code-meanings-in-integer-and-floating-point-code - brIfSourceNaN := c.assembler.CompileJump(arm64.BCONDVS) - // If the source value is not NaN, the operation was overflow. - c.compileExitFromNativeCode(nativeCallStatusIntegerOverflow) + c.compileMaybeExitFromNativeCode(arm64.BCONDVS, nativeCallStatusIntegerOverflow) // Otherwise, the operation was invalid as this is trying to convert NaN to integer. - c.assembler.SetJumpTargetOnNext(brIfSourceNaN) c.compileExitFromNativeCode(nativeCallStatusCodeInvalidFloatToIntConversion) // Otherwise, we branch into the next instruction. 
@@ -2822,7 +2847,7 @@ func (c *arm64Compiler) compileStoreImpl(offsetArg uint32, storeInst asm.Instruc return nil } -// compileMemoryAccessOffsetSetup pops the top value from the stack (called "base"), stores "base + offsetArg + targetSizeInBytes" +// compileMemoryAccessOffsetSetup pops the top value from the stack (called "base"), stores "base + offsetArg" // into a register, and returns the stored register. We call the result "offset" because we access the memory // as memory.Buffer[offset: offset+targetSizeInBytes]. // @@ -2859,14 +2884,12 @@ func (c *arm64Compiler) compileMemoryAccessOffsetSetup(offsetArg uint32, targetS // Check if offsetRegister(= base+offsetArg+targetSizeInBytes) > len(memory.Buffer). c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, offsetRegister) - boundsOK := c.assembler.CompileJump(arm64.BCONDLS) // If offsetRegister(= base+offsetArg+targetSizeInBytes) exceeds the memory length, // we exit the function with nativeCallStatusCodeMemoryOutOfBounds. - c.compileExitFromNativeCode(nativeCallStatusCodeMemoryOutOfBounds) + c.compileMaybeExitFromNativeCode(arm64.BCONDLS, nativeCallStatusCodeMemoryOutOfBounds) // Otherwise, we subtract targetSizeInBytes from offsetRegister. - c.assembler.SetJumpTargetOnNext(boundsOK) c.assembler.CompileConstToRegister(arm64.SUB, targetSizeInBytes, offsetRegister) return offsetRegister, nil } @@ -3124,13 +3147,10 @@ func (c *arm64Compiler) compileInitImpl(isTable bool, index, tableIndex uint32) arm64ReservedRegisterForTemporary) c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, sourceOffset.register) - sourceBoundsOK := c.assembler.CompileJump(arm64.BCONDLS) - // If not, raise out of bounds memory access error. - c.compileExitFromNativeCode(outOfBoundsErrorStatus) - - c.assembler.SetJumpTargetOnNext(sourceBoundsOK) + c.compileMaybeExitFromNativeCode(arm64.BCONDLS, outOfBoundsErrorStatus) + // Otherwise, ready to copy the value from destination to source. // Check destination bounds. if isTable { // arm64ReservedRegisterForTemporary = &tables[0] @@ -3154,14 +3174,10 @@ func (c *arm64Compiler) compileInitImpl(isTable bool, index, tableIndex uint32) } c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, destinationOffset.register) - destinationBoundsOK := c.assembler.CompileJump(arm64.BCONDLS) - // If not, raise out of bounds memory access error. - c.compileExitFromNativeCode(outOfBoundsErrorStatus) + c.compileMaybeExitFromNativeCode(arm64.BCONDLS, outOfBoundsErrorStatus) // Otherwise, ready to copy the value from source to destination. - c.assembler.SetJumpTargetOnNext(destinationBoundsOK) - if !isZeroRegister(copySize.register) { // If the size equals zero, we can skip the entire instructions beflow. c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64.RegRZR, copySize.register) @@ -3345,12 +3361,8 @@ func (c *arm64Compiler) compileCopyImpl(isTable bool, srcTableIndex, dstTableInd // Check memory len >= sourceOffset. c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, sourceOffset.register) - sourceBoundsOK := c.assembler.CompileJump(arm64.BCONDLS) - // If not, raise out of bounds memory access error. - c.compileExitFromNativeCode(outOfBoundsErrorStatus) - - c.assembler.SetJumpTargetOnNext(sourceBoundsOK) + c.compileMaybeExitFromNativeCode(arm64.BCONDLS, outOfBoundsErrorStatus) // Otherwise, check memory len >= destinationOffset. 
if isTable { @@ -3371,14 +3383,10 @@ func (c *arm64Compiler) compileCopyImpl(isTable bool, srcTableIndex, dstTableInd } c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, destinationOffset.register) - destinationBoundsOK := c.assembler.CompileJump(arm64.BCONDLS) - // If not, raise out of bounds memory access error. - c.compileExitFromNativeCode(outOfBoundsErrorStatus) + c.compileMaybeExitFromNativeCode(arm64.BCONDLS, outOfBoundsErrorStatus) // Otherwise, ready to copy the value from source to destination. - c.assembler.SetJumpTargetOnNext(destinationBoundsOK) - var ldr, str asm.Instruction var movSize int64 if isTable { @@ -3545,6 +3553,11 @@ func (c *arm64Compiler) compileMemoryFill() error { // TODO: the compiled code in this function should be reused and compile at once as // the code is independent of any module. func (c *arm64Compiler) compileFillImpl(isTable bool, tableIndex uint32) error { + outOfBoundsErrorStatus := nativeCallStatusCodeMemoryOutOfBounds + if isTable { + outOfBoundsErrorStatus = nativeCallStatusCodeInvalidTableAccess + } + fillSize, err := c.popValueOnRegister() if err != nil { return err @@ -3597,19 +3610,12 @@ func (c *arm64Compiler) compileFillImpl(isTable bool, tableIndex uint32) error { // Check len >= destinationOffset. c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64ReservedRegisterForTemporary, destinationOffset.register) - destinationBoundsOK := c.assembler.CompileJump(arm64.BCONDLS) // If not, raise the runtime error. - if isTable { - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - } else { - c.compileExitFromNativeCode(nativeCallStatusCodeMemoryOutOfBounds) - } + c.compileMaybeExitFromNativeCode(arm64.BCONDLS, outOfBoundsErrorStatus) // Otherwise, ready to copy the value from destination to source. - c.assembler.SetJumpTargetOnNext(destinationBoundsOK) - - // If the size equals zero, we can skip the entire instructions beflow. + // If the size equals zero, we can skip the entire instructions below. c.assembler.CompileTwoRegistersToNone(arm64.CMP, arm64.RegRZR, fillSize.register) skipCopyJump := c.assembler.CompileJump(arm64.BCONDEQ) @@ -3781,9 +3787,7 @@ func (c *arm64Compiler) compileTableGet(o *wazeroir.UnionOperation) error { c.assembler.CompileTwoRegistersToNone(arm64.CMP, ref, offset.register) // If it exceeds len(table), we exit the execution. - brIfBoundsOK := c.assembler.CompileJump(arm64.BCONDLO) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(brIfBoundsOK) + c.compileMaybeExitFromNativeCode(arm64.BCONDLO, nativeCallStatusCodeInvalidTableAccess) // ref = [&tables[TableIndex] + tableInstanceTableOffset] = &tables[TableIndex].References[0] c.assembler.CompileMemoryToRegister(arm64.LDRD, @@ -3843,9 +3847,7 @@ func (c *arm64Compiler) compileTableSet(o *wazeroir.UnionOperation) error { c.assembler.CompileTwoRegistersToNone(arm64.CMP, tmp, offset.register) // If it exceeds len(table), we exit the execution. 
- brIfBoundsOK := c.assembler.CompileJump(arm64.BCONDLO) - c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) - c.assembler.SetJumpTargetOnNext(brIfBoundsOK) + c.compileMaybeExitFromNativeCode(arm64.BCONDLO, nativeCallStatusCodeInvalidTableAccess) // tmp = [&tables[TableIndex] + tableInstanceTableOffset] = &tables[TableIndex].References[0] c.assembler.CompileMemoryToRegister(arm64.LDRD, diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_test.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_test.go index 36909681b6..3f10c61269 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_test.go +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_test.go @@ -28,7 +28,7 @@ var ( emptyFile = "empty.txt" ) -func TestFileSetNonblock(t *testing.T) { +func TestStdioFileSetNonblock(t *testing.T) { // Test using os.Pipe as it is known to support non-blocking reads. r, w, err := os.Pipe() require.NoError(t, err) @@ -47,6 +47,54 @@ func TestFileSetNonblock(t *testing.T) { require.False(t, rF.IsNonblock()) } +func TestRegularFileSetNonblock(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Nonblock on regular files is not supported on Windows") + } + + // Test using os.Pipe as it is known to support non-blocking reads. + r, w, err := os.Pipe() + require.NoError(t, err) + defer r.Close() + defer w.Close() + + rF := newOsFile("", syscall.O_RDONLY, 0, r) + + errno := rF.SetNonblock(true) + require.EqualErrno(t, 0, errno) + require.True(t, rF.IsNonblock()) + + // Read from the file without ever writing to it should not block. + buf := make([]byte, 8) + _, e := rF.Read(buf) + require.EqualErrno(t, syscall.EAGAIN, e) + + errno = rF.SetNonblock(false) + require.EqualErrno(t, 0, errno) + require.False(t, rF.IsNonblock()) +} + +func TestReadFdNonblock(t *testing.T) { + // Test using os.Pipe as it is known to support non-blocking reads. + r, w, err := os.Pipe() + require.NoError(t, err) + defer r.Close() + defer w.Close() + + fd := r.Fd() + err = setNonblock(fd, true) + require.NoError(t, err) + + // Read from the file without ever writing to it should not block. + buf := make([]byte, 8) + _, e := readFd(fd, buf) + if runtime.GOOS == "windows" { + require.EqualErrno(t, syscall.ENOSYS, e) + } else { + require.EqualErrno(t, syscall.EAGAIN, e) + } +} + func TestFileSetAppend(t *testing.T) { tmpDir := t.TempDir() diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_unix.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_unix.go new file mode 100644 index 0000000000..e451df820b --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_unix.go @@ -0,0 +1,21 @@ +//go:build unix || darwin || linux + +package sysfs + +import ( + "syscall" + + "github.com/tetratelabs/wazero/internal/platform" +) + +const NonBlockingFileIoSupported = true + +// readFd exposes syscall.Read. +func readFd(fd uintptr, buf []byte) (int, syscall.Errno) { + if len(buf) == 0 { + return 0, 0 // Short-circuit 0-len reads. 
+ } + n, err := syscall.Read(int(fd), buf) + errno := platform.UnwrapOSError(err) + return n, errno +} diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_unsupported.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_unsupported.go new file mode 100644 index 0000000000..cb4bddb339 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/file_unsupported.go @@ -0,0 +1,12 @@ +//go:build !unix && !linux && !darwin + +package sysfs + +import "syscall" + +const NonBlockingFileIoSupported = false + +// readFd returns ENOSYS on unsupported platforms. +func readFd(fd uintptr, buf []byte) (int, syscall.Errno) { + return -1, syscall.ENOSYS +} diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/open_file_windows.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/open_file_windows.go index 0800dc7bad..d9297d7e8b 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/open_file_windows.go +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/open_file_windows.go @@ -13,7 +13,7 @@ import ( func newOsFile(openPath string, openFlag int, openPerm fs.FileMode, f *os.File) fsapi.File { return &windowsOsFile{ - osFile: osFile{path: openPath, flag: openFlag, perm: openPerm, file: f}, + osFile: osFile{path: openPath, flag: openFlag, perm: openPerm, file: f, fd: f.Fd()}, } } diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/osfile.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/osfile.go index e919a23380..95798f4820 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/osfile.go +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/osfile.go @@ -12,7 +12,7 @@ import ( ) func newDefaultOsFile(openPath string, openFlag int, openPerm fs.FileMode, f *os.File) fsapi.File { - return &osFile{path: openPath, flag: openFlag, perm: openPerm, file: f} + return &osFile{path: openPath, flag: openFlag, perm: openPerm, file: f, fd: f.Fd()} } // osFile is a file opened with this package, and uses os.File or syscalls to @@ -22,6 +22,7 @@ type osFile struct { flag int perm fs.FileMode file *os.File + fd uintptr // closed is true when closed was called. This ensures proper syscall.EBADF closed bool @@ -92,7 +93,7 @@ func (f *osFile) SetNonblock(enable bool) (errno syscall.Errno) { } else { f.flag &= ^fsapi.O_NONBLOCK } - if err := setNonblock(f.file.Fd(), enable); err != nil { + if err := setNonblock(f.fd, enable); err != nil { return fileError(f, f.closed, platform.UnwrapOSError(err)) } return 0 @@ -126,7 +127,15 @@ func (f *osFile) Stat() (fsapi.Stat_t, syscall.Errno) { // Read implements the same method as documented on fsapi.File func (f *osFile) Read(buf []byte) (n int, errno syscall.Errno) { - if n, errno = read(f.file, buf); errno != 0 { + if len(buf) == 0 { + return 0, 0 // Short-circuit 0-len reads. + } + if NonBlockingFileIoSupported && f.IsNonblock() { + n, errno = readFd(f.fd, buf) + } else { + n, errno = read(f.file, buf) + } + if errno != 0 { // Defer validation overhead until we've already had an error. errno = fileError(f, f.closed, errno) } @@ -160,7 +169,7 @@ func (f *osFile) Seek(offset int64, whence int) (newOffset int64, errno syscall. 
 // PollRead implements the same method as documented on fsapi.File
 func (f *osFile) PollRead(timeout *time.Duration) (ready bool, errno syscall.Errno) {
 	fdSet := platform.FdSet{}
-	fd := int(f.file.Fd())
+	fd := int(f.fd)
 	fdSet.Set(fd)
 	nfds := fd + 1 // See https://man7.org/linux/man-pages/man2/select.2.html#:~:text=condition%20has%20occurred.-,nfds,-This%20argument%20should
 	count, err := _select(nfds, &fdSet, nil, nil, timeout)
@@ -232,7 +241,7 @@ func (f *osFile) Chown(uid, gid int) syscall.Errno {
 		return syscall.EBADF
 	}
 
-	return fchown(f.file.Fd(), uid, gid)
+	return fchown(f.fd, uid, gid)
 }
 
 // Utimens implements the same method as documented on fsapi.File
@@ -241,7 +250,7 @@ func (f *osFile) Utimens(times *[2]syscall.Timespec) syscall.Errno {
 		return syscall.EBADF
 	}
 
-	err := futimens(f.file.Fd(), times)
+	err := futimens(f.fd, times)
 	return platform.UnwrapOSError(err)
 }
diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock.go
index 6a23f15552..62bef426fb 100644
--- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock.go
+++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock.go
@@ -6,149 +6,32 @@ import (
 	"syscall"
 
 	"github.com/tetratelabs/wazero/internal/fsapi"
-	"github.com/tetratelabs/wazero/internal/platform"
 	socketapi "github.com/tetratelabs/wazero/internal/sock"
 )
 
+// NewTCPListenerFile creates a socketapi.TCPSock for a given *net.TCPListener.
 func NewTCPListenerFile(tl *net.TCPListener) socketapi.TCPSock {
-	return &tcpListenerFile{tl: tl}
+	return newTCPListenerFile(tl)
 }
 
-var _ socketapi.TCPSock = (*tcpListenerFile)(nil)
-
-type tcpListenerFile struct {
+// baseSockFile implements base behavior for all TCPSock, TCPConn files,
+// regardless of the platform.
+type baseSockFile struct {
 	fsapi.UnimplementedFile
-
-	tl *net.TCPListener
 }
 
-// Accept implements the same method as documented on socketapi.TCPSock
-func (f *tcpListenerFile) Accept() (socketapi.TCPConn, syscall.Errno) {
-	conn, err := f.tl.Accept()
-	if err != nil {
-		return nil, platform.UnwrapOSError(err)
-	}
-	return &tcpConnFile{tc: conn.(*net.TCPConn)}, 0
-}
+var _ fsapi.File = (*baseSockFile)(nil)
 
 // IsDir implements the same method as documented on File.IsDir
-func (*tcpListenerFile) IsDir() (bool, syscall.Errno) {
+func (*baseSockFile) IsDir() (bool, syscall.Errno) {
 	// We need to override this method because WASI-libc prestats the FD
 	// and the default impl returns ENOSYS otherwise.
 	return false, 0
 }
 
 // Stat implements the same method as documented on File.Stat
-func (f *tcpListenerFile) Stat() (fs fsapi.Stat_t, errno syscall.Errno) {
+func (f *baseSockFile) Stat() (fs fsapi.Stat_t, errno syscall.Errno) {
 	// The mode is not really important, but it should be neither a regular file nor a directory.
 	fs.Mode = os.ModeIrregular
 	return
 }
-
-// Close implements the same method as documented on fsapi.File
-func (f *tcpListenerFile) Close() syscall.Errno {
-	return platform.UnwrapOSError(f.tl.Close())
-}
-
-// Addr is exposed for testing.
-func (f *tcpListenerFile) Addr() *net.TCPAddr {
-	return f.tl.Addr().(*net.TCPAddr)
-}
-
-var _ socketapi.TCPConn = (*tcpConnFile)(nil)
-
-type tcpConnFile struct {
-	fsapi.UnimplementedFile
-
-	tc *net.TCPConn
-
-	// closed is true when closed was called.
This ensures proper syscall.EBADF - closed bool -} - -// IsDir implements the same method as documented on File.IsDir -func (*tcpConnFile) IsDir() (bool, syscall.Errno) { - // We need to override this method because WASI-libc prestats the FD - // and the default impl returns ENOSYS otherwise. - return false, 0 -} - -// Stat implements the same method as documented on File.Stat -func (f *tcpConnFile) Stat() (fs fsapi.Stat_t, errno syscall.Errno) { - // The mode is not really important, but it should be neither a regular file nor a directory. - fs.Mode = os.ModeIrregular - return -} - -// SetNonblock implements the same method as documented on fsapi.File -func (f *tcpConnFile) SetNonblock(enabled bool) (errno syscall.Errno) { - syscallConn, err := f.tc.SyscallConn() - if err != nil { - return platform.UnwrapOSError(err) - } - - // Prioritize the error from setNonblock over Control - if controlErr := syscallConn.Control(func(fd uintptr) { - errno = platform.UnwrapOSError(setNonblock(fd, enabled)) - }); errno == 0 { - errno = platform.UnwrapOSError(controlErr) - } - return -} - -// Read implements the same method as documented on fsapi.File -func (f *tcpConnFile) Read(buf []byte) (n int, errno syscall.Errno) { - if n, errno = read(f.tc, buf); errno != 0 { - // Defer validation overhead until we've already had an error. - errno = fileError(f, f.closed, errno) - } - return -} - -// Write implements the same method as documented on fsapi.File -func (f *tcpConnFile) Write(buf []byte) (n int, errno syscall.Errno) { - if n, errno = write(f.tc, buf); errno != 0 { - // Defer validation overhead until we've alwritey had an error. - errno = fileError(f, f.closed, errno) - } - return -} - -// Recvfrom implements the same method as documented on socketapi.TCPConn -func (f *tcpConnFile) Recvfrom(p []byte, flags int) (n int, errno syscall.Errno) { - if flags != MSG_PEEK { - errno = syscall.EINVAL - return - } - return recvfromPeek(f.tc, p) -} - -// Shutdown implements the same method as documented on fsapi.Conn -func (f *tcpConnFile) Shutdown(how int) syscall.Errno { - // FIXME: can userland shutdown listeners? - var err error - switch how { - case syscall.SHUT_RD: - err = f.tc.CloseRead() - case syscall.SHUT_WR: - err = f.tc.CloseWrite() - case syscall.SHUT_RDWR: - return f.close() - default: - return syscall.EINVAL - } - return platform.UnwrapOSError(err) -} - -// Close implements the same method as documented on fsapi.File -func (f *tcpConnFile) Close() syscall.Errno { - return f.close() -} - -func (f *tcpConnFile) close() syscall.Errno { - if f.closed { - return 0 - } - f.closed = true - return f.Shutdown(syscall.SHUT_RDWR) -} diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unix.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unix.go index 1c59af488e..aa3d3bb59d 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unix.go +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unix.go @@ -7,24 +7,149 @@ import ( "syscall" "github.com/tetratelabs/wazero/internal/platform" + socketapi "github.com/tetratelabs/wazero/internal/sock" ) +// MSG_PEEK is the constant syscall.MSG_PEEK const MSG_PEEK = syscall.MSG_PEEK -// recvfromPeek exposes syscall.Recvfrom with flag MSG_PEEK on POSIX systems. -func recvfromPeek(conn *net.TCPConn, p []byte) (n int, errno syscall.Errno) { - syscallConn, err := conn.SyscallConn() +// newTCPListenerFile is a constructor for a socketapi.TCPSock. 
+// +// Note: the implementation of socketapi.TCPSock goes straight +// to the syscall layer, bypassing most of the Go library. +// For an alternative approach, consider winTcpListenerFile +// where most APIs are implemented with regular Go std-lib calls. +func newTCPListenerFile(tl *net.TCPListener) socketapi.TCPSock { + conn, err := tl.File() if err != nil { - return 0, platform.UnwrapOSError(err) + panic(err) } + fd := conn.Fd() + // We need to duplicate this file handle, or the lifecycle will be tied + // to the TCPListener. We rely on the TCPListener only to set up + // the connection correctly and parse/resolve the TCP Address + // (notice we actually rely on the listener in the Windows implementation). + sysfd, err := syscall.Dup(int(fd)) + if err != nil { + panic(err) + } + return &tcpListenerFile{fd: uintptr(sysfd), addr: tl.Addr().(*net.TCPAddr)} +} + +var _ socketapi.TCPSock = (*tcpListenerFile)(nil) + +type tcpListenerFile struct { + baseSockFile + + fd uintptr + addr *net.TCPAddr +} + +// Accept implements the same method as documented on socketapi.TCPSock +func (f *tcpListenerFile) Accept() (socketapi.TCPConn, syscall.Errno) { + nfd, _, err := syscall.Accept(int(f.fd)) + errno := platform.UnwrapOSError(err) + if errno != 0 { + return nil, errno + } + return &tcpConnFile{fd: uintptr(nfd)}, 0 +} + +// SetNonblock implements the same method as documented on fsapi.File +func (f *tcpListenerFile) SetNonblock(enabled bool) syscall.Errno { + return platform.UnwrapOSError(setNonblock(f.fd, enabled)) +} + +// Close implements the same method as documented on fsapi.File +func (f *tcpListenerFile) Close() syscall.Errno { + return platform.UnwrapOSError(syscall.Close(int(f.fd))) +} + +// Addr is exposed for testing. +func (f *tcpListenerFile) Addr() *net.TCPAddr { + return f.addr +} + +var _ socketapi.TCPConn = (*tcpConnFile)(nil) + +type tcpConnFile struct { + baseSockFile + + fd uintptr + + // closed is true when closed was called. This ensures proper syscall.EBADF + closed bool +} + +func newTcpConn(tc *net.TCPConn) socketapi.TCPConn { + f, err := tc.File() + if err != nil { + panic(err) + } + return &tcpConnFile{fd: f.Fd()} +} + +// SetNonblock implements the same method as documented on fsapi.File +func (f *tcpConnFile) SetNonblock(enabled bool) (errno syscall.Errno) { + return platform.UnwrapOSError(setNonblock(f.fd, enabled)) +} + +// Read implements the same method as documented on fsapi.File +func (f *tcpConnFile) Read(buf []byte) (n int, errno syscall.Errno) { + n, err := syscall.Read(int(f.fd), buf) + if err != nil { + // Defer validation overhead until we've already had an error. + errno = platform.UnwrapOSError(err) + errno = fileError(f, f.closed, errno) + } + return n, errno +} + +// Write implements the same method as documented on fsapi.File +func (f *tcpConnFile) Write(buf []byte) (n int, errno syscall.Errno) { + n, err := syscall.Write(int(f.fd), buf) + if err != nil { + // Defer validation overhead until we've already had an error. 
+ errno = platform.UnwrapOSError(err) + errno = fileError(f, f.closed, errno) + } + return n, errno +} + +// Recvfrom implements the same method as documented on socketapi.TCPConn +func (f *tcpConnFile) Recvfrom(p []byte, flags int) (n int, errno syscall.Errno) { + if flags != MSG_PEEK { + errno = syscall.EINVAL + return + } + n, _, recvfromErr := syscall.Recvfrom(int(f.fd), p, MSG_PEEK) + errno = platform.UnwrapOSError(recvfromErr) + return n, errno +} + +// Shutdown implements the same method as documented on fsapi.Conn +func (f *tcpConnFile) Shutdown(how int) syscall.Errno { + var err error + switch how { + case syscall.SHUT_RD, syscall.SHUT_WR: + err = syscall.Shutdown(int(f.fd), how) + case syscall.SHUT_RDWR: + return f.close() + default: + return syscall.EINVAL + } + return platform.UnwrapOSError(err) +} + +// Close implements the same method as documented on fsapi.File +func (f *tcpConnFile) Close() syscall.Errno { + return f.close() +} - // Prioritize the error from Recvfrom over Control - if controlErr := syscallConn.Control(func(fd uintptr) { - var recvfromErr error - n, _, recvfromErr = syscall.Recvfrom(int(fd), p, MSG_PEEK) - errno = platform.UnwrapOSError(recvfromErr) - }); errno == 0 { - errno = platform.UnwrapOSError(controlErr) +func (f *tcpConnFile) close() syscall.Errno { + if f.closed { + return 0 } - return + f.closed = true + return platform.UnwrapOSError(syscall.Shutdown(int(f.fd), syscall.SHUT_RDWR)) } diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unsupported.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unsupported.go index 5105d0a704..57e8eb10a2 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unsupported.go +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_unsupported.go @@ -5,11 +5,22 @@ package sysfs import ( "net" "syscall" + + socketapi "github.com/tetratelabs/wazero/internal/sock" ) -// MSG_PEEK is a filler value +// MSG_PEEK is a filler value. const MSG_PEEK = 0x2 -func recvfromPeek(conn *net.TCPConn, p []byte) (n int, errno syscall.Errno) { - return 0, syscall.ENOSYS +func newTCPListenerFile(tl *net.TCPListener) socketapi.TCPSock { + return &unsupportedSockFile{} +} + +type unsupportedSockFile struct { + baseSockFile +} + +// Accept implements the same method as documented on socketapi.TCPSock +func (f *unsupportedSockFile) Accept() (socketapi.TCPConn, syscall.Errno) { + return nil, syscall.ENOSYS } diff --git a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_windows.go b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_windows.go index 1ff680c66f..9f3b46913a 100644 --- a/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_windows.go +++ b/vendor/github.com/tetratelabs/wazero/internal/sysfs/sock_windows.go @@ -8,31 +8,13 @@ import ( "unsafe" "github.com/tetratelabs/wazero/internal/platform" + socketapi "github.com/tetratelabs/wazero/internal/sock" ) // MSG_PEEK is the flag PEEK for syscall.Recvfrom on Windows. // This constant is not exported on this platform. const MSG_PEEK = 0x2 -// recvfromPeek exposes syscall.Recvfrom with flag MSG_PEEK on Windows. 
-func recvfromPeek(conn *net.TCPConn, p []byte) (n int, errno syscall.Errno) {
-	syscallConn, err := conn.SyscallConn()
-	if err != nil {
-		errno = platform.UnwrapOSError(err)
-		return
-	}
-
-	// Prioritize the error from recvfrom over Control
-	if controlErr := syscallConn.Control(func(fd uintptr) {
-		var recvfromErr error
-		n, recvfromErr = recvfrom(syscall.Handle(fd), p, MSG_PEEK)
-		errno = platform.UnwrapOSError(recvfromErr)
-	}); errno == 0 {
-		errno = platform.UnwrapOSError(controlErr)
-	}
-	return
-}
-
 var (
 	// modws2_32 is WinSock.
 	modws2_32 = syscall.NewLazyDLL("ws2_32.dll")
@@ -61,3 +43,150 @@ func recvfrom(s syscall.Handle, buf []byte, flags int32) (n int, errno syscall.E
 		0) // fromlen *int (optional)
 	return int(r0), e1
 }
+
+// newTCPListenerFile is a constructor for a socketapi.TCPSock.
+//
+// Note: currently the Windows implementation of socketapi.TCPSock
+// returns a winTcpListenerFile, which is a specialized TCPSock
+// that delegates to a *net.TCPListener.
+// The current strategy is to delegate most behavior to the Go
+// standard library, instead of invoking syscalls/Win32 APIs,
+// because they are significantly different from Unix's.
+func newTCPListenerFile(tl *net.TCPListener) socketapi.TCPSock {
+	return &winTcpListenerFile{tl: tl}
+}
+
+var _ socketapi.TCPSock = (*winTcpListenerFile)(nil)
+
+type winTcpListenerFile struct {
+	baseSockFile
+
+	tl *net.TCPListener
+}
+
+// Accept implements the same method as documented on socketapi.TCPSock
+func (f *winTcpListenerFile) Accept() (socketapi.TCPConn, syscall.Errno) {
+	conn, err := f.tl.Accept()
+	if err != nil {
+		return nil, platform.UnwrapOSError(err)
+	}
+	return &winTcpConnFile{tc: conn.(*net.TCPConn)}, 0
+}
+
+// SetNonblock implements the same method as documented on fsapi.File
+func (f *winTcpListenerFile) SetNonblock(enabled bool) syscall.Errno {
+	return 0 // setNonblock() is a no-op on Windows
+}
+
+// Close implements the same method as documented on fsapi.File
+func (f *winTcpListenerFile) Close() syscall.Errno {
+	return platform.UnwrapOSError(f.tl.Close())
+}
+
+// Addr is exposed for testing.
+func (f *winTcpListenerFile) Addr() *net.TCPAddr {
+	return f.tl.Addr().(*net.TCPAddr)
+}
+
+var _ socketapi.TCPConn = (*winTcpConnFile)(nil)
+
+type winTcpConnFile struct {
+	baseSockFile
+
+	tc *net.TCPConn
+
+	// closed is true when closed was called. This ensures proper syscall.EBADF
+	closed bool
+}
+
+func newTcpConn(tc *net.TCPConn) socketapi.TCPConn {
+	return &winTcpConnFile{tc: tc}
+}
+
+// SetNonblock implements the same method as documented on fsapi.File
+func (f *winTcpConnFile) SetNonblock(enabled bool) (errno syscall.Errno) {
+	syscallConn, err := f.tc.SyscallConn()
+	if err != nil {
+		return platform.UnwrapOSError(err)
+	}
+
+	// Prioritize the error from setNonblock over Control
+	if controlErr := syscallConn.Control(func(fd uintptr) {
+		errno = platform.UnwrapOSError(setNonblock(fd, enabled))
+	}); errno == 0 {
+		errno = platform.UnwrapOSError(controlErr)
+	}
+	return
+}
+
+// Read implements the same method as documented on fsapi.File
+func (f *winTcpConnFile) Read(buf []byte) (n int, errno syscall.Errno) {
+	if n, errno = read(f.tc, buf); errno != 0 {
+		// Defer validation overhead until we've already had an error.
+ errno = fileError(f, f.closed, errno) + } + return +} + +// Write implements the same method as documented on fsapi.File +func (f *winTcpConnFile) Write(buf []byte) (n int, errno syscall.Errno) { + if n, errno = write(f.tc, buf); errno != 0 { + // Defer validation overhead until we've already had an error. + errno = fileError(f, f.closed, errno) + } + return +} + +// Recvfrom implements the same method as documented on socketapi.TCPConn +func (f *winTcpConnFile) Recvfrom(p []byte, flags int) (n int, errno syscall.Errno) { + if flags != MSG_PEEK { + errno = syscall.EINVAL + return + } + conn := f.tc + syscallConn, err := conn.SyscallConn() + if err != nil { + errno = platform.UnwrapOSError(err) + return + } + + // Prioritize the error from recvfrom over Control + if controlErr := syscallConn.Control(func(fd uintptr) { + var recvfromErr error + n, recvfromErr = recvfrom(syscall.Handle(fd), p, MSG_PEEK) + errno = platform.UnwrapOSError(recvfromErr) + }); errno == 0 { + errno = platform.UnwrapOSError(controlErr) + } + return +} + +// Shutdown implements the same method as documented on fsapi.Conn +func (f *winTcpConnFile) Shutdown(how int) syscall.Errno { + // FIXME: can userland shutdown listeners? + var err error + switch how { + case syscall.SHUT_RD: + err = f.tc.CloseRead() + case syscall.SHUT_WR: + err = f.tc.CloseWrite() + case syscall.SHUT_RDWR: + return f.close() + default: + return syscall.EINVAL + } + return platform.UnwrapOSError(err) +} + +// Close implements the same method as documented on fsapi.File +func (f *winTcpConnFile) Close() syscall.Errno { + return f.close() +} + +func (f *winTcpConnFile) close() syscall.Errno { + if f.closed { + return 0 + } + f.closed = true + return f.Shutdown(syscall.SHUT_RDWR) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b21fdb63d9..5d8fefb64d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -414,7 +414,7 @@ github.com/spf13/pflag # github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert -# github.com/tetratelabs/wazero v1.2.0 +# github.com/tetratelabs/wazero v1.2.1 ## explicit; go 1.18 github.com/tetratelabs/wazero github.com/tetratelabs/wazero/api
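The readFd/SetNonblock changes above are what let a WASI guest observe EAGAIN instead of blocking on an empty descriptor. A self-contained, unix-only sketch of that behavior on a pipe (illustrative; not part of the patch):

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// An empty pipe: a blocking read would park the caller forever.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()
	defer w.Close()

	// Grab the fd first: os.File.Fd can switch the descriptor back to
	// blocking mode, so set O_NONBLOCK explicitly afterwards, as the
	// diff's setNonblock helper does.
	fd := int(r.Fd())
	if err := syscall.SetNonblock(fd, true); err != nil {
		panic(err)
	}

	// Reading straight from the fd, as the new readFd helper does, fails
	// immediately with EAGAIN because nothing has been written yet.
	buf := make([]byte, 8)
	_, err = syscall.Read(fd, buf)
	fmt.Println(errors.Is(err, syscall.EAGAIN)) // prints: true
}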
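Similarly, newTCPListenerFile in sock_unix.go dups the listener's descriptor so its lifetime is decoupled from the *net.TCPListener that produced it. A unix-only sketch of why that works (the duplicated fd keeps the open socket alive after the Go wrappers are closed; this is an illustration, not the library's code):

package main

import (
	"fmt"
	"net"
	"syscall"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	tl := ln.(*net.TCPListener)

	// File() returns a dup'd *os.File sharing the listening socket.
	f, err := tl.File()
	if err != nil {
		panic(err)
	}

	// Dup again, as newTCPListenerFile does, so this fd survives both
	// the *os.File and the listener itself.
	fd, err := syscall.Dup(int(f.Fd()))
	if err != nil {
		panic(err)
	}
	f.Close()
	ln.Close()

	// The socket is still open and bound: the kernel keeps it alive
	// until the last descriptor referring to it is closed.
	sa, err := syscall.Getsockname(fd)
	if err != nil {
		panic(err)
	}
	fmt.Println("still bound to port", sa.(*syscall.SockaddrInet4).Port)
	syscall.Close(fd)
}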