mirror of https://github.com/superseriousbusiness/gotosocial.git
	update go-ffmpreg to v0.2.5 (pulls in latest tetratelabs/wazero) (#3203)
This commit is contained in:
parent 6fe96a5611
commit 09f24e0446

75 changed files with 1772 additions and 1913 deletions

4  go.mod
@@ -12,7 +12,7 @@ require (
 	codeberg.org/gruf/go-debug v1.3.0
 	codeberg.org/gruf/go-errors/v2 v2.3.2
 	codeberg.org/gruf/go-fastcopy v1.1.3
-	codeberg.org/gruf/go-ffmpreg v0.2.4
+	codeberg.org/gruf/go-ffmpreg v0.2.5
 	codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
 	codeberg.org/gruf/go-kv v1.6.4
 	codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
@@ -56,7 +56,7 @@ require (
 	github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8
 	github.com/tdewolff/minify/v2 v2.20.37
 	github.com/technologize/otel-go-contrib v1.1.1
-	github.com/tetratelabs/wazero v1.7.3
+	github.com/tetratelabs/wazero v1.8.0
 	github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
 	github.com/ulule/limiter/v3 v3.11.2
 	github.com/uptrace/bun v1.2.1

8  go.sum
@@ -52,8 +52,8 @@ codeberg.org/gruf/go-fastcopy v1.1.3 h1:Jo9VTQjI6KYimlw25PPc7YLA3Xm+XMQhaHwKnM7x
 codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s=
 codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0=
 codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q=
-codeberg.org/gruf/go-ffmpreg v0.2.4 h1:9NR0a5a0RjiIpyQgsqmHen6oadABADv04BWt7dr9kuE=
-codeberg.org/gruf/go-ffmpreg v0.2.4/go.mod h1:oPMfBkOK7xmR/teT/dKW6SeMFpRos9ceR/OuUrxBfcQ=
+codeberg.org/gruf/go-ffmpreg v0.2.5 h1:suQJ8VdWLkqUhDhHJEdOHMdLqvPisUxcAJiKrxjc6KQ=
+codeberg.org/gruf/go-ffmpreg v0.2.5/go.mod h1:sViRI0BYK2B8PJw4BrOg7DquPD71mZjDfffRAFcDtvk=
 codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A=
 codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk=
 codeberg.org/gruf/go-kv v1.6.4 h1:3NZiW8HVdBM3kpOiLb7XfRiihnzZWMAixdCznguhILk=
@@ -552,8 +552,8 @@ github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739 h1:IkjBCtQOOjIn03
 github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8=
 github.com/technologize/otel-go-contrib v1.1.1 h1:wZH9aSPNWZWIkEh3vfaKfMb15AJ80jJ1aVj/4GZdqIw=
 github.com/technologize/otel-go-contrib v1.1.1/go.mod h1:dCN/wj2WyUO8aFZFdIN+6tfJHImjTML/8r2YVYAy3So=
-github.com/tetratelabs/wazero v1.7.3 h1:PBH5KVahrt3S2AHgEjKu4u+LlDbbk+nsGE3KLucy6Rw=
-github.com/tetratelabs/wazero v1.7.3/go.mod h1:ytl6Zuh20R/eROuyDaGPkp82O9C/DJfXAwJfQ3X6/7Y=
+github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g=
+github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
 github.com/tidwall/btree v0.0.0-20191029221954-400434d76274 h1:G6Z6HvJuPjG6XfNGi/feOATzeJrfgTNJY+rGrHbA04E=
 github.com/tidwall/btree v0.0.0-20191029221954-400434d76274/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8=
 github.com/tidwall/buntdb v1.1.2 h1:noCrqQXL9EKMtcdwJcmuVKSEjqu1ua99RHHgbLTEHRo=

8  vendor/codeberg.org/gruf/go-ffmpreg/wasm/instance.go  generated vendored
@@ -10,6 +10,10 @@ import (
 )
 
 type Args struct {
+	// Optional further module configuration function.
+	// (e.g. to mount filesystem dir, set env vars, etc).
+	Config func(wazero.ModuleConfig) wazero.ModuleConfig
+
 	// Standard FDs.
 	Stdin  io.Reader
 	Stdout io.Writer
@@ -17,10 +21,6 @@ type Args struct {
 
 	// CLI args.
 	Args []string
-
-	// Optional further module configuration function.
-	// (e.g. to mount filesystem dir, set env vars, etc).
-	Config func(wazero.ModuleConfig) wazero.ModuleConfig
}
 
 type Instantiator struct {
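Since this hunk only reorders fields, it helps to see the Config hook in use. A minimal sketch, assuming the package is named wasm, that it also exposes a Stderr field (only Stdin/Stdout are visible in this hunk), and a hypothetical /tmp mount; go-ffmpreg's run entrypoint is out of scope here:

package main

import (
	"os"

	ffmpregwasm "codeberg.org/gruf/go-ffmpreg/wasm"
	"github.com/tetratelabs/wazero"
)

// exampleArgs populates Args so that Config layers extra module
// configuration (a directory mount) on top of whatever defaults
// go-ffmpreg applies before instantiation.
func exampleArgs() ffmpregwasm.Args {
	return ffmpregwasm.Args{
		// Optional extra configuration applied to the module config
		// go-ffmpreg builds internally.
		Config: func(cfg wazero.ModuleConfig) wazero.ModuleConfig {
			return cfg.WithFSConfig(
				wazero.NewFSConfig().WithDirMount("/tmp", "/tmp"),
			)
		},
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr, // assumed field; not shown in the hunk above
		Args:   []string{"ffmpeg", "-version"},
	}
}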

35  vendor/github.com/tetratelabs/wazero/Makefile  generated vendored
@@ -1,7 +1,7 @@
 
-gofumpt       := mvdan.cc/gofumpt@v0.5.0
+gofumpt       := mvdan.cc/gofumpt@v0.6.0
 gosimports    := github.com/rinchsan/gosimports/cmd/gosimports@v0.3.8
-golangci_lint := github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2
+golangci_lint := github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.0
 asmfmt        := github.com/klauspost/asmfmt/cmd/asmfmt@v1.3.2
 # sync this with netlify.toml!
 hugo          := github.com/gohugoio/hugo@v0.115.2
@@ -20,22 +20,6 @@ main_packages := $(sort $(foreach f,$(dir $(main_sources)),$(if $(findstring ./,
 
 go_test_options ?= -timeout 300s
 
-ensureCompilerFastest := -ldflags '-X github.com/tetratelabs/wazero/internal/integration_test/vs.ensureCompilerFastest=true'
-.PHONY: bench
-bench:
-	@go build ./internal/integration_test/bench/...
-	@# Don't use -test.benchmem as it isn't accurate when comparing against CGO libs
-	@for d in vs/time vs/wasmedge vs/wasmtime ; do \
-		cd ./internal/integration_test/$$d ; \
-		go test -bench=. . -tags='wasmedge' $(ensureCompilerFastest) ; \
-		cd - ;\
-	done
-
-bench_testdata_dir := internal/integration_test/bench/testdata
-.PHONY: build.bench
-build.bench:
-	@tinygo build -o $(bench_testdata_dir)/case.wasm -scheduler=none --no-debug -target=wasi $(bench_testdata_dir)/case.go
-
 .PHONY: test.examples
 test.examples:
 	@go test $(go_test_options) ./examples/... ./imports/assemblyscript/example/... ./imports/emscripten/... ./imports/wasi_snapshot_preview1/example/...
@@ -183,7 +167,7 @@ build.spectest.threads:
 
 .PHONY: test
 test:
-	@go test $(go_test_options) $$(go list ./... | grep -vE '$(spectest_v1_dir)|$(spectest_v2_dir)')
+	@go test $(go_test_options) ./...
 	@cd internal/version/testdata && go test $(go_test_options) ./...
 	@cd internal/integration_test/fuzz/wazerolib && CGO_ENABLED=0 WASM_BINARY_PATH=testdata/test.wasm go test ./...
 
@@ -194,17 +178,6 @@ coverage: ## Generate test coverage
 	@go test -coverprofile=coverage.txt -covermode=atomic --coverpkg=$(coverpkg) $(main_packages)
 	@go tool cover -func coverage.txt
 
-.PHONY: spectest
-spectest:
-	@$(MAKE) spectest.v1
-	@$(MAKE) spectest.v2
-
-spectest.v1:
-	@go test $(go_test_options) $$(go list ./... | grep $(spectest_v1_dir))
-
-spectest.v2:
-	@go test $(go_test_options) $$(go list ./... | grep $(spectest_v2_dir))
-
 golangci_lint_path := $(shell go env GOPATH)/bin/golangci-lint
 
 $(golangci_lint_path):
@@ -214,7 +187,7 @@ golangci_lint_goarch ?= $(shell go env GOARCH)
 
 .PHONY: lint
 lint: $(golangci_lint_path)
-	@GOARCH=$(golangci_lint_goarch) CGO_ENABLED=0 $(golangci_lint_path) run --timeout 5m
+	@GOARCH=$(golangci_lint_goarch) CGO_ENABLED=0 $(golangci_lint_path) run --timeout 5m -E testableexamples
 
 .PHONY: format
 format:

2  vendor/github.com/tetratelabs/wazero/README.md  generated vendored
@@ -1,6 +1,6 @@
 # wazero: the zero dependency WebAssembly runtime for Go developers
 
-[](https://github.com/tetratelabs/wazero/actions/workflows/spectest.yaml) [](https://pkg.go.dev/github.com/tetratelabs/wazero) [](https://opensource.org/licenses/Apache-2.0)
+[](https://pkg.go.dev/github.com/tetratelabs/wazero) [](https://opensource.org/licenses/Apache-2.0)
 
 WebAssembly is a way to safely run code compiled in other languages. Runtimes
 execute WebAssembly Modules (Wasm), which are most often binaries with a `.wasm`

6  vendor/github.com/tetratelabs/wazero/api/wasm.go  generated vendored
@@ -151,9 +151,13 @@ type Module interface {
 
 	// ExportedFunction returns a function exported from this module or nil if it wasn't.
 	//
-	// Note: The default wazero.ModuleConfig attempts to invoke `_start`, which
+	// # Notes
+	//   - The default wazero.ModuleConfig attempts to invoke `_start`, which
 	//     in rare cases can close the module. When in doubt, check IsClosed prior
 	//     to invoking a function export after instantiation.
+	//   - The semantics of host functions assumes the existence of an "importing module" because, for example, the host function needs access to
+	//     the memory of the importing module. Therefore, direct use of ExportedFunction is forbidden for host modules.
+	//     Practically speaking, it is usually meaningless to directly call a host function from Go code as it is already somewhere in Go code.
 	ExportedFunction(name string) Function
 
 	// ExportedFunctionDefinitions returns all the exported function
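The first note is actionable from embedder code. A sketch of the recommended IsClosed check, assuming a Wasm binary with a hypothetical `add` export whose `_start` runs at instantiation:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tetratelabs/wazero"
)

func callAdd(ctx context.Context, wasmBytes []byte) {
	r := wazero.NewRuntime(ctx)
	defer r.Close(ctx)

	// The default ModuleConfig invokes `_start`, which can (rarely)
	// close the module before we ever call an export.
	mod, err := r.Instantiate(ctx, wasmBytes)
	if err != nil {
		log.Fatal(err)
	}
	if mod.IsClosed() {
		log.Fatal("module was closed during _start")
	}

	add := mod.ExportedFunction("add") // nil if "add" isn't exported
	if add == nil {
		log.Fatal(`no "add" export`)
	}
	res, err := add.Call(ctx, 1, 2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res[0])
}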

17  vendor/github.com/tetratelabs/wazero/builder.go  generated vendored
@@ -179,6 +179,9 @@ type HostFunctionBuilder interface {
 //     are deferred until Compile.
 //   - Functions are indexed in order of calls to NewFunctionBuilder as
 //     insertion ordering is needed by ABI such as Emscripten (invoke_*).
+//   - The semantics of host functions assumes the existence of an "importing module" because, for example, the host function needs access to
+//     the memory of the importing module. Therefore, direct use of ExportedFunction is forbidden for host modules.
+//     Practically speaking, it is usually meaningless to directly call a host function from Go code as it is already somewhere in Go code.
 type HostModuleBuilder interface {
 	// Note: until golang/go#5860, we can't use example tests to embed code in interface godocs.
 
@@ -341,12 +344,24 @@ func (b *hostModuleBuilder) Compile(ctx context.Context) (CompiledModule, error)
 	return c, nil
 }
 
+// hostModuleInstance is a wrapper around api.Module that prevents calling ExportedFunction.
+type hostModuleInstance struct{ api.Module }
+
+// ExportedFunction implements api.Module ExportedFunction.
+func (h hostModuleInstance) ExportedFunction(name string) api.Function {
+	panic("calling ExportedFunction is forbidden on host modules. See the note on ExportedFunction interface")
+}
+
 // Instantiate implements HostModuleBuilder.Instantiate
 func (b *hostModuleBuilder) Instantiate(ctx context.Context) (api.Module, error) {
 	if compiled, err := b.Compile(ctx); err != nil {
 		return nil, err
 	} else {
 		compiled.(*compiledModule).closeWithModule = true
-		return b.r.InstantiateModule(ctx, compiled, NewModuleConfig())
+		m, err := b.r.InstantiateModule(ctx, compiled, NewModuleConfig())
+		if err != nil {
+			return nil, err
+		}
+		return hostModuleInstance{m}, nil
 	}
 }
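The wrapper changes observable behavior for embedders that previously looked up host functions on the instantiated host module. A sketch, with the "env"/"inc" names purely illustrative:

package main

import (
	"context"

	"github.com/tetratelabs/wazero"
	"github.com/tetratelabs/wazero/api"
)

// buildHost instantiates a host module exporting one Go function.
func buildHost(ctx context.Context, r wazero.Runtime) (api.Module, error) {
	return r.NewHostModuleBuilder("env").
		NewFunctionBuilder().
		WithFunc(func(_ context.Context, v uint32) uint32 { return v + 1 }).
		Export("inc").
		Instantiate(ctx)
}

// As of this change, calling ExportedFunction("inc") on the returned
// module panics instead of returning a Function: host functions are only
// callable through an importing module, never directly from Go.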

7  vendor/github.com/tetratelabs/wazero/cache.go  generated vendored
@@ -24,6 +24,13 @@ import (
 //     All implementations are in wazero.
 //   - Instances of this can be reused across multiple runtimes, if configured
 //     via RuntimeConfig.
+//   - The cache check happens before the compilation, so if multiple Goroutines are
+//     trying to compile the same module simultaneously, it is possible that they
+//     all compile the module. The design here is that the lock isn't held for the action "Compile"
+//     but only for checking and saving the compiled result. Therefore, we strongly recommend that the embedder
+//     does the centralized compilation in a single Goroutines (or multiple Goroutines per Wasm binary) to generate cache rather than
+//     trying to Compile in parallel for a single module. In other words, we always recommend to produce CompiledModule
+//     share it across multiple Goroutines to avoid trying to compile the same module simultaneously.
 type CompilationCache interface{ api.Closer }
 
 // NewCompilationCache returns a new CompilationCache to be passed to RuntimeConfig.
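The new doc note translates to a compile-once pattern. A sketch assuming an in-memory Wasm binary:

package main

import (
	"context"
	"log"
	"sync"

	"github.com/tetratelabs/wazero"
)

func compileOnceInstantiateMany(ctx context.Context, wasmBin []byte) {
	cache := wazero.NewCompilationCache()
	defer cache.Close(ctx)

	r := wazero.NewRuntimeWithConfig(ctx,
		wazero.NewRuntimeConfig().WithCompilationCache(cache))
	defer r.Close(ctx)

	// Centralized compilation: one goroutine produces the CompiledModule...
	compiled, err := r.CompileModule(ctx, wasmBin)
	if err != nil {
		log.Fatal(err)
	}

	// ...and many goroutines share it, instantiating without recompiling.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mod, err := r.InstantiateModule(ctx, compiled,
				wazero.NewModuleConfig().WithName(""))
			if err != nil {
				log.Println(err)
				return
			}
			defer mod.Close(ctx)
		}()
	}
	wg.Wait()
}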

15  vendor/github.com/tetratelabs/wazero/config.go  generated vendored
@@ -495,7 +495,20 @@ type ModuleConfig interface {
 	WithFSConfig(FSConfig) ModuleConfig
 
 	// WithName configures the module name. Defaults to what was decoded from
-	// the name section. Empty string ("") clears any name.
+	// the name section. Duplicate names are not allowed in a single Runtime.
+	//
+	// Calling this with the empty string "" makes the module anonymous.
+	// That is useful when you want to instantiate the same CompiledModule multiple times like below:
+	//
+	// 	for i := 0; i < N; i++ {
+	//		// Instantiate a new Wasm module from the already compiled `compiledWasm` anonymously without a name.
+	//		instance, err := r.InstantiateModule(ctx, compiledWasm, wazero.NewModuleConfig().WithName(""))
+	//		// ....
+	//	}
+	//
+	// See the `concurrent-instantiation` example for a complete usage.
+	//
+	// Non-empty named modules are available for other modules to import by name.
 	WithName(string) ModuleConfig
 
 	// WithStartFunctions configures the functions to call after the module is
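A fuller sketch of the same contract, with error handling; compiledWasm is assumed to be an already-compiled module carrying a non-empty decoded name:

package main

import (
	"context"
	"log"

	"github.com/tetratelabs/wazero"
)

func instantiateTwice(ctx context.Context, r wazero.Runtime, compiledWasm wazero.CompiledModule) {
	first, err := r.InstantiateModule(ctx, compiledWasm, wazero.NewModuleConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer first.Close(ctx)

	// Duplicate names are not allowed in a single Runtime, so a second
	// instantiation under the same decoded name fails...
	if _, err := r.InstantiateModule(ctx, compiledWasm, wazero.NewModuleConfig()); err != nil {
		log.Printf("expected duplicate-name failure: %v", err)
	}

	// ...while WithName("") makes the instance anonymous and collision-free.
	second, err := r.InstantiateModule(ctx, compiledWasm, wazero.NewModuleConfig().WithName(""))
	if err != nil {
		log.Fatal(err)
	}
	defer second.Close(ctx)
}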

19  vendor/github.com/tetratelabs/wazero/experimental/importresolver.go  generated vendored  new file
@@ -0,0 +1,19 @@
+package experimental
+
+import (
+	"context"
+
+	"github.com/tetratelabs/wazero/api"
+	"github.com/tetratelabs/wazero/internal/expctxkeys"
+)
+
+// ImportResolver is an experimental func type that, if set,
+// will be used as the first step in resolving imports.
+// See issue 2294.
+// If the import name is not found, it should return nil.
+type ImportResolver func(name string) api.Module
+
+// WithImportResolver returns a new context with the given ImportResolver.
+func WithImportResolver(ctx context.Context, resolver ImportResolver) context.Context {
+	return context.WithValue(ctx, expctxkeys.ImportResolverKey{}, resolver)
+}
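A usage sketch for the new hook; the resolver signature comes straight from the file above, while envMod (a pre-instantiated module) and the "env" import name are illustrative:

package main

import (
	"context"

	"github.com/tetratelabs/wazero"
	"github.com/tetratelabs/wazero/api"
	"github.com/tetratelabs/wazero/experimental"
)

// instantiateWithResolver redirects imports of the module named "env"
// to envMod; returning nil falls back to normal name-based resolution.
func instantiateWithResolver(ctx context.Context, r wazero.Runtime,
	compiled wazero.CompiledModule, envMod api.Module) (api.Module, error) {
	resolver := experimental.ImportResolver(func(name string) api.Module {
		if name == "env" {
			return envMod
		}
		return nil // not found: fall through to registered module names
	})
	ctx = experimental.WithImportResolver(ctx, resolver)
	return r.InstantiateModule(ctx, compiled, wazero.NewModuleConfig())
}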

8  vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go  generated vendored
@@ -1962,17 +1962,11 @@ func pathSymlinkFn(_ context.Context, mod api.Module, params []uint64) experimen
 	return dir.FS.Symlink(
 		// Do not join old path since it's only resolved when dereference the link created here.
 		// And the dereference result depends on the opening directory's file descriptor at that point.
-		bufToStr(oldPathBuf),
+		unsafe.String(&oldPathBuf[0], int(oldPathLen)),
 		newPathName,
 	)
 }
 
-// bufToStr converts the given byte slice as string unsafely.
-func bufToStr(buf []byte) string {
-	// TODO: use unsafe.String after flooring Go 1.20.
-	return *(*string)(unsafe.Pointer(&buf))
-}
-
 // pathUnlinkFile is the WASI function named PathUnlinkFileName which unlinks a
 // file.
 //
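This replacement drops the slice-header trick in favor of unsafe.String (Go 1.20+), which takes a pointer and an explicit byte length: the old helper implicitly used len(buf), while the new call passes oldPathLen. A small demonstration of the semantics:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	buf := []byte("old/path\x00trailing")
	// Pointer plus explicit length: only the first 8 bytes become the
	// string, regardless of len(buf) or cap(buf).
	s := unsafe.String(&buf[0], 8)
	fmt.Println(s) // old/path
}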

4  vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/poll.go  generated vendored
@@ -68,9 +68,7 @@ func pollOneoffFn(_ context.Context, mod api.Module, params []uint64) sys.Errno
 	}
 	outBuf, ok := mem.Read(out, nsubscriptions*32)
 	// zero-out all buffer before writing
-	for i := range outBuf {
-		outBuf[i] = 0
-	}
+	clear(outBuf)
 
 	if !ok {
 		return sys.EFAULT
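Both this hunk and the descriptor-table change below swap manual zeroing loops for the clear builtin (Go 1.21+). On a slice it zeroes every element in place; on a map it deletes all entries:

package main

import "fmt"

func main() {
	buf := []byte{1, 2, 3, 4}
	clear(buf) // zero all elements; equivalent to the removed loop
	fmt.Println(buf) // [0 0 0 0]

	m := map[string]int{"a": 1}
	clear(m) // delete every entry
	fmt.Println(len(m)) // 0
}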

9  vendor/github.com/tetratelabs/wazero/internal/descriptor/table.go  generated vendored
@@ -154,11 +154,6 @@ func (t *Table[Key, Item]) Range(f func(Key, Item) bool) {
 
 // Reset clears the content of the table.
 func (t *Table[Key, Item]) Reset() {
-	for i := range t.masks {
-		t.masks[i] = 0
-	}
-	var zero Item
-	for i := range t.items {
-		t.items[i] = zero
-	}
+	clear(t.masks)
+	clear(t.items)
 }

48  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/compiler.go  generated vendored
@@ -26,9 +26,12 @@ const (
 type (
 	controlFrame struct {
 		frameID uint32
-		// originalStackLen holds the number of values on the stack
+		// originalStackLenWithoutParam holds the number of values on the stack
 		// when Start executing this control frame minus params for the block.
 		originalStackLenWithoutParam int
+		// originalStackLenWithoutParamUint64 is almost the same as originalStackLenWithoutParam
+		// except that it holds the number of values on the stack in uint64.
+		originalStackLenWithoutParamUint64 int
 		blockType                          *wasm.FunctionType
 		kind                               controlFrameKind
 	}
@@ -157,6 +160,8 @@ type compiler struct {
 	enabledFeatures            api.CoreFeatures
 	callFrameStackSizeInUint64 int
 	stack                      []unsignedType
+	// stackLenInUint64 is the length of the stack in uint64.
+	stackLenInUint64 int
 	currentFrameID   uint32
 	controlFrames    controlFrames
 	unreachableState struct {
@@ -341,6 +346,7 @@ func (c *compiler) Next() (*compilationResult, error) {
 	c.pc = 0
 	c.currentOpPC = 0
 	c.currentFrameID = 0
+	c.stackLenInUint64 = 0
 	c.unreachableState.on, c.unreachableState.depth = false, 0
 
 	if err := c.compile(sig, code.Body, code.LocalTypes, code.BodyOffsetInCodeSection); err != nil {
@@ -451,6 +457,7 @@ operatorSwitch:
 		frame := controlFrame{
 			frameID:                            c.nextFrameID(),
 			originalStackLenWithoutParam:       len(c.stack) - len(bt.Params),
+			originalStackLenWithoutParamUint64: c.stackLenInUint64 - bt.ParamNumInUint64,
 			kind:                               controlFrameKindBlockWithoutContinuationLabel,
 			blockType:                          bt,
 		}
@@ -475,6 +482,7 @@ operatorSwitch:
 		frame := controlFrame{
 			frameID:                            c.nextFrameID(),
 			originalStackLenWithoutParam:       len(c.stack) - len(bt.Params),
+			originalStackLenWithoutParamUint64: c.stackLenInUint64 - bt.ParamNumInUint64,
 			kind:                               controlFrameKindLoop,
 			blockType:                          bt,
 		}
@@ -517,6 +525,7 @@ operatorSwitch:
 		frame := controlFrame{
 			frameID:                            c.nextFrameID(),
 			originalStackLenWithoutParam:       len(c.stack) - len(bt.Params),
+			originalStackLenWithoutParamUint64: c.stackLenInUint64 - bt.ParamNumInUint64,
 			// Note this will be set to controlFrameKindIfWithElse
 			// when else opcode found later.
 			kind:      controlFrameKindIfWithoutElse,
@@ -543,7 +552,7 @@ operatorSwitch:
 			// If it is currently in unreachable, and the non-nested if,
 			// reset the stack so we can correctly handle the else block.
 			top := c.controlFrames.top()
-			c.stack = c.stack[:top.originalStackLenWithoutParam]
+			c.stackSwitchAt(top)
 			top.kind = controlFrameKindIfWithElse
 
 			// Re-push the parameters to the if block so that else block can use them.
@@ -572,7 +581,7 @@ operatorSwitch:
 
 		// Reset the stack manipulated by the then block, and re-push the block param types to the stack.
 
-		c.stack = c.stack[:frame.originalStackLenWithoutParam]
+		c.stackSwitchAt(frame)
 		for _, t := range frame.blockType.Params {
 			c.stackPush(wasmValueTypeTounsignedType(t))
 		}
@@ -601,7 +610,7 @@ operatorSwitch:
 				return nil
 			}
 
-			c.stack = c.stack[:frame.originalStackLenWithoutParam]
+			c.stackSwitchAt(frame)
 			for _, t := range frame.blockType.Results {
 				c.stackPush(wasmValueTypeTounsignedType(t))
 			}
@@ -628,7 +637,7 @@ operatorSwitch:
 		// We need to reset the stack so that
 		// the values pushed inside the block.
 		dropOp := newOperationDrop(c.getFrameDropRange(frame, true))
-		c.stack = c.stack[:frame.originalStackLenWithoutParam]
+		c.stackSwitchAt(frame)
 
 		// Push the result types onto the stack.
 		for _, t := range frame.blockType.Results {
@@ -3505,6 +3514,11 @@ func (c *compiler) stackPeek() (ret unsignedType) {
 	return
 }
 
+func (c *compiler) stackSwitchAt(frame *controlFrame) {
+	c.stack = c.stack[:frame.originalStackLenWithoutParam]
+	c.stackLenInUint64 = frame.originalStackLenWithoutParamUint64
+}
+
 func (c *compiler) stackPop() (ret unsignedType) {
 	// No need to check stack bound
 	// as we can assume that all the operations
@@ -3512,11 +3526,13 @@ func (c *compiler) stackPop() (ret unsignedType) {
 	// at module validation phase.
 	ret = c.stack[len(c.stack)-1]
 	c.stack = c.stack[:len(c.stack)-1]
+	c.stackLenInUint64 -= 1 + int(unsignedTypeV128&ret>>2)
 	return
 }
 
 func (c *compiler) stackPush(ts unsignedType) {
 	c.stack = append(c.stack, ts)
+	c.stackLenInUint64 += 1 + int(unsignedTypeV128&ts>>2)
 }
 
 // emit adds the operations into the result.
@@ -3565,7 +3581,7 @@ func (c *compiler) emitDefaultValue(t wasm.ValueType) {
 // of the n-th local.
 func (c *compiler) localDepth(index wasm.Index) int {
 	height := c.localIndexToStackHeightInUint64[index]
-	return c.stackLenInUint64(len(c.stack)) - 1 - int(height)
+	return c.stackLenInUint64 - 1 - height
 }
 
 func (c *compiler) localType(index wasm.Index) (t wasm.ValueType) {
@@ -3592,14 +3608,7 @@ func (c *compiler) getFrameDropRange(frame *controlFrame, isEnd bool) inclusiveR
 	} else {
 		start = frame.blockType.ResultNumInUint64
 	}
-	var end int
-	if frame.kind == controlFrameKindFunction {
-		// On the function return, we eliminate all the contents on the stack
-		// including locals (existing below of frame.originalStackLen)
-		end = c.stackLenInUint64(len(c.stack)) - 1
-	} else {
-		end = c.stackLenInUint64(len(c.stack)) - 1 - c.stackLenInUint64(frame.originalStackLenWithoutParam)
-	}
+	end := c.stackLenInUint64 - 1 - frame.originalStackLenWithoutParamUint64
 	if start <= end {
 		return inclusiveRange{Start: int32(start), End: int32(end)}
 	} else {
@@ -3607,17 +3616,6 @@ func (c *compiler) getFrameDropRange(frame *controlFrame, isEnd bool) inclusiveR
 	}
 }
 
-func (c *compiler) stackLenInUint64(ceil int) (ret int) {
-	for i := 0; i < ceil; i++ {
-		if c.stack[i] == unsignedTypeV128 {
-			ret += 2
-		} else {
-			ret++
-		}
-	}
-	return
-}
-
 func (c *compiler) readMemoryArg(tag string) (memoryArg, error) {
 	c.result.UsesMemory = true
 	alignment, num, err := leb128.LoadUint32(c.body[c.pc+1:])
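These hunks replace an O(stack) recount (the removed stackLenInUint64 method) with a counter maintained on every push and pop. The adjustment 1 + int(unsignedTypeV128&ret>>2) is a bit trick; a sketch under the assumption (not visible in this hunk) that the unsignedType constants are iota-ordered with v128 fifth, i.e. the only value with bit 2 set:

package main

import "fmt"

const unsignedTypeV128 = 4 // assumed: i32=0, i64=1, f32=2, f64=3, v128=4

// slots mirrors 1 + int(unsignedTypeV128&t>>2). In Go, & and >> share
// precedence and associate left, so the expression is (V128 & t) >> 2,
// which is 1 only for v128: a v128 value occupies two uint64 stack
// slots, every other type one.
func slots(t int) int {
	return 1 + (unsignedTypeV128&t)>>2
}

func main() {
	fmt.Println(slots(0), slots(unsignedTypeV128)) // 1 2
}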

26  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/interpreter.go  generated vendored
@@ -3901,14 +3901,9 @@ func (ce *callEngine) callNativeFunc(ctx context.Context, m *wasm.ModuleInstance
 		case operationKindV128Dot:
 			x2Hi, x2Lo := ce.popValue(), ce.popValue()
 			x1Hi, x1Lo := ce.popValue(), ce.popValue()
-			ce.pushValue(
-				uint64(uint32(int32(int16(x1Lo>>0))*int32(int16(x2Lo>>0))+int32(int16(x1Lo>>16))*int32(int16(x2Lo>>16)))) |
-					(uint64(uint32(int32(int16(x1Lo>>32))*int32(int16(x2Lo>>32))+int32(int16(x1Lo>>48))*int32(int16(x2Lo>>48)))) << 32),
-			)
-			ce.pushValue(
-				uint64(uint32(int32(int16(x1Hi>>0))*int32(int16(x2Hi>>0))+int32(int16(x1Hi>>16))*int32(int16(x2Hi>>16)))) |
-					(uint64(uint32(int32(int16(x1Hi>>32))*int32(int16(x2Hi>>32))+int32(int16(x1Hi>>48))*int32(int16(x2Hi>>48)))) << 32),
-			)
+			lo, hi := v128Dot(x1Hi, x1Lo, x2Hi, x2Lo)
+			ce.pushValue(lo)
+			ce.pushValue(hi)
 			frame.pc++
 		case operationKindV128ITruncSatFromF:
 			hi, lo := ce.popValue(), ce.popValue()
@@ -4584,3 +4579,18 @@ func (ce *callEngine) callGoFuncWithStack(ctx context.Context, m *wasm.ModuleIns
 		ce.stack = ce.stack[0 : len(ce.stack)-shrinkLen]
 	}
 }
+
+// v128Dot performs a dot product of two 64-bit vectors.
+// Note: for some reason (which I suspect is due to a bug in Go compiler's regalloc),
+// inlining this function causes a bug which happens **only when** we run with -race AND arm64 AND Go 1.22.
+func v128Dot(x1Hi, x1Lo, x2Hi, x2Lo uint64) (uint64, uint64) {
+	r1 := int32(int16(x1Lo>>0)) * int32(int16(x2Lo>>0))
+	r2 := int32(int16(x1Lo>>16)) * int32(int16(x2Lo>>16))
+	r3 := int32(int16(x1Lo>>32)) * int32(int16(x2Lo>>32))
+	r4 := int32(int16(x1Lo>>48)) * int32(int16(x2Lo>>48))
+	r5 := int32(int16(x1Hi>>0)) * int32(int16(x2Hi>>0))
+	r6 := int32(int16(x1Hi>>16)) * int32(int16(x2Hi>>16))
+	r7 := int32(int16(x1Hi>>32)) * int32(int16(x2Hi>>32))
+	r8 := int32(int16(x1Hi>>48)) * int32(int16(x2Hi>>48))
+	return uint64(uint32(r1+r2)) | (uint64(uint32(r3+r4)) << 32), uint64(uint32(r5+r6)) | (uint64(uint32(r7+r8)) << 32)
+}

48  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/compiler.go  generated vendored
@@ -69,7 +69,7 @@ type Compiler interface {
 	AllocateVReg(typ ssa.Type) regalloc.VReg
 
 	// ValueDefinition returns the definition of the given value.
-	ValueDefinition(ssa.Value) *SSAValueDefinition
+	ValueDefinition(ssa.Value) SSAValueDefinition
 
 	// VRegOf returns the virtual register of the given ssa.Value.
 	VRegOf(value ssa.Value) regalloc.VReg
@@ -79,13 +79,13 @@ type Compiler interface {
 
 	// MatchInstr returns true if the given definition is from an instruction with the given opcode, the current group ID,
 	// and a refcount of 1. That means, the instruction can be merged/swapped within the current instruction group.
-	MatchInstr(def *SSAValueDefinition, opcode ssa.Opcode) bool
+	MatchInstr(def SSAValueDefinition, opcode ssa.Opcode) bool
 
 	// MatchInstrOneOf is the same as MatchInstr but for multiple opcodes. If it matches one of ssa.Opcode,
 	// this returns the opcode. Otherwise, this returns ssa.OpcodeInvalid.
 	//
 	// Note: caller should be careful to avoid excessive allocation on opcodes slice.
-	MatchInstrOneOf(def *SSAValueDefinition, opcodes []ssa.Opcode) ssa.Opcode
+	MatchInstrOneOf(def SSAValueDefinition, opcodes []ssa.Opcode) ssa.Opcode
 
 	// AddRelocationInfo appends the relocation information for the function reference at the current buffer offset.
 	AddRelocationInfo(funcRef ssa.FuncRef)
@@ -126,10 +126,7 @@ type compiler struct {
 	nextVRegID regalloc.VRegID
 	// ssaValueToVRegs maps ssa.ValueID to regalloc.VReg.
 	ssaValueToVRegs [] /* VRegID to */ regalloc.VReg
-	// ssaValueDefinitions maps ssa.ValueID to its definition.
-	ssaValueDefinitions []SSAValueDefinition
-	// ssaValueRefCounts is a cached list obtained by ssa.Builder.ValueRefCounts().
-	ssaValueRefCounts []int
+	ssaValuesInfo   []ssa.ValueInfo
 	// returnVRegs is the list of virtual registers that store the return values.
 	returnVRegs  []regalloc.VReg
 	varEdges     [][2]regalloc.VReg
@@ -206,15 +203,10 @@ func (c *compiler) setCurrentGroupID(gid ssa.InstructionGroupID) {
 // assignVirtualRegisters assigns a virtual register to each ssa.ValueID Valid in the ssa.Builder.
 func (c *compiler) assignVirtualRegisters() {
 	builder := c.ssaBuilder
-	refCounts := builder.ValueRefCounts()
-	c.ssaValueRefCounts = refCounts
+	c.ssaValuesInfo = builder.ValuesInfo()
 
-	need := len(refCounts)
-	if need >= len(c.ssaValueToVRegs) {
-		c.ssaValueToVRegs = append(c.ssaValueToVRegs, make([]regalloc.VReg, need+1)...)
-	}
-	if need >= len(c.ssaValueDefinitions) {
-		c.ssaValueDefinitions = append(c.ssaValueDefinitions, make([]SSAValueDefinition, need+1)...)
+	if diff := len(c.ssaValuesInfo) - len(c.ssaValueToVRegs); diff > 0 {
+		c.ssaValueToVRegs = append(c.ssaValueToVRegs, make([]regalloc.VReg, diff+1)...)
 	}
 
 	for blk := builder.BlockIteratorReversePostOrderBegin(); blk != nil; blk = builder.BlockIteratorReversePostOrderNext() {
@@ -225,40 +217,26 @@ func (c *compiler) assignVirtualRegisters() {
 			typ := p.Type()
 			vreg := c.AllocateVReg(typ)
 			c.ssaValueToVRegs[pid] = vreg
-			c.ssaValueDefinitions[pid] = SSAValueDefinition{BlockParamValue: p, BlkParamVReg: vreg}
 			c.ssaTypeOfVRegID[vreg.ID()] = p.Type()
 		}
 
 		// Assigns each value to a virtual register produced by instructions.
 		for cur := blk.Root(); cur != nil; cur = cur.Next() {
 			r, rs := cur.Returns()
-			var N int
 			if r.Valid() {
 				id := r.ID()
 				ssaTyp := r.Type()
 				typ := r.Type()
 				vReg := c.AllocateVReg(typ)
 				c.ssaValueToVRegs[id] = vReg
-				c.ssaValueDefinitions[id] = SSAValueDefinition{
-					Instr:    cur,
-					N:        0,
-					RefCount: refCounts[id],
-				}
 				c.ssaTypeOfVRegID[vReg.ID()] = ssaTyp
-				N++
 			}
 			for _, r := range rs {
 				id := r.ID()
 				ssaTyp := r.Type()
 				vReg := c.AllocateVReg(ssaTyp)
 				c.ssaValueToVRegs[id] = vReg
-				c.ssaValueDefinitions[id] = SSAValueDefinition{
-					Instr:    cur,
-					N:        N,
-					RefCount: refCounts[id],
-				}
 				c.ssaTypeOfVRegID[vReg.ID()] = ssaTyp
-				N++
 			}
 		}
 	}
@@ -299,8 +277,12 @@ func (c *compiler) Init() {
 }
 
 // ValueDefinition implements Compiler.ValueDefinition.
-func (c *compiler) ValueDefinition(value ssa.Value) *SSAValueDefinition {
-	return &c.ssaValueDefinitions[value.ID()]
+func (c *compiler) ValueDefinition(value ssa.Value) SSAValueDefinition {
+	return SSAValueDefinition{
+		V:        value,
+		Instr:    c.ssaBuilder.InstructionOfValue(value),
+		RefCount: c.ssaValuesInfo[value.ID()].RefCount,
+	}
}
 
 // VRegOf implements Compiler.VRegOf.
@@ -319,7 +301,7 @@ func (c *compiler) TypeOf(v regalloc.VReg) ssa.Type {
 }
 
 // MatchInstr implements Compiler.MatchInstr.
-func (c *compiler) MatchInstr(def *SSAValueDefinition, opcode ssa.Opcode) bool {
+func (c *compiler) MatchInstr(def SSAValueDefinition, opcode ssa.Opcode) bool {
 	instr := def.Instr
 	return def.IsFromInstr() &&
 		instr.Opcode() == opcode &&
@@ -328,7 +310,7 @@ func (c *compiler) MatchInstr(def SSAValueDefinition, opcode ssa.Opcode) bool {
 }
 
 // MatchInstrOneOf implements Compiler.MatchInstrOneOf.
-func (c *compiler) MatchInstrOneOf(def *SSAValueDefinition, opcodes []ssa.Opcode) ssa.Opcode {
+func (c *compiler) MatchInstrOneOf(def SSAValueDefinition, opcodes []ssa.Opcode) ssa.Opcode {
 	instr := def.Instr
 	if !def.IsFromInstr() {
 		return ssa.OpcodeInvalid

38  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/compiler_lower.go  generated vendored
@@ -9,7 +9,7 @@ import (
 func (c *compiler) Lower() {
 	c.assignVirtualRegisters()
 	c.mach.SetCurrentABI(c.GetFunctionABI(c.ssaBuilder.Signature()))
-	c.mach.ExecutableContext().StartLoweringFunction(c.ssaBuilder.BlockIDMax())
+	c.mach.StartLoweringFunction(c.ssaBuilder.BlockIDMax())
 	c.lowerBlocks()
 }
 
@@ -20,12 +20,11 @@ func (c *compiler) lowerBlocks() {
 		c.lowerBlock(blk)
 	}
 
-	ectx := c.mach.ExecutableContext()
 	// After lowering all blocks, we need to link adjacent blocks to layout one single instruction list.
 	var prev ssa.BasicBlock
 	for next := builder.BlockIteratorReversePostOrderBegin(); next != nil; next = builder.BlockIteratorReversePostOrderNext() {
 		if prev != nil {
-			ectx.LinkAdjacentBlocks(prev, next)
+			c.mach.LinkAdjacentBlocks(prev, next)
 		}
 		prev = next
 	}
@@ -33,8 +32,7 @@ func (c *compiler) lowerBlocks() {
 
 func (c *compiler) lowerBlock(blk ssa.BasicBlock) {
 	mach := c.mach
-	ectx := mach.ExecutableContext()
-	ectx.StartBlock(blk)
+	mach.StartBlock(blk)
 
 	// We traverse the instructions in reverse order because we might want to lower multiple
 	// instructions together.
@@ -76,7 +74,7 @@ func (c *compiler) lowerBlock(blk ssa.BasicBlock) {
 		default:
 			mach.LowerInstr(cur)
 		}
-		ectx.FlushPendingInstructions()
+		mach.FlushPendingInstructions()
 	}
 
 	// Finally, if this is the entry block, we have to insert copies of arguments from the real location to the VReg.
@@ -84,7 +82,7 @@ func (c *compiler) lowerBlock(blk ssa.BasicBlock) {
 		c.lowerFunctionArguments(blk)
 	}
 
-	ectx.EndBlock()
+	mach.EndBlock()
 }
 
 // lowerBranches is called right after StartBlock and before any LowerInstr call if
@@ -93,23 +91,24 @@ func (c *compiler) lowerBlock(blk ssa.BasicBlock) {
 //
 // See ssa.Instruction IsBranching, and the comment on ssa.BasicBlock.
 func (c *compiler) lowerBranches(br0, br1 *ssa.Instruction) {
-	ectx := c.mach.ExecutableContext()
+	mach := c.mach
 
 	c.setCurrentGroupID(br0.GroupID())
 	c.mach.LowerSingleBranch(br0)
-	ectx.FlushPendingInstructions()
+	mach.FlushPendingInstructions()
 	if br1 != nil {
 		c.setCurrentGroupID(br1.GroupID())
 		c.mach.LowerConditionalBranch(br1)
-		ectx.FlushPendingInstructions()
+		mach.FlushPendingInstructions()
 	}
 
 	if br0.Opcode() == ssa.OpcodeJump {
-		_, args, target := br0.BranchData()
+		_, args, targetBlockID := br0.BranchData()
 		argExists := len(args) != 0
 		if argExists && br1 != nil {
 			panic("BUG: critical edge split failed")
 		}
+		target := c.ssaBuilder.BasicBlock(targetBlockID)
 		if argExists && target.ReturnBlock() {
 			if len(args) > 0 {
 				c.mach.LowerReturns(args)
@@ -118,24 +117,25 @@ func (c *compiler) lowerBranches(br0, br1 *ssa.Instruction) {
 			c.lowerBlockArguments(args, target)
 		}
 	}
-	ectx.FlushPendingInstructions()
+	mach.FlushPendingInstructions()
 }
 
 func (c *compiler) lowerFunctionArguments(entry ssa.BasicBlock) {
-	ectx := c.mach.ExecutableContext()
+	mach := c.mach
 
 	c.tmpVals = c.tmpVals[:0]
+	data := c.ssaBuilder.ValuesInfo()
 	for i := 0; i < entry.Params(); i++ {
 		p := entry.Param(i)
-		if c.ssaValueRefCounts[p.ID()] > 0 {
+		if data[p.ID()].RefCount > 0 {
 			c.tmpVals = append(c.tmpVals, p)
 		} else {
| 			// If the argument is not used, we can just pass an invalid value. | 			// If the argument is not used, we can just pass an invalid value. | ||||||
| 			c.tmpVals = append(c.tmpVals, ssa.ValueInvalid) | 			c.tmpVals = append(c.tmpVals, ssa.ValueInvalid) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	c.mach.LowerParams(c.tmpVals) | 	mach.LowerParams(c.tmpVals) | ||||||
| 	ectx.FlushPendingInstructions() | 	mach.FlushPendingInstructions() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // lowerBlockArguments lowers how to pass arguments to the given successor block. | // lowerBlockArguments lowers how to pass arguments to the given successor block. | ||||||
|  | @ -152,12 +152,12 @@ func (c *compiler) lowerBlockArguments(args []ssa.Value, succ ssa.BasicBlock) { | ||||||
| 		src := args[i] | 		src := args[i] | ||||||
| 
 | 
 | ||||||
| 		dstReg := c.VRegOf(dst) | 		dstReg := c.VRegOf(dst) | ||||||
| 		srcDef := c.ssaValueDefinitions[src.ID()] | 		srcInstr := c.ssaBuilder.InstructionOfValue(src) | ||||||
| 		if srcDef.IsFromInstr() && srcDef.Instr.Constant() { | 		if srcInstr != nil && srcInstr.Constant() { | ||||||
| 			c.constEdges = append(c.constEdges, struct { | 			c.constEdges = append(c.constEdges, struct { | ||||||
| 				cInst *ssa.Instruction | 				cInst *ssa.Instruction | ||||||
| 				dst   regalloc.VReg | 				dst   regalloc.VReg | ||||||
| 			}{cInst: srcDef.Instr, dst: dstReg}) | 			}{cInst: srcInstr, dst: dstReg}) | ||||||
| 		} else { | 		} else { | ||||||
| 			srcReg := c.VRegOf(src) | 			srcReg := c.VRegOf(src) | ||||||
| 			// Even when src == dst, insert the move so that we can keep such registers alive. | 			// Even when src == dst, insert the move so that we can keep such registers alive. | ||||||
|  |  | ||||||
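The compiler_lower.go hunks above replace every call through mach.ExecutableContext() with a direct call on the machine, because the ExecutableContext interface (deleted in the next file) is folded into backend.Machine. A toy sketch of that refactor shape, with invented names:

package main

import "fmt"

// Before the refactor (sketch): lowering state hid behind an extra interface.
type loweringState interface {
	StartBlock(id int)
	FlushPendingInstructions()
}

// After (sketch): the machine carries the state and implements the methods
// itself, so callers write m.StartBlock(...) instead of
// m.ExecutableContext().StartBlock(...), saving one indirection per call.
type machine struct{ pending []int }

func (m *machine) StartBlock(id int) { fmt.Println("start block", id) }

func (m *machine) FlushPendingInstructions() { m.pending = m.pending[:0] }

func main() {
	m := &machine{pending: []int{1, 2, 3}}
	var _ loweringState = m // the separate interface became redundant
	m.StartBlock(0)
	m.FlushPendingInstructions()
	fmt.Println(len(m.pending)) // 0
}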
							
								
								
									
221	vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/executable_context.go (generated) (vendored)
							|  | @ -1,221 +0,0 @@ | ||||||
| package backend |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"fmt" |  | ||||||
| 	"math" |  | ||||||
| 
 |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| type ExecutableContext interface { |  | ||||||
| 	// StartLoweringFunction is called when the lowering of the given function is started. |  | ||||||
| 	// maximumBlockID is the maximum value of ssa.BasicBlockID existing in the function. |  | ||||||
| 	StartLoweringFunction(maximumBlockID ssa.BasicBlockID) |  | ||||||
| 
 |  | ||||||
| 	// LinkAdjacentBlocks is called after finished lowering all blocks in order to create one single instruction list. |  | ||||||
| 	LinkAdjacentBlocks(prev, next ssa.BasicBlock) |  | ||||||
| 
 |  | ||||||
| 	// StartBlock is called when the compilation of the given block is started. |  | ||||||
| 	// The order of this being called is the reverse post order of the ssa.BasicBlock(s) as we iterate with |  | ||||||
| 	// ssa.Builder BlockIteratorReversePostOrderBegin and BlockIteratorReversePostOrderEnd. |  | ||||||
| 	StartBlock(ssa.BasicBlock) |  | ||||||
| 
 |  | ||||||
| 	// EndBlock is called when the compilation of the current block is finished. |  | ||||||
| 	EndBlock() |  | ||||||
| 
 |  | ||||||
| 	// FlushPendingInstructions flushes the pending instructions to the buffer. |  | ||||||
| 	// This will be called after the lowering of each SSA Instruction. |  | ||||||
| 	FlushPendingInstructions() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type ExecutableContextT[Instr any] struct { |  | ||||||
| 	CurrentSSABlk ssa.BasicBlock |  | ||||||
| 
 |  | ||||||
| 	// InstrPool is the InstructionPool of instructions. |  | ||||||
| 	InstructionPool wazevoapi.Pool[Instr] |  | ||||||
| 	asNop           func(*Instr) |  | ||||||
| 	setNext         func(*Instr, *Instr) |  | ||||||
| 	setPrev         func(*Instr, *Instr) |  | ||||||
| 
 |  | ||||||
| 	// RootInstr is the root instruction of the executable. |  | ||||||
| 	RootInstr         *Instr |  | ||||||
| 	labelPositionPool wazevoapi.Pool[LabelPosition[Instr]] |  | ||||||
| 	NextLabel         Label |  | ||||||
| 	// LabelPositions maps a label to the instructions of the region which the label represents. |  | ||||||
| 	LabelPositions     []*LabelPosition[Instr] |  | ||||||
| 	OrderedBlockLabels []*LabelPosition[Instr] |  | ||||||
| 
 |  | ||||||
| 	// PerBlockHead and PerBlockEnd are the head and tail of the instruction list per currently-compiled ssa.BasicBlock. |  | ||||||
| 	PerBlockHead, PerBlockEnd *Instr |  | ||||||
| 	// PendingInstructions are the instructions which are not yet emitted into the instruction list. |  | ||||||
| 	PendingInstructions []*Instr |  | ||||||
| 
 |  | ||||||
| 	// SsaBlockIDToLabels maps an SSA block ID to the label. |  | ||||||
| 	SsaBlockIDToLabels []Label |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func NewExecutableContextT[Instr any]( |  | ||||||
| 	resetInstruction func(*Instr), |  | ||||||
| 	setNext func(*Instr, *Instr), |  | ||||||
| 	setPrev func(*Instr, *Instr), |  | ||||||
| 	asNop func(*Instr), |  | ||||||
| ) *ExecutableContextT[Instr] { |  | ||||||
| 	return &ExecutableContextT[Instr]{ |  | ||||||
| 		InstructionPool:   wazevoapi.NewPool[Instr](resetInstruction), |  | ||||||
| 		asNop:             asNop, |  | ||||||
| 		setNext:           setNext, |  | ||||||
| 		setPrev:           setPrev, |  | ||||||
| 		labelPositionPool: wazevoapi.NewPool[LabelPosition[Instr]](resetLabelPosition[Instr]), |  | ||||||
| 		NextLabel:         LabelInvalid, |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func resetLabelPosition[T any](l *LabelPosition[T]) { |  | ||||||
| 	*l = LabelPosition[T]{} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // StartLoweringFunction implements ExecutableContext. |  | ||||||
| func (e *ExecutableContextT[Instr]) StartLoweringFunction(max ssa.BasicBlockID) { |  | ||||||
| 	imax := int(max) |  | ||||||
| 	if len(e.SsaBlockIDToLabels) <= imax { |  | ||||||
| 		// Eagerly allocate labels for the blocks since the underlying slice will be used for the next iteration. |  | ||||||
| 		e.SsaBlockIDToLabels = append(e.SsaBlockIDToLabels, make([]Label, imax+1)...) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (e *ExecutableContextT[Instr]) StartBlock(blk ssa.BasicBlock) { |  | ||||||
| 	e.CurrentSSABlk = blk |  | ||||||
| 
 |  | ||||||
| 	l := e.SsaBlockIDToLabels[e.CurrentSSABlk.ID()] |  | ||||||
| 	if l == LabelInvalid { |  | ||||||
| 		l = e.AllocateLabel() |  | ||||||
| 		e.SsaBlockIDToLabels[blk.ID()] = l |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	end := e.allocateNop0() |  | ||||||
| 	e.PerBlockHead, e.PerBlockEnd = end, end |  | ||||||
| 
 |  | ||||||
| 	labelPos := e.GetOrAllocateLabelPosition(l) |  | ||||||
| 	e.OrderedBlockLabels = append(e.OrderedBlockLabels, labelPos) |  | ||||||
| 	labelPos.Begin, labelPos.End = end, end |  | ||||||
| 	labelPos.SB = blk |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // EndBlock implements ExecutableContext. |  | ||||||
| func (e *ExecutableContextT[T]) EndBlock() { |  | ||||||
| 	// Insert nop0 as the head of the block for convenience to simplify the logic of inserting instructions. |  | ||||||
| 	e.insertAtPerBlockHead(e.allocateNop0()) |  | ||||||
| 
 |  | ||||||
| 	l := e.SsaBlockIDToLabels[e.CurrentSSABlk.ID()] |  | ||||||
| 	e.LabelPositions[l].Begin = e.PerBlockHead |  | ||||||
| 
 |  | ||||||
| 	if e.CurrentSSABlk.EntryBlock() { |  | ||||||
| 		e.RootInstr = e.PerBlockHead |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (e *ExecutableContextT[T]) insertAtPerBlockHead(i *T) { |  | ||||||
| 	if e.PerBlockHead == nil { |  | ||||||
| 		e.PerBlockHead = i |  | ||||||
| 		e.PerBlockEnd = i |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
| 	e.setNext(i, e.PerBlockHead) |  | ||||||
| 	e.setPrev(e.PerBlockHead, i) |  | ||||||
| 	e.PerBlockHead = i |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // FlushPendingInstructions implements ExecutableContext. |  | ||||||
| func (e *ExecutableContextT[T]) FlushPendingInstructions() { |  | ||||||
| 	l := len(e.PendingInstructions) |  | ||||||
| 	if l == 0 { |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
| 	for i := l - 1; i >= 0; i-- { // reverse because we lower instructions in reverse order. |  | ||||||
| 		e.insertAtPerBlockHead(e.PendingInstructions[i]) |  | ||||||
| 	} |  | ||||||
| 	e.PendingInstructions = e.PendingInstructions[:0] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (e *ExecutableContextT[T]) Reset() { |  | ||||||
| 	e.labelPositionPool.Reset() |  | ||||||
| 	e.InstructionPool.Reset() |  | ||||||
| 	for i := range e.LabelPositions { |  | ||||||
| 		e.LabelPositions[i] = nil |  | ||||||
| 	} |  | ||||||
| 	e.PendingInstructions = e.PendingInstructions[:0] |  | ||||||
| 	e.OrderedBlockLabels = e.OrderedBlockLabels[:0] |  | ||||||
| 	e.RootInstr = nil |  | ||||||
| 	e.SsaBlockIDToLabels = e.SsaBlockIDToLabels[:0] |  | ||||||
| 	e.PerBlockHead, e.PerBlockEnd = nil, nil |  | ||||||
| 	e.NextLabel = LabelInvalid |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // AllocateLabel allocates an unused label. |  | ||||||
| func (e *ExecutableContextT[T]) AllocateLabel() Label { |  | ||||||
| 	e.NextLabel++ |  | ||||||
| 	return e.NextLabel |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (e *ExecutableContextT[T]) GetOrAllocateLabelPosition(l Label) *LabelPosition[T] { |  | ||||||
| 	if len(e.LabelPositions) <= int(l) { |  | ||||||
| 		e.LabelPositions = append(e.LabelPositions, make([]*LabelPosition[T], int(l)+1-len(e.LabelPositions))...) |  | ||||||
| 	} |  | ||||||
| 	ret := e.LabelPositions[l] |  | ||||||
| 	if ret == nil { |  | ||||||
| 		ret = e.labelPositionPool.Allocate() |  | ||||||
| 		ret.L = l |  | ||||||
| 		e.LabelPositions[l] = ret |  | ||||||
| 	} |  | ||||||
| 	return ret |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (e *ExecutableContextT[T]) GetOrAllocateSSABlockLabel(blk ssa.BasicBlock) Label { |  | ||||||
| 	if blk.ReturnBlock() { |  | ||||||
| 		return LabelReturn |  | ||||||
| 	} |  | ||||||
| 	l := e.SsaBlockIDToLabels[blk.ID()] |  | ||||||
| 	if l == LabelInvalid { |  | ||||||
| 		l = e.AllocateLabel() |  | ||||||
| 		e.SsaBlockIDToLabels[blk.ID()] = l |  | ||||||
| 	} |  | ||||||
| 	return l |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (e *ExecutableContextT[T]) allocateNop0() *T { |  | ||||||
| 	i := e.InstructionPool.Allocate() |  | ||||||
| 	e.asNop(i) |  | ||||||
| 	return i |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LinkAdjacentBlocks implements backend.Machine. |  | ||||||
| func (e *ExecutableContextT[T]) LinkAdjacentBlocks(prev, next ssa.BasicBlock) { |  | ||||||
| 	prevLabelPos := e.LabelPositions[e.GetOrAllocateSSABlockLabel(prev)] |  | ||||||
| 	nextLabelPos := e.LabelPositions[e.GetOrAllocateSSABlockLabel(next)] |  | ||||||
| 	e.setNext(prevLabelPos.End, nextLabelPos.Begin) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LabelPosition represents the regions of the generated code which the label represents. |  | ||||||
| type LabelPosition[Instr any] struct { |  | ||||||
| 	SB           ssa.BasicBlock |  | ||||||
| 	L            Label |  | ||||||
| 	Begin, End   *Instr |  | ||||||
| 	BinaryOffset int64 |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Label represents a position in the generated code which is either |  | ||||||
| // a real instruction or the constant InstructionPool (e.g. jump tables). |  | ||||||
| // |  | ||||||
| // This is exactly the same as the traditional "label" in assembly code. |  | ||||||
| type Label uint32 |  | ||||||
| 
 |  | ||||||
| const ( |  | ||||||
| 	LabelInvalid Label = 0 |  | ||||||
| 	LabelReturn  Label = math.MaxUint32 |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // String implements backend.Machine. |  | ||||||
| func (l Label) String() string { |  | ||||||
| 	return fmt.Sprintf("L%d", l) |  | ||||||
| } |  | ||||||
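The core trick of the deleted FlushPendingInstructions survives verbatim on the amd64 machine later in this diff: because lowering walks each block back to front, the pending buffer is drained in reverse and prepended at the block head, which restores forward program order. A self-contained sketch of why that works:

package main

import "fmt"

func main() {
	// The block is built head-first; a later SSA instruction (the return)
	// has already been placed.
	block := []string{"ret"}
	// Machine code for the SSA instruction currently being lowered, in the
	// forward order it was emitted into the pending buffer.
	pending := []string{"load x", "add 1"}
	// Drain pending in reverse, prepending each entry at the head, exactly
	// the shape of FlushPendingInstructions; forward order is preserved.
	for i := len(pending) - 1; i >= 0; i-- {
		block = append([]string{pending[i]}, block...)
	}
	fmt.Println(block) // [load x add 1 ret]
}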
|  | @ -14,7 +14,6 @@ var calleeSavedVRegs = []regalloc.VReg{ | ||||||
| 
 | 
 | ||||||
| // CompileGoFunctionTrampoline implements backend.Machine. | // CompileGoFunctionTrampoline implements backend.Machine. | ||||||
| func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig *ssa.Signature, needModuleContextPtr bool) []byte { | func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig *ssa.Signature, needModuleContextPtr bool) []byte { | ||||||
| 	ectx := m.ectx |  | ||||||
| 	argBegin := 1 // Skips exec context by default. | 	argBegin := 1 // Skips exec context by default. | ||||||
| 	if needModuleContextPtr { | 	if needModuleContextPtr { | ||||||
| 		argBegin++ | 		argBegin++ | ||||||
|  | @ -25,7 +24,7 @@ func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig * | ||||||
| 	m.currentABI = abi | 	m.currentABI = abi | ||||||
| 
 | 
 | ||||||
| 	cur := m.allocateNop() | 	cur := m.allocateNop() | ||||||
| 	ectx.RootInstr = cur | 	m.rootInstr = cur | ||||||
| 
 | 
 | ||||||
| 	// Execution context is always the first argument. | 	// Execution context is always the first argument. | ||||||
| 	execCtrPtr := raxVReg | 	execCtrPtr := raxVReg | ||||||
|  | @ -272,7 +271,7 @@ func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig * | ||||||
| 	cur = m.revertRBPRSP(cur) | 	cur = m.revertRBPRSP(cur) | ||||||
| 	linkInstr(cur, m.allocateInstr().asRet()) | 	linkInstr(cur, m.allocateInstr().asRet()) | ||||||
| 
 | 
 | ||||||
| 	m.encodeWithoutSSA(ectx.RootInstr) | 	m.encodeWithoutSSA(m.rootInstr) | ||||||
| 	return m.c.Buf() | 	return m.c.Buf() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -347,10 +346,8 @@ var stackGrowSaveVRegs = []regalloc.VReg{ | ||||||
| 
 | 
 | ||||||
| // CompileStackGrowCallSequence implements backend.Machine. | // CompileStackGrowCallSequence implements backend.Machine. | ||||||
| func (m *machine) CompileStackGrowCallSequence() []byte { | func (m *machine) CompileStackGrowCallSequence() []byte { | ||||||
| 	ectx := m.ectx |  | ||||||
| 
 |  | ||||||
| 	cur := m.allocateNop() | 	cur := m.allocateNop() | ||||||
| 	ectx.RootInstr = cur | 	m.rootInstr = cur | ||||||
| 
 | 
 | ||||||
| 	cur = m.setupRBPRSP(cur) | 	cur = m.setupRBPRSP(cur) | ||||||
| 
 | 
 | ||||||
|  | @ -379,7 +376,7 @@ func (m *machine) CompileStackGrowCallSequence() []byte { | ||||||
| 	cur = m.revertRBPRSP(cur) | 	cur = m.revertRBPRSP(cur) | ||||||
| 	linkInstr(cur, m.allocateInstr().asRet()) | 	linkInstr(cur, m.allocateInstr().asRet()) | ||||||
| 
 | 
 | ||||||
| 	m.encodeWithoutSSA(ectx.RootInstr) | 	m.encodeWithoutSSA(m.rootInstr) | ||||||
| 	return m.c.Buf() | 	return m.c.Buf() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
							
								
								
									
33	vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/amd64/instr.go (generated) (vendored)
							|  | @ -17,16 +17,6 @@ type instruction struct { | ||||||
| 	kind                instructionKind | 	kind                instructionKind | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Next implements regalloc.Instr. |  | ||||||
| func (i *instruction) Next() regalloc.Instr { |  | ||||||
| 	return i.next |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Prev implements regalloc.Instr. |  | ||||||
| func (i *instruction) Prev() regalloc.Instr { |  | ||||||
| 	return i.prev |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // IsCall implements regalloc.Instr. | // IsCall implements regalloc.Instr. | ||||||
| func (i *instruction) IsCall() bool { return i.kind == call } | func (i *instruction) IsCall() bool { return i.kind == call } | ||||||
| 
 | 
 | ||||||
|  | @ -36,9 +26,6 @@ func (i *instruction) IsIndirectCall() bool { return i.kind == callIndirect } | ||||||
| // IsReturn implements regalloc.Instr. | // IsReturn implements regalloc.Instr. | ||||||
| func (i *instruction) IsReturn() bool { return i.kind == ret } | func (i *instruction) IsReturn() bool { return i.kind == ret } | ||||||
| 
 | 
 | ||||||
| // AddedBeforeRegAlloc implements regalloc.Instr. |  | ||||||
| func (i *instruction) AddedBeforeRegAlloc() bool { return i.addedBeforeRegAlloc } |  | ||||||
| 
 |  | ||||||
| // String implements regalloc.Instr. | // String implements regalloc.Instr. | ||||||
| func (i *instruction) String() string { | func (i *instruction) String() string { | ||||||
| 	switch i.kind { | 	switch i.kind { | ||||||
|  | @ -651,26 +638,14 @@ func resetInstruction(i *instruction) { | ||||||
| 	*i = instruction{} | 	*i = instruction{} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func setNext(i *instruction, next *instruction) { | func (i *instruction) asNop0WithLabel(label label) *instruction { //nolint | ||||||
| 	i.next = next |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func setPrev(i *instruction, prev *instruction) { |  | ||||||
| 	i.prev = prev |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func asNop(i *instruction) { |  | ||||||
| 	i.kind = nop0 |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (i *instruction) asNop0WithLabel(label backend.Label) *instruction { //nolint |  | ||||||
| 	i.kind = nop0 | 	i.kind = nop0 | ||||||
| 	i.u1 = uint64(label) | 	i.u1 = uint64(label) | ||||||
| 	return i | 	return i | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (i *instruction) nop0Label() backend.Label { | func (i *instruction) nop0Label() label { | ||||||
| 	return backend.Label(i.u1) | 	return label(i.u1) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| type instructionKind byte | type instructionKind byte | ||||||
|  | @ -1161,7 +1136,7 @@ func (i *instruction) asJmp(target operand) *instruction { | ||||||
| 	return i | 	return i | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (i *instruction) jmpLabel() backend.Label { | func (i *instruction) jmpLabel() label { | ||||||
| 	switch i.kind { | 	switch i.kind { | ||||||
| 	case jmp, jmpIf, lea, xmmUnaryRmR: | 	case jmp, jmpIf, lea, xmmUnaryRmR: | ||||||
| 		return i.op1.label() | 		return i.op1.label() | ||||||
|  |  | ||||||
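The deleted Next/Prev/AddedBeforeRegAlloc accessors existed only to satisfy a register-allocator interface; the new allocator (see regalloc.NewAllocator[*instruction, *labelPosition, *regAllocFn] in machine.go below) is generic over the concrete types, so such wrappers become unnecessary. A rough sketch of that pattern, with invented names and an invented constraint:

package main

import "fmt"

// instrConstraint is a hypothetical constraint: the allocator only needs a
// Stringer, and it is instantiated with the concrete pointer type, so the
// machine's own code reaches prev/next as plain fields without wrappers.
type instrConstraint interface{ String() string }

type allocator[I instrConstraint] struct{ seen []I }

func (a *allocator[I]) visit(i I) { a.seen = append(a.seen, i) }

type instruction struct {
	prev, next *instruction // plain fields, no Prev()/Next() methods needed
	name       string
}

func (i *instruction) String() string { return i.name }

func main() {
	var a allocator[*instruction]
	mov := &instruction{name: "mov"}
	ret := &instruction{name: "ret", prev: mov}
	mov.next = ret
	a.visit(mov)
	a.visit(mov.next)
	fmt.Println(a.seen[0], a.seen[1]) // mov ret
}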
|  | @ -130,9 +130,9 @@ func (m *machine) lowerAddendsToAmode(x, y addend, offBase uint32) *amode { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerAddend(x *backend.SSAValueDefinition) addend { | func (m *machine) lowerAddend(x backend.SSAValueDefinition) addend { | ||||||
| 	if x.IsFromBlockParam() { | 	if !x.IsFromInstr() { | ||||||
| 		return addend{x.BlkParamVReg, 0, 0} | 		return addend{m.c.VRegOf(x.V), 0, 0} | ||||||
| 	} | 	} | ||||||
| 	// Ensure the addend is not referenced in multiple places; we will discard nested Iadds. | 	// Ensure the addend is not referenced in multiple places; we will discard nested Iadds. | ||||||
| 	op := m.c.MatchInstrOneOf(x, addendsMatchOpcodes[:]) | 	op := m.c.MatchInstrOneOf(x, addendsMatchOpcodes[:]) | ||||||
|  |  | ||||||
							
								
								
									
366	vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/amd64/machine.go (generated) (vendored)
							|  | @ -16,18 +16,13 @@ import ( | ||||||
| 
 | 
 | ||||||
| // NewBackend returns a new backend for arm64. | // NewBackend returns a new backend for arm64. | ||||||
| func NewBackend() backend.Machine { | func NewBackend() backend.Machine { | ||||||
| 	ectx := backend.NewExecutableContextT[instruction]( | 	m := &machine{ | ||||||
| 		resetInstruction, |  | ||||||
| 		setNext, |  | ||||||
| 		setPrev, |  | ||||||
| 		asNop, |  | ||||||
| 	) |  | ||||||
| 	return &machine{ |  | ||||||
| 		ectx:                                ectx, |  | ||||||
| 		cpuFeatures:                         platform.CpuFeatures, | 		cpuFeatures:                         platform.CpuFeatures, | ||||||
| 		regAlloc:                            regalloc.NewAllocator(regInfo), | 		regAlloc:                            regalloc.NewAllocator[*instruction, *labelPosition, *regAllocFn](regInfo), | ||||||
| 		spillSlots:                          map[regalloc.VRegID]int64{}, | 		spillSlots:                          map[regalloc.VRegID]int64{}, | ||||||
| 		amodePool:                           wazevoapi.NewPool[amode](nil), | 		amodePool:                           wazevoapi.NewPool[amode](nil), | ||||||
|  | 		labelPositionPool:                   wazevoapi.NewIDedPool[labelPosition](resetLabelPosition), | ||||||
|  | 		instrPool:                           wazevoapi.NewPool[instruction](resetInstruction), | ||||||
| 		constSwizzleMaskConstIndex:          -1, | 		constSwizzleMaskConstIndex:          -1, | ||||||
| 		constSqmulRoundSatIndex:             -1, | 		constSqmulRoundSatIndex:             -1, | ||||||
| 		constI8x16SHLMaskTableIndex:         -1, | 		constI8x16SHLMaskTableIndex:         -1, | ||||||
|  | @ -41,23 +36,46 @@ func NewBackend() backend.Machine { | ||||||
| 		constExtAddPairwiseI16x8uMask1Index: -1, | 		constExtAddPairwiseI16x8uMask1Index: -1, | ||||||
| 		constExtAddPairwiseI16x8uMask2Index: -1, | 		constExtAddPairwiseI16x8uMask2Index: -1, | ||||||
| 	} | 	} | ||||||
|  | 	m.regAllocFn.m = m | ||||||
|  | 	return m | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| type ( | type ( | ||||||
| 	// machine implements backend.Machine for amd64. | 	// machine implements backend.Machine for amd64. | ||||||
| 	machine struct { | 	machine struct { | ||||||
| 		c                        backend.Compiler | 		c                        backend.Compiler | ||||||
| 		ectx                     *backend.ExecutableContextT[instruction] |  | ||||||
| 		stackBoundsCheckDisabled bool | 		stackBoundsCheckDisabled bool | ||||||
| 
 | 
 | ||||||
|  | 		instrPool wazevoapi.Pool[instruction] | ||||||
| 		amodePool wazevoapi.Pool[amode] | 		amodePool wazevoapi.Pool[amode] | ||||||
| 
 | 
 | ||||||
| 		cpuFeatures platform.CpuFeatureFlags | 		cpuFeatures platform.CpuFeatureFlags | ||||||
| 
 | 
 | ||||||
| 		regAlloc        regalloc.Allocator | 		regAlloc        regalloc.Allocator[*instruction, *labelPosition, *regAllocFn] | ||||||
| 		regAllocFn      *backend.RegAllocFunction[*instruction, *machine] | 		regAllocFn      regAllocFn | ||||||
| 		regAllocStarted bool | 		regAllocStarted bool | ||||||
| 
 | 
 | ||||||
|  | 		// labelPositionPool is the pool of labelPosition. The pool id is the label; | ||||||
|  | 		// a label no greater than maxSSABlockID doubles as the corresponding ssa.BasicBlockID. | ||||||
|  | 		labelPositionPool wazevoapi.IDedPool[labelPosition] | ||||||
|  | 		// nextLabel is the next label to be allocated. The first free label comes after maxSSABlockID | ||||||
|  | 		// so that each SSA block can keep a label identical to its block ID, which is useful for debugging. | ||||||
|  | 		nextLabel label | ||||||
|  | 		// rootInstr is the first instruction of the function. | ||||||
|  | 		rootInstr *instruction | ||||||
|  | 		// currentLabelPos is the currently-compiled ssa.BasicBlock's labelPosition. | ||||||
|  | 		currentLabelPos *labelPosition | ||||||
|  | 		// orderedSSABlockLabelPos is the ordered list of labelPosition in the generated code for each ssa.BasicBlock. | ||||||
|  | 		orderedSSABlockLabelPos []*labelPosition | ||||||
|  | 		// returnLabelPos is the labelPosition for the return block. | ||||||
|  | 		returnLabelPos labelPosition | ||||||
|  | 		// perBlockHead and perBlockEnd are the head and tail of the instruction list per currently-compiled ssa.BasicBlock. | ||||||
|  | 		perBlockHead, perBlockEnd *instruction | ||||||
|  | 		// pendingInstructions are the instructions which are not yet emitted into the instruction list. | ||||||
|  | 		pendingInstructions []*instruction | ||||||
|  | 		// maxSSABlockID is the maximum ssa.BasicBlockID in the current function. | ||||||
|  | 		maxSSABlockID label | ||||||
|  | 
 | ||||||
| 		spillSlotSize int64 | 		spillSlotSize int64 | ||||||
| 		spillSlots    map[regalloc.VRegID]int64 | 		spillSlots    map[regalloc.VRegID]int64 | ||||||
| 		currentABI    *backend.FunctionABI | 		currentABI    *backend.FunctionABI | ||||||
|  | @ -67,7 +85,10 @@ type ( | ||||||
| 
 | 
 | ||||||
| 		labelResolutionPends []labelResolutionPend | 		labelResolutionPends []labelResolutionPend | ||||||
| 
 | 
 | ||||||
|  | 		// jmpTableTargets holds the labels of the jump table targets. | ||||||
| 		jmpTableTargets [][]uint32 | 		jmpTableTargets [][]uint32 | ||||||
|  | 		// jmpTableTargetsNext is the index into the jmpTableTargets slice to be used for the next jump table. | ||||||
|  | 		jmpTableTargetsNext int | ||||||
| 		consts              []_const | 		consts              []_const | ||||||
| 
 | 
 | ||||||
| 		constSwizzleMaskConstIndex, constSqmulRoundSatIndex, | 		constSwizzleMaskConstIndex, constSqmulRoundSatIndex, | ||||||
|  | @ -81,7 +102,8 @@ type ( | ||||||
| 	_const struct { | 	_const struct { | ||||||
| 		lo, hi   uint64 | 		lo, hi   uint64 | ||||||
| 		_var     []byte | 		_var     []byte | ||||||
| 		label  *labelPosition | 		label    label | ||||||
|  | 		labelPos *labelPosition | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	labelResolutionPend struct { | 	labelResolutionPend struct { | ||||||
|  | @ -90,22 +112,73 @@ type ( | ||||||
| 		// imm32Offset is the offset of the last 4 bytes of the instruction. | 		// imm32Offset is the offset of the last 4 bytes of the instruction. | ||||||
| 		imm32Offset int64 | 		imm32Offset int64 | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	labelPosition = backend.LabelPosition[instruction] |  | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func (m *machine) getOrAllocateConstLabel(i *int, _var []byte) backend.Label { | type ( | ||||||
|  | 	// label represents a position in the generated code which is either | ||||||
|  | 	// a real instruction or the constant InstructionPool (e.g. jump tables). | ||||||
|  | 	// | ||||||
|  | 	// This is exactly the same as the traditional "label" in assembly code. | ||||||
|  | 	label uint32 | ||||||
|  | 
 | ||||||
|  | 	// labelPosition represents the regions of the generated code which the label represents. | ||||||
|  | 	// This implements regalloc.Block. | ||||||
|  | 	labelPosition struct { | ||||||
|  | 		// sb is not nil if this corresponds to a ssa.BasicBlock. | ||||||
|  | 		sb ssa.BasicBlock | ||||||
|  | 		// cur is used to walk through the instructions in the block during the register allocation. | ||||||
|  | 		cur, | ||||||
|  | 		// begin and end are the first and last instructions of the block. | ||||||
|  | 		begin, end *instruction | ||||||
|  | 		// binaryOffset is the offset in the binary where the label is located. | ||||||
|  | 		binaryOffset int64 | ||||||
|  | 	} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // String implements fmt.Stringer. | ||||||
|  | func (l label) String() string { | ||||||
|  | 	return fmt.Sprintf("L%d", l) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func resetLabelPosition(l *labelPosition) { | ||||||
|  | 	*l = labelPosition{} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const labelReturn = math.MaxUint32 | ||||||
|  | 
 | ||||||
|  | func ssaBlockLabel(sb ssa.BasicBlock) label { | ||||||
|  | 	if sb.ReturnBlock() { | ||||||
|  | 		return labelReturn | ||||||
|  | 	} | ||||||
|  | 	return label(sb.ID()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getOrAllocateSSABlockLabelPosition returns the labelPosition for the given basic block. | ||||||
|  | func (m *machine) getOrAllocateSSABlockLabelPosition(sb ssa.BasicBlock) *labelPosition { | ||||||
|  | 	if sb.ReturnBlock() { | ||||||
|  | 		m.returnLabelPos.sb = sb | ||||||
|  | 		return &m.returnLabelPos | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	l := ssaBlockLabel(sb) | ||||||
|  | 	pos := m.labelPositionPool.GetOrAllocate(int(l)) | ||||||
|  | 	pos.sb = sb | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
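The labeling scheme above is worth spelling out: labels 0 through maxSSABlockID mirror the ssa.BasicBlockIDs one to one, fresh labels start past that range (nextLabel is seeded in StartLoweringFunction below), and the return block gets the math.MaxUint32 sentinel. A condensed, self-contained sketch; the fresh method models assumed allocateLabel behavior, since that body is outside these hunks:

package main

import (
	"fmt"
	"math"
)

type label uint32

const labelReturn label = math.MaxUint32

type labeler struct {
	maxSSABlockID label // labels <= this value double as block IDs
	nextLabel     label // first free label, seeded past the block IDs
}

func newLabeler(maxBlockID uint32) *labeler {
	return &labeler{maxSSABlockID: label(maxBlockID), nextLabel: label(maxBlockID) + 1}
}

// blockLabel mirrors ssaBlockLabel above: identity for ordinary blocks,
// the sentinel for the return block.
func (lb *labeler) blockLabel(id uint32, isReturn bool) label {
	if isReturn {
		return labelReturn
	}
	return label(id)
}

// fresh hands out the next free label (assumed allocateLabel behavior).
func (lb *labeler) fresh() label {
	l := lb.nextLabel
	lb.nextLabel++
	return l
}

func main() {
	lb := newLabeler(7)
	fmt.Println(lb.blockLabel(3, false)) // 3, identical to the block ID
	fmt.Println(lb.fresh())              // 8, first label past the blocks
	fmt.Println(lb.blockLabel(0, true))  // 4294967295
}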
|  | 
 | ||||||
|  | func (m *machine) getOrAllocateConstLabel(i *int, _var []byte) label { | ||||||
| 	index := *i | 	index := *i | ||||||
| 	if index == -1 { | 	if index == -1 { | ||||||
| 		label := m.allocateLabel() | 		l, pos := m.allocateLabel() | ||||||
| 		index = len(m.consts) | 		index = len(m.consts) | ||||||
| 		m.consts = append(m.consts, _const{ | 		m.consts = append(m.consts, _const{ | ||||||
| 			_var:     _var, | 			_var:     _var, | ||||||
| 			label: label, | 			label:    l, | ||||||
|  | 			labelPos: pos, | ||||||
| 		}) | 		}) | ||||||
| 		*i = index | 		*i = index | ||||||
| 	} | 	} | ||||||
| 	return m.consts[index].label.L | 	return m.consts[index].label | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Reset implements backend.Machine. | // Reset implements backend.Machine. | ||||||
|  | @ -120,18 +193,20 @@ func (m *machine) Reset() { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	m.stackBoundsCheckDisabled = false | 	m.stackBoundsCheckDisabled = false | ||||||
| 	m.ectx.Reset() |  | ||||||
| 
 |  | ||||||
| 	m.regAllocFn.Reset() |  | ||||||
| 	m.regAlloc.Reset() | 	m.regAlloc.Reset() | ||||||
|  | 	m.labelPositionPool.Reset() | ||||||
|  | 	m.instrPool.Reset() | ||||||
| 	m.regAllocStarted = false | 	m.regAllocStarted = false | ||||||
| 	m.clobberedRegs = m.clobberedRegs[:0] | 	m.clobberedRegs = m.clobberedRegs[:0] | ||||||
| 
 | 
 | ||||||
| 	m.spillSlotSize = 0 | 	m.spillSlotSize = 0 | ||||||
| 	m.maxRequiredStackSizeForCalls = 0 | 	m.maxRequiredStackSizeForCalls = 0 | ||||||
|  | 	m.perBlockHead, m.perBlockEnd, m.rootInstr = nil, nil, nil | ||||||
|  | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
|  | 	m.orderedSSABlockLabelPos = m.orderedSSABlockLabelPos[:0] | ||||||
| 
 | 
 | ||||||
| 	m.amodePool.Reset() | 	m.amodePool.Reset() | ||||||
| 	m.jmpTableTargets = m.jmpTableTargets[:0] | 	m.jmpTableTargetsNext = 0 | ||||||
| 	m.constSwizzleMaskConstIndex = -1 | 	m.constSwizzleMaskConstIndex = -1 | ||||||
| 	m.constSqmulRoundSatIndex = -1 | 	m.constSqmulRoundSatIndex = -1 | ||||||
| 	m.constI8x16SHLMaskTableIndex = -1 | 	m.constI8x16SHLMaskTableIndex = -1 | ||||||
|  | @ -146,8 +221,63 @@ func (m *machine) Reset() { | ||||||
| 	m.constExtAddPairwiseI16x8uMask2Index = -1 | 	m.constExtAddPairwiseI16x8uMask2Index = -1 | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // ExecutableContext implements backend.Machine. | // StartLoweringFunction implements backend.Machine StartLoweringFunction. | ||||||
| func (m *machine) ExecutableContext() backend.ExecutableContext { return m.ectx } | func (m *machine) StartLoweringFunction(maxBlockID ssa.BasicBlockID) { | ||||||
|  | 	m.maxSSABlockID = label(maxBlockID) | ||||||
|  | 	m.nextLabel = label(maxBlockID) + 1 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LinkAdjacentBlocks implements backend.Machine. | ||||||
|  | func (m *machine) LinkAdjacentBlocks(prev, next ssa.BasicBlock) { | ||||||
|  | 	prevPos, nextPos := m.getOrAllocateSSABlockLabelPosition(prev), m.getOrAllocateSSABlockLabelPosition(next) | ||||||
|  | 	prevPos.end.next = nextPos.begin | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StartBlock implements backend.Machine. | ||||||
|  | func (m *machine) StartBlock(blk ssa.BasicBlock) { | ||||||
|  | 	m.currentLabelPos = m.getOrAllocateSSABlockLabelPosition(blk) | ||||||
|  | 	labelPos := m.currentLabelPos | ||||||
|  | 	end := m.allocateNop() | ||||||
|  | 	m.perBlockHead, m.perBlockEnd = end, end | ||||||
|  | 	labelPos.begin, labelPos.end = end, end | ||||||
|  | 	m.orderedSSABlockLabelPos = append(m.orderedSSABlockLabelPos, labelPos) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // EndBlock implements backend.Machine. | ||||||
|  | func (m *machine) EndBlock() { | ||||||
|  | 	// Insert nop0 as the head of the block for convenience to simplify the logic of inserting instructions. | ||||||
|  | 	m.insertAtPerBlockHead(m.allocateNop()) | ||||||
|  | 
 | ||||||
|  | 	m.currentLabelPos.begin = m.perBlockHead | ||||||
|  | 
 | ||||||
|  | 	if m.currentLabelPos.sb.EntryBlock() { | ||||||
|  | 		m.rootInstr = m.perBlockHead | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m *machine) insertAtPerBlockHead(i *instruction) { | ||||||
|  | 	if m.perBlockHead == nil { | ||||||
|  | 		m.perBlockHead = i | ||||||
|  | 		m.perBlockEnd = i | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	i.next = m.perBlockHead | ||||||
|  | 	m.perBlockHead.prev = i | ||||||
|  | 	m.perBlockHead = i | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FlushPendingInstructions implements backend.Machine. | ||||||
|  | func (m *machine) FlushPendingInstructions() { | ||||||
|  | 	l := len(m.pendingInstructions) | ||||||
|  | 	if l == 0 { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	for i := l - 1; i >= 0; i-- { // reverse because we lower instructions in reverse order. | ||||||
|  | 		m.insertAtPerBlockHead(m.pendingInstructions[i]) | ||||||
|  | 	} | ||||||
|  | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
|  | } | ||||||
| 
 | 
 | ||||||
| // DisableStackCheck implements backend.Machine. | // DisableStackCheck implements backend.Machine. | ||||||
| func (m *machine) DisableStackCheck() { m.stackBoundsCheckDisabled = true } | func (m *machine) DisableStackCheck() { m.stackBoundsCheckDisabled = true } | ||||||
|  | @ -155,23 +285,17 @@ func (m *machine) DisableStackCheck() { m.stackBoundsCheckDisabled = true } | ||||||
| // SetCompiler implements backend.Machine. | // SetCompiler implements backend.Machine. | ||||||
| func (m *machine) SetCompiler(c backend.Compiler) { | func (m *machine) SetCompiler(c backend.Compiler) { | ||||||
| 	m.c = c | 	m.c = c | ||||||
| 	m.regAllocFn = backend.NewRegAllocFunction[*instruction, *machine](m, c.SSABuilder(), c) | 	m.regAllocFn.ssaB = c.SSABuilder() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // SetCurrentABI implements backend.Machine. | // SetCurrentABI implements backend.Machine. | ||||||
| func (m *machine) SetCurrentABI(abi *backend.FunctionABI) { | func (m *machine) SetCurrentABI(abi *backend.FunctionABI) { m.currentABI = abi } | ||||||
| 	m.currentABI = abi |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| // RegAlloc implements backend.Machine. | // RegAlloc implements backend.Machine. | ||||||
| func (m *machine) RegAlloc() { | func (m *machine) RegAlloc() { | ||||||
| 	rf := m.regAllocFn | 	rf := m.regAllocFn | ||||||
| 	for _, pos := range m.ectx.OrderedBlockLabels { |  | ||||||
| 		rf.AddBlock(pos.SB, pos.L, pos.Begin, pos.End) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	m.regAllocStarted = true | 	m.regAllocStarted = true | ||||||
| 	m.regAlloc.DoAllocation(rf) | 	m.regAlloc.DoAllocation(&rf) | ||||||
| 	// Now that we know the final spill slot size, we must align spillSlotSize to 16 bytes. | 	// Now that we know the final spill slot size, we must align spillSlotSize to 16 bytes. | ||||||
| 	m.spillSlotSize = (m.spillSlotSize + 15) &^ 15 | 	m.spillSlotSize = (m.spillSlotSize + 15) &^ 15 | ||||||
| } | } | ||||||
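The (m.spillSlotSize + 15) &^ 15 expression at the end of RegAlloc rounds the spill area up to a 16-byte multiple, presumably for ABI stack alignment. The bit trick in isolation:

package main

import "fmt"

// alignUp16 rounds n up to the next multiple of 16: adding 15 overshoots
// into the next 16-byte bucket unless n is already aligned, and &^ 15
// clears the low four bits to land exactly on the bucket boundary.
func alignUp16(n int64) int64 { return (n + 15) &^ 15 }

func main() {
	for _, n := range []int64{0, 1, 15, 16, 17, 40} {
		fmt.Println(n, "->", alignUp16(n)) // 0->0 1->16 15->16 16->16 17->32 40->48
	}
}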
|  | @ -184,49 +308,54 @@ func (m *machine) InsertReturn() { | ||||||
| 
 | 
 | ||||||
| // LowerSingleBranch implements backend.Machine. | // LowerSingleBranch implements backend.Machine. | ||||||
| func (m *machine) LowerSingleBranch(b *ssa.Instruction) { | func (m *machine) LowerSingleBranch(b *ssa.Instruction) { | ||||||
| 	ectx := m.ectx |  | ||||||
| 	switch b.Opcode() { | 	switch b.Opcode() { | ||||||
| 	case ssa.OpcodeJump: | 	case ssa.OpcodeJump: | ||||||
| 		_, _, targetBlk := b.BranchData() | 		_, _, targetBlkID := b.BranchData() | ||||||
| 		if b.IsFallthroughJump() { | 		if b.IsFallthroughJump() { | ||||||
| 			return | 			return | ||||||
| 		} | 		} | ||||||
| 		jmp := m.allocateInstr() | 		jmp := m.allocateInstr() | ||||||
| 		target := ectx.GetOrAllocateSSABlockLabel(targetBlk) | 		target := ssaBlockLabel(m.c.SSABuilder().BasicBlock(targetBlkID)) | ||||||
| 		if target == backend.LabelReturn { | 		if target == labelReturn { | ||||||
| 			jmp.asRet() | 			jmp.asRet() | ||||||
| 		} else { | 		} else { | ||||||
| 			jmp.asJmp(newOperandLabel(target)) | 			jmp.asJmp(newOperandLabel(target)) | ||||||
| 		} | 		} | ||||||
| 		m.insert(jmp) | 		m.insert(jmp) | ||||||
| 	case ssa.OpcodeBrTable: | 	case ssa.OpcodeBrTable: | ||||||
| 		index, target := b.BrTableData() | 		index, targetBlkIDs := b.BrTableData() | ||||||
| 		m.lowerBrTable(index, target) | 		m.lowerBrTable(index, targetBlkIDs) | ||||||
| 	default: | 	default: | ||||||
| 		panic("BUG: unexpected branch opcode: " + b.Opcode().String()) | 		panic("BUG: unexpected branch opcode: " + b.Opcode().String()) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) addJmpTableTarget(targets []ssa.BasicBlock) (index int) { | func (m *machine) addJmpTableTarget(targets ssa.Values) (index int) { | ||||||
| 	// TODO: reuse the slice! | 	if m.jmpTableTargetsNext == len(m.jmpTableTargets) { | ||||||
| 	labels := make([]uint32, len(targets)) | 		m.jmpTableTargets = append(m.jmpTableTargets, make([]uint32, 0, len(targets.View()))) | ||||||
| 	for j, target := range targets { | 	} | ||||||
| 		labels[j] = uint32(m.ectx.GetOrAllocateSSABlockLabel(target)) | 
 | ||||||
|  | 	index = m.jmpTableTargetsNext | ||||||
|  | 	m.jmpTableTargetsNext++ | ||||||
|  | 	m.jmpTableTargets[index] = m.jmpTableTargets[index][:0] | ||||||
|  | 	for _, targetBlockID := range targets.View() { | ||||||
|  | 		target := m.c.SSABuilder().BasicBlock(ssa.BasicBlockID(targetBlockID)) | ||||||
|  | 		m.jmpTableTargets[index] = append(m.jmpTableTargets[index], uint32(ssaBlockLabel(target))) | ||||||
| 	} | 	} | ||||||
| 	index = len(m.jmpTableTargets) |  | ||||||
| 	m.jmpTableTargets = append(m.jmpTableTargets, labels) |  | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
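addJmpTableTarget above resolves the old "TODO: reuse the slice!": a cursor (jmpTableTargetsNext) walks over slices kept from previous compilations, and Reset merely rewinds the cursor so the backing arrays survive. The pattern in isolation:

package main

import "fmt"

// tables models the jmpTableTargets + jmpTableTargetsNext pair: a pool of
// reusable []uint32 slices addressed by a cursor.
type tables struct {
	data [][]uint32
	next int
}

func (t *tables) add(targets []uint32) (index int) {
	if t.next == len(t.data) { // grow the pool only when it runs dry
		t.data = append(t.data, make([]uint32, 0, len(targets)))
	}
	index = t.next
	t.next++
	// Truncate to zero length but keep capacity, then refill.
	t.data[index] = append(t.data[index][:0], targets...)
	return index
}

// reset rewinds the cursor without freeing the slices, as machine.Reset
// does with m.jmpTableTargetsNext = 0.
func (t *tables) reset() { t.next = 0 }

func main() {
	var t tables
	i := t.add([]uint32{1, 2, 3})
	t.reset()
	j := t.add([]uint32{4, 5})   // reuses the allocation made for index i
	fmt.Println(i, j, t.data[j]) // 0 0 [4 5]
}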
| 
 | 
 | ||||||
| var condBranchMatches = [...]ssa.Opcode{ssa.OpcodeIcmp, ssa.OpcodeFcmp} | var condBranchMatches = [...]ssa.Opcode{ssa.OpcodeIcmp, ssa.OpcodeFcmp} | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerBrTable(index ssa.Value, targets []ssa.BasicBlock) { | func (m *machine) lowerBrTable(index ssa.Value, targets ssa.Values) { | ||||||
| 	_v := m.getOperand_Reg(m.c.ValueDefinition(index)) | 	_v := m.getOperand_Reg(m.c.ValueDefinition(index)) | ||||||
| 	v := m.copyToTmp(_v.reg()) | 	v := m.copyToTmp(_v.reg()) | ||||||
| 
 | 
 | ||||||
|  | 	targetCount := len(targets.View()) | ||||||
|  | 
 | ||||||
| 	// First, we need to do the bounds check. | 	// First, we need to do the bounds check. | ||||||
| 	maxIndex := m.c.AllocateVReg(ssa.TypeI32) | 	maxIndex := m.c.AllocateVReg(ssa.TypeI32) | ||||||
| 	m.lowerIconst(maxIndex, uint64(len(targets)-1), false) | 	m.lowerIconst(maxIndex, uint64(targetCount-1), false) | ||||||
| 	cmp := m.allocateInstr().asCmpRmiR(true, newOperandReg(maxIndex), v, false) | 	cmp := m.allocateInstr().asCmpRmiR(true, newOperandReg(maxIndex), v, false) | ||||||
| 	m.insert(cmp) | 	m.insert(cmp) | ||||||
| 
 | 
 | ||||||
|  | @ -255,23 +384,22 @@ func (m *machine) lowerBrTable(index ssa.Value, targets []ssa.BasicBlock) { | ||||||
| 
 | 
 | ||||||
| 	jmpTable := m.allocateInstr() | 	jmpTable := m.allocateInstr() | ||||||
| 	targetSliceIndex := m.addJmpTableTarget(targets) | 	targetSliceIndex := m.addJmpTableTarget(targets) | ||||||
| 	jmpTable.asJmpTableSequence(targetSliceIndex, len(targets)) | 	jmpTable.asJmpTableSequence(targetSliceIndex, targetCount) | ||||||
| 	m.insert(jmpTable) | 	m.insert(jmpTable) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // LowerConditionalBranch implements backend.Machine. | // LowerConditionalBranch implements backend.Machine. | ||||||
| func (m *machine) LowerConditionalBranch(b *ssa.Instruction) { | func (m *machine) LowerConditionalBranch(b *ssa.Instruction) { | ||||||
| 	exctx := m.ectx | 	cval, args, targetBlkID := b.BranchData() | ||||||
| 	cval, args, targetBlk := b.BranchData() |  | ||||||
| 	if len(args) > 0 { | 	if len(args) > 0 { | ||||||
| 		panic(fmt.Sprintf( | 		panic(fmt.Sprintf( | ||||||
| 			"conditional branch shouldn't have args; likely a bug in critical edge splitting: from %s to %s", | 			"conditional branch shouldn't have args; likely a bug in critical edge splitting: from %s to %s", | ||||||
| 			exctx.CurrentSSABlk, | 			m.currentLabelPos.sb, | ||||||
| 			targetBlk, | 			targetBlkID, | ||||||
| 		)) | 		)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	target := exctx.GetOrAllocateSSABlockLabel(targetBlk) | 	target := ssaBlockLabel(m.c.SSABuilder().BasicBlock(targetBlkID)) | ||||||
| 	cvalDef := m.c.ValueDefinition(cval) | 	cvalDef := m.c.ValueDefinition(cval) | ||||||
| 
 | 
 | ||||||
| 	switch m.c.MatchInstrOneOf(cvalDef, condBranchMatches[:]) { | 	switch m.c.MatchInstrOneOf(cvalDef, condBranchMatches[:]) { | ||||||
|  | @ -1272,9 +1400,9 @@ func (m *machine) lowerVconst(dst regalloc.VReg, lo, hi uint64) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	load := m.allocateInstr() | 	load := m.allocateInstr() | ||||||
| 	constLabel := m.allocateLabel() | 	l, pos := m.allocateLabel() | ||||||
| 	m.consts = append(m.consts, _const{label: constLabel, lo: lo, hi: hi}) | 	m.consts = append(m.consts, _const{label: l, labelPos: pos, lo: lo, hi: hi}) | ||||||
| 	load.asXmmUnaryRmR(sseOpcodeMovdqu, newOperandMem(m.newAmodeRipRel(constLabel.L)), dst) | 	load.asXmmUnaryRmR(sseOpcodeMovdqu, newOperandMem(m.newAmodeRipRel(l)), dst) | ||||||
| 	m.insert(load) | 	m.insert(load) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -1473,21 +1601,24 @@ func (m *machine) lowerExitIfTrueWithCode(execCtx regalloc.VReg, cond ssa.Value, | ||||||
| 	jmpIf.asJmpIf(condFromSSAIntCmpCond(c).invert(), newOperandLabel(l)) | 	jmpIf.asJmpIf(condFromSSAIntCmpCond(c).invert(), newOperandLabel(l)) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) tryLowerBandToFlag(x, y *backend.SSAValueDefinition) (ok bool) { | func (m *machine) tryLowerBandToFlag(x, y backend.SSAValueDefinition) (ok bool) { | ||||||
| 	var target *backend.SSAValueDefinition | 	var target backend.SSAValueDefinition | ||||||
|  | 	var got bool | ||||||
| 	if x.IsFromInstr() && x.Instr.Constant() && x.Instr.ConstantVal() == 0 { | 	if x.IsFromInstr() && x.Instr.Constant() && x.Instr.ConstantVal() == 0 { | ||||||
| 		if m.c.MatchInstr(y, ssa.OpcodeBand) { | 		if m.c.MatchInstr(y, ssa.OpcodeBand) { | ||||||
| 			target = y | 			target = y | ||||||
|  | 			got = true | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if y.IsFromInstr() && y.Instr.Constant() && y.Instr.ConstantVal() == 0 { | 	if y.IsFromInstr() && y.Instr.Constant() && y.Instr.ConstantVal() == 0 { | ||||||
| 		if m.c.MatchInstr(x, ssa.OpcodeBand) { | 		if m.c.MatchInstr(x, ssa.OpcodeBand) { | ||||||
| 			target = x | 			target = x | ||||||
|  | 			got = true | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if target == nil { | 	if !got { | ||||||
| 		return false | 		return false | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -1522,7 +1653,7 @@ func (m *machine) allocateExitInstructions(execCtx, exitCodeReg regalloc.VReg) ( | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerExitWithCode(execCtx regalloc.VReg, code wazevoapi.ExitCode) (afterLabel backend.Label) { | func (m *machine) lowerExitWithCode(execCtx regalloc.VReg, code wazevoapi.ExitCode) (afterLabel label) { | ||||||
| 	exitCodeReg := rbpVReg | 	exitCodeReg := rbpVReg | ||||||
| 	saveRsp, saveRbp, setExitCode := m.allocateExitInstructions(execCtx, exitCodeReg) | 	saveRsp, saveRbp, setExitCode := m.allocateExitInstructions(execCtx, exitCodeReg) | ||||||
| 
 | 
 | ||||||
|  | @ -1819,9 +1950,9 @@ func (m *machine) lowerCall(si *ssa.Instruction) { | ||||||
| 
 | 
 | ||||||
| // callerGenVRegToFunctionArg is the opposite of GenFunctionArgToVReg, which is used to generate the | // callerGenVRegToFunctionArg is the opposite of GenFunctionArgToVReg, which is used to generate the | ||||||
| // caller side of the function call. | // caller side of the function call. | ||||||
| func (m *machine) callerGenVRegToFunctionArg(a *backend.FunctionABI, argIndex int, reg regalloc.VReg, def *backend.SSAValueDefinition, stackSlotSize int64) { | func (m *machine) callerGenVRegToFunctionArg(a *backend.FunctionABI, argIndex int, reg regalloc.VReg, def backend.SSAValueDefinition, stackSlotSize int64) { | ||||||
| 	arg := &a.Args[argIndex] | 	arg := &a.Args[argIndex] | ||||||
| 	if def != nil && def.IsFromInstr() { | 	if def.IsFromInstr() { | ||||||
| 		// Constant instructions are inlined. | 		// Constant instructions are inlined. | ||||||
| 		if inst := def.Instr; inst.Constant() { | 		if inst := def.Instr; inst.Constant() { | ||||||
| 			m.insertLoadConstant(inst, reg) | 			m.insertLoadConstant(inst, reg) | ||||||
|  | @ -1904,25 +2035,20 @@ func (m *machine) InsertMove(dst, src regalloc.VReg, typ ssa.Type) { | ||||||
| 
 | 
 | ||||||
| // Format implements backend.Machine. | // Format implements backend.Machine. | ||||||
| func (m *machine) Format() string { | func (m *machine) Format() string { | ||||||
| 	ectx := m.ectx | 	begins := map[*instruction]label{} | ||||||
| 	begins := map[*instruction]backend.Label{} | 	for l := label(0); l < m.nextLabel; l++ { | ||||||
| 	for _, pos := range ectx.LabelPositions { | 		pos := m.labelPositionPool.Get(int(l)) | ||||||
| 		if pos != nil { | 		if pos != nil { | ||||||
| 			begins[pos.Begin] = pos.L | 			begins[pos.begin] = l | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	irBlocks := map[backend.Label]ssa.BasicBlockID{} |  | ||||||
| 	for i, l := range ectx.SsaBlockIDToLabels { |  | ||||||
| 		irBlocks[l] = ssa.BasicBlockID(i) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	var lines []string | 	var lines []string | ||||||
| 	for cur := ectx.RootInstr; cur != nil; cur = cur.next { | 	for cur := m.rootInstr; cur != nil; cur = cur.next { | ||||||
| 		if l, ok := begins[cur]; ok { | 		if l, ok := begins[cur]; ok { | ||||||
| 			var labelStr string | 			var labelStr string | ||||||
| 			if blkID, ok := irBlocks[l]; ok { | 			if l <= m.maxSSABlockID { | ||||||
| 				labelStr = fmt.Sprintf("%s (SSA Block: %s):", l, blkID) | 				labelStr = fmt.Sprintf("%s (SSA Block: blk%d):", l, l) | ||||||
| 			} else { | 			} else { | ||||||
| 				labelStr = fmt.Sprintf("%s:", l) | 				labelStr = fmt.Sprintf("%s:", l) | ||||||
| 			} | 			} | ||||||
|  | @ -1935,9 +2061,9 @@ func (m *machine) Format() string { | ||||||
| 	} | 	} | ||||||
| 	for _, vc := range m.consts { | 	for _, vc := range m.consts { | ||||||
| 		if vc._var == nil { | 		if vc._var == nil { | ||||||
| 			lines = append(lines, fmt.Sprintf("%s: const [%d %d]", vc.label.L, vc.lo, vc.hi)) | 			lines = append(lines, fmt.Sprintf("%s: const [%d %d]", vc.label, vc.lo, vc.hi)) | ||||||
| 		} else { | 		} else { | ||||||
| 			lines = append(lines, fmt.Sprintf("%s: const %#x", vc.label.L, vc._var)) | 			lines = append(lines, fmt.Sprintf("%s: const %#x", vc.label, vc._var)) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	return "\n" + strings.Join(lines, "\n") + "\n" | 	return "\n" + strings.Join(lines, "\n") + "\n" | ||||||
|  | @ -1945,18 +2071,14 @@ func (m *machine) Format() string { | ||||||
| 
 | 
 | ||||||
| func (m *machine) encodeWithoutSSA(root *instruction) { | func (m *machine) encodeWithoutSSA(root *instruction) { | ||||||
| 	m.labelResolutionPends = m.labelResolutionPends[:0] | 	m.labelResolutionPends = m.labelResolutionPends[:0] | ||||||
| 	ectx := m.ectx |  | ||||||
| 
 |  | ||||||
| 	bufPtr := m.c.BufPtr() | 	bufPtr := m.c.BufPtr() | ||||||
| 	for cur := root; cur != nil; cur = cur.next { | 	for cur := root; cur != nil; cur = cur.next { | ||||||
| 		offset := int64(len(*bufPtr)) | 		offset := int64(len(*bufPtr)) | ||||||
| 		if cur.kind == nop0 { | 		if cur.kind == nop0 { | ||||||
| 			l := cur.nop0Label() | 			l := cur.nop0Label() | ||||||
| 			if int(l) >= len(ectx.LabelPositions) { | 			pos := m.labelPositionPool.Get(int(l)) | ||||||
| 				continue | 			if pos != nil { | ||||||
| 			} | 				pos.binaryOffset = offset | ||||||
| 			if pos := ectx.LabelPositions[l]; pos != nil { |  | ||||||
| 				pos.BinaryOffset = offset |  | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
|  | @ -1973,7 +2095,7 @@ func (m *machine) encodeWithoutSSA(root *instruction) { | ||||||
| 		switch p.instr.kind { | 		switch p.instr.kind { | ||||||
| 		case jmp, jmpIf, lea: | 		case jmp, jmpIf, lea: | ||||||
| 			target := p.instr.jmpLabel() | 			target := p.instr.jmpLabel() | ||||||
| 			targetOffset := ectx.LabelPositions[target].BinaryOffset | 			targetOffset := m.labelPositionPool.Get(int(target)).binaryOffset | ||||||
| 			imm32Offset := p.imm32Offset | 			imm32Offset := p.imm32Offset | ||||||
| 			jmpOffset := int32(targetOffset - (p.imm32Offset + 4)) // +4 because RIP points to the next instruction. | 			jmpOffset := int32(targetOffset - (p.imm32Offset + 4)) // +4 because RIP points to the next instruction. | ||||||
| 			binary.LittleEndian.PutUint32((*bufPtr)[imm32Offset:], uint32(jmpOffset)) | 			binary.LittleEndian.PutUint32((*bufPtr)[imm32Offset:], uint32(jmpOffset)) | ||||||
|  | @ -1985,33 +2107,33 @@ func (m *machine) encodeWithoutSSA(root *instruction) { | ||||||
| 
 | 
 | ||||||
| // Encode implements backend.Machine Encode. | // Encode implements backend.Machine Encode. | ||||||
| func (m *machine) Encode(ctx context.Context) (err error) { | func (m *machine) Encode(ctx context.Context) (err error) { | ||||||
| 	ectx := m.ectx |  | ||||||
| 	bufPtr := m.c.BufPtr() | 	bufPtr := m.c.BufPtr() | ||||||
| 
 | 
 | ||||||
| 	var fn string | 	var fn string | ||||||
| 	var fnIndex int | 	var fnIndex int | ||||||
| 	var labelToSSABlockID map[backend.Label]ssa.BasicBlockID | 	var labelPosToLabel map[*labelPosition]label | ||||||
| 	if wazevoapi.PerfMapEnabled { | 	if wazevoapi.PerfMapEnabled { | ||||||
| 		fn = wazevoapi.GetCurrentFunctionName(ctx) | 		fn = wazevoapi.GetCurrentFunctionName(ctx) | ||||||
| 		labelToSSABlockID = make(map[backend.Label]ssa.BasicBlockID) | 		labelPosToLabel = make(map[*labelPosition]label) | ||||||
| 		for i, l := range ectx.SsaBlockIDToLabels { | 		for i := 0; i <= m.labelPositionPool.MaxIDEncountered(); i++ { | ||||||
| 			labelToSSABlockID[l] = ssa.BasicBlockID(i) | 			pos := m.labelPositionPool.Get(i) | ||||||
|  | 			labelPosToLabel[pos] = label(i) | ||||||
| 		} | 		} | ||||||
| 		fnIndex = wazevoapi.GetCurrentFunctionIndex(ctx) | 		fnIndex = wazevoapi.GetCurrentFunctionIndex(ctx) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	m.labelResolutionPends = m.labelResolutionPends[:0] | 	m.labelResolutionPends = m.labelResolutionPends[:0] | ||||||
| 	for _, pos := range ectx.OrderedBlockLabels { | 	for _, pos := range m.orderedSSABlockLabelPos { | ||||||
| 		offset := int64(len(*bufPtr)) | 		offset := int64(len(*bufPtr)) | ||||||
| 		pos.BinaryOffset = offset | 		pos.binaryOffset = offset | ||||||
| 		for cur := pos.Begin; cur != pos.End.next; cur = cur.next { | 		for cur := pos.begin; cur != pos.end.next; cur = cur.next { | ||||||
| 			offset := int64(len(*bufPtr)) | 			offset := int64(len(*bufPtr)) | ||||||
| 
 | 
 | ||||||
| 			switch cur.kind { | 			switch cur.kind { | ||||||
| 			case nop0: | 			case nop0: | ||||||
| 				l := cur.nop0Label() | 				l := cur.nop0Label() | ||||||
| 				if pos := ectx.LabelPositions[l]; pos != nil { | 				if pos := m.labelPositionPool.Get(int(l)); pos != nil { | ||||||
| 					pos.BinaryOffset = offset | 					pos.binaryOffset = offset | ||||||
| 				} | 				} | ||||||
| 			case sourceOffsetInfo: | 			case sourceOffsetInfo: | ||||||
| 				m.c.AddSourceOffsetInfo(offset, cur.sourceOffsetInfo()) | 				m.c.AddSourceOffsetInfo(offset, cur.sourceOffsetInfo()) | ||||||
|  | @ -2026,22 +2148,16 @@ func (m *machine) Encode(ctx context.Context) (err error) { | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if wazevoapi.PerfMapEnabled { | 		if wazevoapi.PerfMapEnabled { | ||||||
| 			l := pos.L | 			l := labelPosToLabel[pos] | ||||||
| 			var labelStr string |  | ||||||
| 			if blkID, ok := labelToSSABlockID[l]; ok { |  | ||||||
| 				labelStr = fmt.Sprintf("%s::SSA_Block[%s]", l, blkID) |  | ||||||
| 			} else { |  | ||||||
| 				labelStr = l.String() |  | ||||||
| 			} |  | ||||||
| 			size := int64(len(*bufPtr)) - offset | 			size := int64(len(*bufPtr)) - offset | ||||||
| 			wazevoapi.PerfMap.AddModuleEntry(fnIndex, offset, uint64(size), fmt.Sprintf("%s:::::%s", fn, labelStr)) | 			wazevoapi.PerfMap.AddModuleEntry(fnIndex, offset, uint64(size), fmt.Sprintf("%s:::::%s", fn, l)) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for i := range m.consts { | 	for i := range m.consts { | ||||||
| 		offset := int64(len(*bufPtr)) | 		offset := int64(len(*bufPtr)) | ||||||
| 		vc := &m.consts[i] | 		vc := &m.consts[i] | ||||||
| 		vc.label.BinaryOffset = offset | 		vc.labelPos.binaryOffset = offset | ||||||
| 		if vc._var == nil { | 		if vc._var == nil { | ||||||
| 			lo, hi := vc.lo, vc.hi | 			lo, hi := vc.lo, vc.hi | ||||||
| 			m.c.Emit8Bytes(lo) | 			m.c.Emit8Bytes(lo) | ||||||
|  | @ -2059,7 +2175,7 @@ func (m *machine) Encode(ctx context.Context) (err error) { | ||||||
| 		switch p.instr.kind { | 		switch p.instr.kind { | ||||||
| 		case jmp, jmpIf, lea, xmmUnaryRmR: | 		case jmp, jmpIf, lea, xmmUnaryRmR: | ||||||
| 			target := p.instr.jmpLabel() | 			target := p.instr.jmpLabel() | ||||||
| 			targetOffset := ectx.LabelPositions[target].BinaryOffset | 			targetOffset := m.labelPositionPool.Get(int(target)).binaryOffset | ||||||
| 			imm32Offset := p.imm32Offset | 			imm32Offset := p.imm32Offset | ||||||
| 			jmpOffset := int32(targetOffset - (p.imm32Offset + 4)) // +4 because RIP points to the next instruction. | 			jmpOffset := int32(targetOffset - (p.imm32Offset + 4)) // +4 because RIP points to the next instruction. | ||||||
| 			binary.LittleEndian.PutUint32(buf[imm32Offset:], uint32(jmpOffset)) | 			binary.LittleEndian.PutUint32(buf[imm32Offset:], uint32(jmpOffset)) | ||||||
|  | @ -2068,7 +2184,7 @@ func (m *machine) Encode(ctx context.Context) (err error) { | ||||||
| 			// Each entry is an 8-byte offset from the beginning of the jmpTableIsland instruction. | 			// Each entry is an 8-byte offset from the beginning of the jmpTableIsland instruction. | ||||||
| 			targets := m.jmpTableTargets[p.instr.u1] | 			targets := m.jmpTableTargets[p.instr.u1] | ||||||
| 			for i, l := range targets { | 			for i, l := range targets { | ||||||
| 				targetOffset := ectx.LabelPositions[backend.Label(l)].BinaryOffset | 				targetOffset := m.labelPositionPool.Get(int(l)).binaryOffset | ||||||
| 				jmpOffset := targetOffset - tableBegin | 				jmpOffset := targetOffset - tableBegin | ||||||
| 				binary.LittleEndian.PutUint64(buf[tableBegin+int64(i)*8:], uint64(jmpOffset)) | 				binary.LittleEndian.PutUint64(buf[tableBegin+int64(i)*8:], uint64(jmpOffset)) | ||||||
| 			} | 			} | ||||||
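Both fix-ups above are plain offset arithmetic over the emitted buffer; a minimal standalone sketch of the rel32 case (illustrative, not the wazero API):

	package main

	import "encoding/binary"

	// patchRel32 writes a jump displacement at imm32Offset. The displacement is
	// measured from the end of the 4-byte immediate, because by the time the
	// jump executes, RIP already points at the next instruction (hence the +4).
	func patchRel32(buf []byte, imm32Offset, targetOffset int64) {
		rel := int32(targetOffset - (imm32Offset + 4))
		binary.LittleEndian.PutUint32(buf[imm32Offset:], uint32(rel))
	}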
|  | @ -2097,7 +2213,7 @@ func (m *machine) ResolveRelocations(refToBinaryOffset []int, binary []byte, rel | ||||||
| // CallTrampolineIslandInfo implements backend.Machine CallTrampolineIslandInfo. | // CallTrampolineIslandInfo implements backend.Machine CallTrampolineIslandInfo. | ||||||
| func (m *machine) CallTrampolineIslandInfo(_ int) (_, _ int, _ error) { return } | func (m *machine) CallTrampolineIslandInfo(_ int) (_, _ int, _ error) { return } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerIcmpToFlag(xd, yd *backend.SSAValueDefinition, _64 bool) { | func (m *machine) lowerIcmpToFlag(xd, yd backend.SSAValueDefinition, _64 bool) { | ||||||
| 	x := m.getOperand_Reg(xd) | 	x := m.getOperand_Reg(xd) | ||||||
| 	y := m.getOperand_Mem_Imm32_Reg(yd) | 	y := m.getOperand_Mem_Imm32_Reg(yd) | ||||||
| 	cmp := m.allocateInstr().asCmpRmiR(true, y, x.reg(), _64) | 	cmp := m.allocateInstr().asCmpRmiR(true, y, x.reg(), _64) | ||||||
|  | @ -2140,7 +2256,7 @@ func (m *machine) lowerFcmpToFlags(instr *ssa.Instruction) (f1, f2 cond, and boo | ||||||
| 
 | 
 | ||||||
| // allocateInstr allocates an instruction. | // allocateInstr allocates an instruction. | ||||||
| func (m *machine) allocateInstr() *instruction { | func (m *machine) allocateInstr() *instruction { | ||||||
| 	instr := m.ectx.InstructionPool.Allocate() | 	instr := m.instrPool.Allocate() | ||||||
| 	if !m.regAllocStarted { | 	if !m.regAllocStarted { | ||||||
| 		instr.addedBeforeRegAlloc = true | 		instr.addedBeforeRegAlloc = true | ||||||
| 	} | 	} | ||||||
|  | @ -2154,24 +2270,22 @@ func (m *machine) allocateNop() *instruction { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) insert(i *instruction) { | func (m *machine) insert(i *instruction) { | ||||||
| 	ectx := m.ectx | 	m.pendingInstructions = append(m.pendingInstructions, i) | ||||||
| 	ectx.PendingInstructions = append(ectx.PendingInstructions, i) |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) allocateBrTarget() (nop *instruction, l backend.Label) { //nolint | func (m *machine) allocateBrTarget() (nop *instruction, l label) { //nolint | ||||||
| 	pos := m.allocateLabel() | 	l, pos := m.allocateLabel() | ||||||
| 	l = pos.L |  | ||||||
| 	nop = m.allocateInstr() | 	nop = m.allocateInstr() | ||||||
| 	nop.asNop0WithLabel(l) | 	nop.asNop0WithLabel(l) | ||||||
| 	pos.Begin, pos.End = nop, nop | 	pos.begin, pos.end = nop, nop | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) allocateLabel() *labelPosition { | func (m *machine) allocateLabel() (label, *labelPosition) { | ||||||
| 	ectx := m.ectx | 	l := m.nextLabel | ||||||
| 	l := ectx.AllocateLabel() | 	pos := m.labelPositionPool.GetOrAllocate(int(l)) | ||||||
| 	pos := ectx.GetOrAllocateLabelPosition(l) | 	m.nextLabel++ | ||||||
| 	return pos | 	return l, pos | ||||||
| } | } | ||||||
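allocateBrTarget and allocateLabel together give the machine its forward-branch anchors: a label is now just an integer ID, and its position object is pooled under that ID. An illustrative flow (a sketch, not part of the diff; the asJmp constructor is assumed from the jmp instruction kind handled in Encode above):

	// exampleForwardBranch: emit a jump to a label first, then place the
	// anchoring nop0 where the jump should land. Encode later records the
	// nop0's offset as the label's binaryOffset.
	func (m *machine) exampleForwardBranch() {
		nop, l := m.allocateBrTarget()
		jmp := m.allocateInstr()
		jmp.asJmp(newOperandLabel(l)) // asJmp assumed; newOperandLabel is shown below
		m.insert(jmp)
		m.insert(nop) // the branch target lands here
	}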
| 
 | 
 | ||||||
| func (m *machine) getVRegSpillSlotOffsetFromSP(id regalloc.VRegID, size byte) int64 { | func (m *machine) getVRegSpillSlotOffsetFromSP(id regalloc.VRegID, size byte) int64 { | ||||||
|  | @ -3185,22 +3299,22 @@ func (m *machine) lowerShuffle(x, y ssa.Value, lo, hi uint64, ret ssa.Value) { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	xmaskLabel := m.allocateLabel() | 	xl, xmaskPos := m.allocateLabel() | ||||||
| 	m.consts = append(m.consts, _const{lo: xMask[0], hi: xMask[1], label: xmaskLabel}) | 	m.consts = append(m.consts, _const{lo: xMask[0], hi: xMask[1], label: xl, labelPos: xmaskPos}) | ||||||
| 	ymaskLabel := m.allocateLabel() | 	yl, ymaskPos := m.allocateLabel() | ||||||
| 	m.consts = append(m.consts, _const{lo: yMask[0], hi: yMask[1], label: ymaskLabel}) | 	m.consts = append(m.consts, _const{lo: yMask[0], hi: yMask[1], label: yl, labelPos: ymaskPos}) | ||||||
| 
 | 
 | ||||||
| 	xx, yy := m.getOperand_Reg(m.c.ValueDefinition(x)), m.getOperand_Reg(m.c.ValueDefinition(y)) | 	xx, yy := m.getOperand_Reg(m.c.ValueDefinition(x)), m.getOperand_Reg(m.c.ValueDefinition(y)) | ||||||
| 	tmpX, tmpY := m.copyToTmp(xx.reg()), m.copyToTmp(yy.reg()) | 	tmpX, tmpY := m.copyToTmp(xx.reg()), m.copyToTmp(yy.reg()) | ||||||
| 
 | 
 | ||||||
| 	// Apply mask to X. | 	// Apply mask to X. | ||||||
| 	tmp := m.c.AllocateVReg(ssa.TypeV128) | 	tmp := m.c.AllocateVReg(ssa.TypeV128) | ||||||
| 	loadMaskLo := m.allocateInstr().asXmmUnaryRmR(sseOpcodeMovdqu, newOperandMem(m.newAmodeRipRel(xmaskLabel.L)), tmp) | 	loadMaskLo := m.allocateInstr().asXmmUnaryRmR(sseOpcodeMovdqu, newOperandMem(m.newAmodeRipRel(xl)), tmp) | ||||||
| 	m.insert(loadMaskLo) | 	m.insert(loadMaskLo) | ||||||
| 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePshufb, newOperandReg(tmp), tmpX)) | 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePshufb, newOperandReg(tmp), tmpX)) | ||||||
| 
 | 
 | ||||||
| 	// Apply mask to Y. | 	// Apply mask to Y. | ||||||
| 	loadMaskHi := m.allocateInstr().asXmmUnaryRmR(sseOpcodeMovdqu, newOperandMem(m.newAmodeRipRel(ymaskLabel.L)), tmp) | 	loadMaskHi := m.allocateInstr().asXmmUnaryRmR(sseOpcodeMovdqu, newOperandMem(m.newAmodeRipRel(yl)), tmp) | ||||||
| 	m.insert(loadMaskHi) | 	m.insert(loadMaskHi) | ||||||
| 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePshufb, newOperandReg(tmp), tmpY)) | 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePshufb, newOperandReg(tmp), tmpY)) | ||||||
| 
 | 
 | ||||||
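pshufb zeroes any destination byte whose mask byte has the high bit (0x80) set, which is what makes the two-mask split work: each 16-byte mask selects only the lanes owned by its source register, and OR-ing the two shuffled halves (done after this hunk) composes the full 32-lane shuffle. The mask derivation happens earlier in lowerShuffle and is not shown here; a sketch of the usual scheme, assuming lane indices 0-31 spanning x then y:

	// shuffleMasks builds the two pshufb masks for a 32-lane shuffle.
	// A mask byte of 0x80 makes pshufb emit zero for that position.
	func shuffleMasks(lanes [16]byte) (xMask, yMask [16]byte) {
		for i, idx := range lanes {
			if idx < 16 {
				xMask[i], yMask[i] = idx, 0x80
			} else {
				xMask[i], yMask[i] = 0x80, idx-16
			}
		}
		return
	}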
|  |  | ||||||
|  | @ -12,7 +12,7 @@ func (m *machine) PostRegAlloc() { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) setupPrologue() { | func (m *machine) setupPrologue() { | ||||||
| 	cur := m.ectx.RootInstr | 	cur := m.rootInstr | ||||||
| 	prevInitInst := cur.next | 	prevInitInst := cur.next | ||||||
| 
 | 
 | ||||||
| 	// At this point, we have the stack layout as follows: | 	// At this point, we have the stack layout as follows: | ||||||
|  | @ -130,14 +130,13 @@ func (m *machine) setupPrologue() { | ||||||
| // 3. Inserts the dec/inc RSP instruction right before/after the call instruction. | // 3. Inserts the dec/inc RSP instruction right before/after the call instruction. | ||||||
| // 4. Lowering that is supposed to be done after regalloc. | // 4. Lowering that is supposed to be done after regalloc. | ||||||
| func (m *machine) postRegAlloc() { | func (m *machine) postRegAlloc() { | ||||||
| 	ectx := m.ectx | 	for cur := m.rootInstr; cur != nil; cur = cur.next { | ||||||
| 	for cur := ectx.RootInstr; cur != nil; cur = cur.next { |  | ||||||
| 		switch k := cur.kind; k { | 		switch k := cur.kind; k { | ||||||
| 		case ret: | 		case ret: | ||||||
| 			m.setupEpilogueAfter(cur.prev) | 			m.setupEpilogueAfter(cur.prev) | ||||||
| 			continue | 			continue | ||||||
| 		case fcvtToSintSequence, fcvtToUintSequence: | 		case fcvtToSintSequence, fcvtToUintSequence: | ||||||
| 			m.ectx.PendingInstructions = m.ectx.PendingInstructions[:0] | 			m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 			if k == fcvtToSintSequence { | 			if k == fcvtToSintSequence { | ||||||
| 				m.lowerFcvtToSintSequenceAfterRegalloc(cur) | 				m.lowerFcvtToSintSequenceAfterRegalloc(cur) | ||||||
| 			} else { | 			} else { | ||||||
|  | @ -146,29 +145,29 @@ func (m *machine) postRegAlloc() { | ||||||
| 			prev := cur.prev | 			prev := cur.prev | ||||||
| 			next := cur.next | 			next := cur.next | ||||||
| 			cur := prev | 			cur := prev | ||||||
| 			for _, instr := range m.ectx.PendingInstructions { | 			for _, instr := range m.pendingInstructions { | ||||||
| 				cur = linkInstr(cur, instr) | 				cur = linkInstr(cur, instr) | ||||||
| 			} | 			} | ||||||
| 			linkInstr(cur, next) | 			linkInstr(cur, next) | ||||||
| 			continue | 			continue | ||||||
| 		case xmmCMov: | 		case xmmCMov: | ||||||
| 			m.ectx.PendingInstructions = m.ectx.PendingInstructions[:0] | 			m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 			m.lowerXmmCmovAfterRegAlloc(cur) | 			m.lowerXmmCmovAfterRegAlloc(cur) | ||||||
| 			prev := cur.prev | 			prev := cur.prev | ||||||
| 			next := cur.next | 			next := cur.next | ||||||
| 			cur := prev | 			cur := prev | ||||||
| 			for _, instr := range m.ectx.PendingInstructions { | 			for _, instr := range m.pendingInstructions { | ||||||
| 				cur = linkInstr(cur, instr) | 				cur = linkInstr(cur, instr) | ||||||
| 			} | 			} | ||||||
| 			linkInstr(cur, next) | 			linkInstr(cur, next) | ||||||
| 			continue | 			continue | ||||||
| 		case idivRemSequence: | 		case idivRemSequence: | ||||||
| 			m.ectx.PendingInstructions = m.ectx.PendingInstructions[:0] | 			m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 			m.lowerIDivRemSequenceAfterRegAlloc(cur) | 			m.lowerIDivRemSequenceAfterRegAlloc(cur) | ||||||
| 			prev := cur.prev | 			prev := cur.prev | ||||||
| 			next := cur.next | 			next := cur.next | ||||||
| 			cur := prev | 			cur := prev | ||||||
| 			for _, instr := range m.ectx.PendingInstructions { | 			for _, instr := range m.pendingInstructions { | ||||||
| 				cur = linkInstr(cur, instr) | 				cur = linkInstr(cur, instr) | ||||||
| 			} | 			} | ||||||
| 			linkInstr(cur, next) | 			linkInstr(cur, next) | ||||||
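All three pseudo-instruction cases repeat one idiom: reset pendingInstructions, let the post-regalloc lowering append the expansion, then splice the expansion into the doubly linked instruction list in place of cur. A hypothetical helper (not in the diff) capturing the splice:

	// replaceWithPending links m.pendingInstructions between cur.prev and
	// cur.next, replacing the pseudo-instruction cur in the chain.
	func (m *machine) replaceWithPending(cur *instruction) {
		prev, next := cur.prev, cur.next
		for _, instr := range m.pendingInstructions {
			prev = linkInstr(prev, instr)
		}
		linkInstr(prev, next)
	}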
|  |  | ||||||
|  | @ -1,13 +1,226 @@ | ||||||
| package amd64 | package amd64 | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend" |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc" | 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc" | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" | 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // InsertMoveBefore implements backend.RegAllocFunctionMachine. | // regAllocFn implements regalloc.Function. | ||||||
| func (m *machine) InsertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | type regAllocFn struct { | ||||||
|  | 	ssaB                   ssa.Builder | ||||||
|  | 	m                      *machine | ||||||
|  | 	loopNestingForestRoots []ssa.BasicBlock | ||||||
|  | 	blockIter              int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // PostOrderBlockIteratorBegin implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) PostOrderBlockIteratorBegin() *labelPosition { | ||||||
|  | 	f.blockIter = len(f.m.orderedSSABlockLabelPos) - 1 | ||||||
|  | 	return f.PostOrderBlockIteratorNext() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // PostOrderBlockIteratorNext implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) PostOrderBlockIteratorNext() *labelPosition { | ||||||
|  | 	if f.blockIter < 0 { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	b := f.m.orderedSSABlockLabelPos[f.blockIter] | ||||||
|  | 	f.blockIter-- | ||||||
|  | 	return b | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReversePostOrderBlockIteratorBegin implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReversePostOrderBlockIteratorBegin() *labelPosition { | ||||||
|  | 	f.blockIter = 0 | ||||||
|  | 	return f.ReversePostOrderBlockIteratorNext() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReversePostOrderBlockIteratorNext implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReversePostOrderBlockIteratorNext() *labelPosition { | ||||||
|  | 	if f.blockIter >= len(f.m.orderedSSABlockLabelPos) { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	b := f.m.orderedSSABlockLabelPos[f.blockIter] | ||||||
|  | 	f.blockIter++ | ||||||
|  | 	return b | ||||||
|  | } | ||||||
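With labelPosition now standing in for basic blocks, these iterators are all the allocator needs to traverse the CFG. Roughly how a driver consumes them (illustrative only; the real loop lives in the regalloc package):

	func walkBlocks(f *regAllocFn) {
		// Liveness-style passes run in post order; allocation-style passes run
		// in reverse post order. Both reuse the single blockIter cursor.
		for b := f.PostOrderBlockIteratorBegin(); b != nil; b = f.PostOrderBlockIteratorNext() {
			_ = b
		}
		for b := f.ReversePostOrderBlockIteratorBegin(); b != nil; b = f.ReversePostOrderBlockIteratorNext() {
			_ = b
		}
	}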
|  | 
 | ||||||
|  | // ClobberedRegisters implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ClobberedRegisters(regs []regalloc.VReg) { | ||||||
|  | 	f.m.clobberedRegs = append(f.m.clobberedRegs[:0], regs...) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestRoots implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LoopNestingForestRoots() int { | ||||||
|  | 	f.loopNestingForestRoots = f.ssaB.LoopNestingForestRoots() | ||||||
|  | 	return len(f.loopNestingForestRoots) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestRoot implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LoopNestingForestRoot(i int) *labelPosition { | ||||||
|  | 	root := f.loopNestingForestRoots[i] | ||||||
|  | 	pos := f.m.getOrAllocateSSABlockLabelPosition(root) | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LowestCommonAncestor implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LowestCommonAncestor(blk1, blk2 *labelPosition) *labelPosition { | ||||||
|  | 	sb := f.ssaB.LowestCommonAncestor(blk1.sb, blk2.sb) | ||||||
|  | 	pos := f.m.getOrAllocateSSABlockLabelPosition(sb) | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Idom implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) Idom(blk *labelPosition) *labelPosition { | ||||||
|  | 	sb := f.ssaB.Idom(blk.sb) | ||||||
|  | 	pos := f.m.getOrAllocateSSABlockLabelPosition(sb) | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SwapBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) SwapBefore(x1, x2, tmp regalloc.VReg, instr *instruction) { | ||||||
|  | 	f.m.swap(instr.prev, x1, x2, tmp) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StoreRegisterBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) StoreRegisterBefore(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertStoreRegisterAt(v, instr, false) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StoreRegisterAfter implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) StoreRegisterAfter(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertStoreRegisterAt(v, instr, true) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReloadRegisterBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReloadRegisterBefore(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertReloadRegisterAt(v, instr, false) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReloadRegisterAfter implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReloadRegisterAfter(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertReloadRegisterAt(v, instr, true) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InsertMoveBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) InsertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | ||||||
|  | 	f.m.insertMoveBefore(dst, src, instr) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestChild implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LoopNestingForestChild(pos *labelPosition, i int) *labelPosition { | ||||||
|  | 	childSB := pos.sb.LoopNestingForestChildren()[i] | ||||||
|  | 	return f.m.getOrAllocateSSABlockLabelPosition(childSB) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Succ implements regalloc.Block. | ||||||
|  | func (f *regAllocFn) Succ(pos *labelPosition, i int) *labelPosition { | ||||||
|  | 	succSB := pos.sb.Succ(i) | ||||||
|  | 	if succSB.ReturnBlock() { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	return f.m.getOrAllocateSSABlockLabelPosition(succSB) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Pred implements regalloc.Block. | ||||||
|  | func (f *regAllocFn) Pred(pos *labelPosition, i int) *labelPosition { | ||||||
|  | 	predSB := pos.sb.Pred(i) | ||||||
|  | 	return f.m.getOrAllocateSSABlockLabelPosition(predSB) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlockParams implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) BlockParams(pos *labelPosition, regs *[]regalloc.VReg) []regalloc.VReg { | ||||||
|  | 	c := f.m.c | ||||||
|  | 	*regs = (*regs)[:0] | ||||||
|  | 	for i := 0; i < pos.sb.Params(); i++ { | ||||||
|  | 		v := c.VRegOf(pos.sb.Param(i)) | ||||||
|  | 		*regs = append(*regs, v) | ||||||
|  | 	} | ||||||
|  | 	return *regs | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ID implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) ID() int32 { | ||||||
|  | 	return int32(pos.sb.ID()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrIteratorBegin implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrIteratorBegin() *instruction { | ||||||
|  | 	ret := pos.begin | ||||||
|  | 	pos.cur = ret | ||||||
|  | 	return ret | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrIteratorNext implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrIteratorNext() *instruction { | ||||||
|  | 	for { | ||||||
|  | 		if pos.cur == pos.end { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		instr := pos.cur.next | ||||||
|  | 		pos.cur = instr | ||||||
|  | 		if instr == nil { | ||||||
|  | 			return nil | ||||||
|  | 		} else if instr.addedBeforeRegAlloc { | ||||||
|  | 			// Only consider instructions added before regalloc. | ||||||
|  | 			return instr | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrRevIteratorBegin implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrRevIteratorBegin() *instruction { | ||||||
|  | 	pos.cur = pos.end | ||||||
|  | 	return pos.cur | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrRevIteratorNext implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrRevIteratorNext() *instruction { | ||||||
|  | 	for { | ||||||
|  | 		if pos.cur == pos.begin { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		instr := pos.cur.prev | ||||||
|  | 		pos.cur = instr | ||||||
|  | 		if instr == nil { | ||||||
|  | 			return nil | ||||||
|  | 		} else if instr.addedBeforeRegAlloc { | ||||||
|  | 			// Only consider instructions added before regalloc. | ||||||
|  | 			return instr | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
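Note the asymmetry: the Begin methods return the boundary instruction unfiltered, while the Next methods skip anything the allocator inserted itself (addedBeforeRegAlloc == false). A usage sketch:

	// countAllocatorVisible counts the instructions the register allocator
	// will visit in this block via the iterator pair above.
	func countAllocatorVisible(pos *labelPosition) int {
		n := 0
		for cur := pos.InstrIteratorBegin(); cur != nil; cur = pos.InstrIteratorNext() {
			n++
		}
		return n
	}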
|  | 
 | ||||||
|  | // FirstInstr implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) FirstInstr() *instruction { return pos.begin } | ||||||
|  | 
 | ||||||
|  | // LastInstrForInsertion implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) LastInstrForInsertion() *instruction { | ||||||
|  | 	return lastInstrForInsertion(pos.begin, pos.end) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Preds implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) Preds() int { return pos.sb.Preds() } | ||||||
|  | 
 | ||||||
|  | // Entry implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) Entry() bool { return pos.sb.EntryBlock() } | ||||||
|  | 
 | ||||||
|  | // Succs implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) Succs() int { return pos.sb.Succs() } | ||||||
|  | 
 | ||||||
|  | // LoopHeader implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) LoopHeader() bool { return pos.sb.LoopHeader() } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestChildren implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) LoopNestingForestChildren() int { | ||||||
|  | 	return len(pos.sb.LoopNestingForestChildren()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m *machine) insertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | ||||||
| 	typ := src.RegType() | 	typ := src.RegType() | ||||||
| 	if typ != dst.RegType() { | 	if typ != dst.RegType() { | ||||||
| 		panic("BUG: src and dst must have the same type") | 		panic("BUG: src and dst must have the same type") | ||||||
|  | @ -26,8 +239,7 @@ func (m *machine) InsertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | ||||||
| 	linkInstr(cur, prevNext) | 	linkInstr(cur, prevNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // InsertStoreRegisterAt implements backend.RegAllocFunctionMachine. | func (m *machine) insertStoreRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { | ||||||
| func (m *machine) InsertStoreRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { |  | ||||||
| 	if !v.IsRealReg() { | 	if !v.IsRealReg() { | ||||||
| 		panic("BUG: VReg must be backed by real reg to be stored") | 		panic("BUG: VReg must be backed by real reg to be stored") | ||||||
| 	} | 	} | ||||||
|  | @ -61,8 +273,7 @@ func (m *machine) InsertStoreRegisterAt(v regalloc.VReg, instr *instruction, aft | ||||||
| 	return linkInstr(cur, prevNext) | 	return linkInstr(cur, prevNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // InsertReloadRegisterAt implements backend.RegAllocFunctionMachine. | func (m *machine) insertReloadRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { | ||||||
| func (m *machine) InsertReloadRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { |  | ||||||
| 	if !v.IsRealReg() { | 	if !v.IsRealReg() { | ||||||
| 		panic("BUG: VReg must be backed by real reg to be stored") | 		panic("BUG: VReg must be backed by real reg to be stored") | ||||||
| 	} | 	} | ||||||
|  | @ -98,13 +309,7 @@ func (m *machine) InsertReloadRegisterAt(v regalloc.VReg, instr *instruction, af | ||||||
| 	return linkInstr(cur, prevNext) | 	return linkInstr(cur, prevNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // ClobberedRegisters implements backend.RegAllocFunctionMachine. | func (m *machine) swap(cur *instruction, x1, x2, tmp regalloc.VReg) { | ||||||
| func (m *machine) ClobberedRegisters(regs []regalloc.VReg) { |  | ||||||
| 	m.clobberedRegs = append(m.clobberedRegs[:0], regs...) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Swap implements backend.RegAllocFunctionMachine. |  | ||||||
| func (m *machine) Swap(cur *instruction, x1, x2, tmp regalloc.VReg) { |  | ||||||
| 	if x1.RegType() == regalloc.RegTypeInt { | 	if x1.RegType() == regalloc.RegTypeInt { | ||||||
| 		prevNext := cur.next | 		prevNext := cur.next | ||||||
| 		xc := m.allocateInstr().asXCHG(x1, newOperandReg(x2), 8) | 		xc := m.allocateInstr().asXCHG(x1, newOperandReg(x2), 8) | ||||||
|  | @ -113,25 +318,24 @@ func (m *machine) Swap(cur *instruction, x1, x2, tmp regalloc.VReg) { | ||||||
| 	} else { | 	} else { | ||||||
| 		if tmp.Valid() { | 		if tmp.Valid() { | ||||||
| 			prevNext := cur.next | 			prevNext := cur.next | ||||||
| 			m.InsertMoveBefore(tmp, x1, prevNext) | 			m.insertMoveBefore(tmp, x1, prevNext) | ||||||
| 			m.InsertMoveBefore(x1, x2, prevNext) | 			m.insertMoveBefore(x1, x2, prevNext) | ||||||
| 			m.InsertMoveBefore(x2, tmp, prevNext) | 			m.insertMoveBefore(x2, tmp, prevNext) | ||||||
| 		} else { | 		} else { | ||||||
| 			prevNext := cur.next | 			prevNext := cur.next | ||||||
| 			r2 := x2.RealReg() | 			r2 := x2.RealReg() | ||||||
| 			// Temporarily spill x1 to stack. | 			// Temporarily spill x1 to stack. | ||||||
| 			cur = m.InsertStoreRegisterAt(x1, cur, true).prev | 			cur = m.insertStoreRegisterAt(x1, cur, true).prev | ||||||
| 			// Then move x2 to x1. | 			// Then move x2 to x1. | ||||||
| 			cur = linkInstr(cur, m.allocateInstr().asXmmUnaryRmR(sseOpcodeMovdqa, newOperandReg(x2), x1)) | 			cur = linkInstr(cur, m.allocateInstr().asXmmUnaryRmR(sseOpcodeMovdqa, newOperandReg(x2), x1)) | ||||||
| 			linkInstr(cur, prevNext) | 			linkInstr(cur, prevNext) | ||||||
| 			// Then reload the original value on x1 from stack to r2. | 			// Then reload the original value on x1 from stack to r2. | ||||||
| 			m.InsertReloadRegisterAt(x1.SetRealReg(r2), cur, true) | 			m.insertReloadRegisterAt(x1.SetRealReg(r2), cur, true) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
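Integer pairs can swap with a single XCHG, but x86-64 has no XMM exchange instruction, so the FP path either rotates through a scratch register or spills one operand. The three insertMoveBefore calls are the classic rotation, shown here as plain assignments:

	// swapViaTmp mirrors the dst/src order of the moves above.
	func swapViaTmp(x1, x2, tmp *uint64) {
		*tmp = *x1 // insertMoveBefore(tmp, x1, ...)
		*x1 = *x2  // insertMoveBefore(x1, x2, ...)
		*x2 = *tmp // insertMoveBefore(x2, tmp, ...)
	}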
| 
 | 
 | ||||||
| // LastInstrForInsertion implements backend.RegAllocFunctionMachine. | func lastInstrForInsertion(begin, end *instruction) *instruction { | ||||||
| func (m *machine) LastInstrForInsertion(begin, end *instruction) *instruction { |  | ||||||
| 	cur := end | 	cur := end | ||||||
| 	for cur.kind == nop0 { | 	for cur.kind == nop0 { | ||||||
| 		cur = cur.prev | 		cur = cur.prev | ||||||
|  | @ -146,8 +350,3 @@ func (m *machine) LastInstrForInsertion(begin, end *instruction) *instruction { | ||||||
| 		return end | 		return end | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 |  | ||||||
| // SSABlockLabel implements backend.RegAllocFunctionMachine. |  | ||||||
| func (m *machine) SSABlockLabel(id ssa.BasicBlockID) backend.Label { |  | ||||||
| 	return m.ectx.SsaBlockIDToLabels[id] |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  | @ -127,7 +127,7 @@ func (m *machine) lowerSqmulRoundSat(x, y, ret ssa.Value) { | ||||||
| 	tmpX := m.copyToTmp(xx.reg()) | 	tmpX := m.copyToTmp(xx.reg()) | ||||||
| 
 | 
 | ||||||
| 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePmulhrsw, yy, tmpX)) | 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePmulhrsw, yy, tmpX)) | ||||||
| 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePcmpeqd, newOperandReg(tmpX), tmp)) | 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePcmpeqw, newOperandReg(tmpX), tmp)) | ||||||
| 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePxor, newOperandReg(tmp), tmpX)) | 	m.insert(m.allocateInstr().asXmmRmR(sseOpcodePxor, newOperandReg(tmp), tmpX)) | ||||||
| 
 | 
 | ||||||
| 	m.copyTo(tmpX, m.c.VRegOf(ret)) | 	m.copyTo(tmpX, m.c.VRegOf(ret)) | ||||||
|  |  | ||||||
|  | @ -59,7 +59,7 @@ func (o *operand) format(_64 bool) string { | ||||||
| 	case operandKindImm32: | 	case operandKindImm32: | ||||||
| 		return fmt.Sprintf("$%d", int32(o.imm32())) | 		return fmt.Sprintf("$%d", int32(o.imm32())) | ||||||
| 	case operandKindLabel: | 	case operandKindLabel: | ||||||
| 		return backend.Label(o.imm32()).String() | 		return label(o.imm32()).String() | ||||||
| 	default: | 	default: | ||||||
| 		panic(fmt.Sprintf("BUG: invalid operand: %s", o.kind)) | 		panic(fmt.Sprintf("BUG: invalid operand: %s", o.kind)) | ||||||
| 	} | 	} | ||||||
|  | @ -85,22 +85,22 @@ func (o *operand) imm32() uint32 { | ||||||
| 	return uint32(o.data) | 	return uint32(o.data) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (o *operand) label() backend.Label { | func (o *operand) label() label { | ||||||
| 	switch o.kind { | 	switch o.kind { | ||||||
| 	case operandKindLabel: | 	case operandKindLabel: | ||||||
| 		return backend.Label(o.data) | 		return label(o.data) | ||||||
| 	case operandKindMem: | 	case operandKindMem: | ||||||
| 		mem := o.addressMode() | 		mem := o.addressMode() | ||||||
| 		if mem.kind() != amodeRipRel { | 		if mem.kind() != amodeRipRel { | ||||||
| 			panic("BUG: invalid label") | 			panic("BUG: invalid label") | ||||||
| 		} | 		} | ||||||
| 		return backend.Label(mem.imm32) | 		return label(mem.imm32) | ||||||
| 	default: | 	default: | ||||||
| 		panic("BUG: invalid operand kind") | 		panic("BUG: invalid operand kind") | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func newOperandLabel(label backend.Label) operand { | func newOperandLabel(label label) operand { | ||||||
| 	return operand{kind: operandKindLabel, data: uint64(label)} | 	return operand{kind: operandKindLabel, data: uint64(label)} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -221,7 +221,7 @@ func (m *machine) newAmodeRegRegShift(imm32 uint32, base, index regalloc.VReg, s | ||||||
| 	return ret | 	return ret | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) newAmodeRipRel(label backend.Label) *amode { | func (m *machine) newAmodeRipRel(label label) *amode { | ||||||
| 	ret := m.amodePool.Allocate() | 	ret := m.amodePool.Allocate() | ||||||
| 	*ret = amode{kindWithShift: uint32(amodeRipRel), imm32: uint32(label)} | 	*ret = amode{kindWithShift: uint32(amodeRipRel), imm32: uint32(label)} | ||||||
| 	return ret | 	return ret | ||||||
|  | @ -246,18 +246,18 @@ func (a *amode) String() string { | ||||||
| 			"%d(%s,%s,%d)", | 			"%d(%s,%s,%d)", | ||||||
| 			int32(a.imm32), formatVRegSized(a.base, true), formatVRegSized(a.index, true), shift) | 			int32(a.imm32), formatVRegSized(a.base, true), formatVRegSized(a.index, true), shift) | ||||||
| 	case amodeRipRel: | 	case amodeRipRel: | ||||||
| 		return fmt.Sprintf("%s(%%rip)", backend.Label(a.imm32)) | 		return fmt.Sprintf("%s(%%rip)", label(a.imm32)) | ||||||
| 	default: | 	default: | ||||||
| 		panic("BUG: invalid amode kind") | 		panic("BUG: invalid amode kind") | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) getOperand_Mem_Reg(def *backend.SSAValueDefinition) (op operand) { | func (m *machine) getOperand_Mem_Reg(def backend.SSAValueDefinition) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return newOperandReg(def.BlkParamVReg) | 		return newOperandReg(m.c.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if def.SSAValue().Type() == ssa.TypeV128 { | 	if def.V.Type() == ssa.TypeV128 { | ||||||
| 		// SIMD instructions require strict memory alignment, so we don't support the memory operand for V128 at the moment. | 		// SIMD instructions require strict memory alignment, so we don't support the memory operand for V128 at the moment. | ||||||
| 		return m.getOperand_Reg(def) | 		return m.getOperand_Reg(def) | ||||||
| 	} | 	} | ||||||
|  | @ -272,9 +272,9 @@ func (m *machine) getOperand_Mem_Reg(def *backend.SSAValueDefinition) (op operan | ||||||
| 	return m.getOperand_Reg(def) | 	return m.getOperand_Reg(def) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) getOperand_Mem_Imm32_Reg(def *backend.SSAValueDefinition) (op operand) { | func (m *machine) getOperand_Mem_Imm32_Reg(def backend.SSAValueDefinition) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return newOperandReg(def.BlkParamVReg) | 		return newOperandReg(m.c.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if m.c.MatchInstr(def, ssa.OpcodeLoad) { | 	if m.c.MatchInstr(def, ssa.OpcodeLoad) { | ||||||
|  | @ -287,9 +287,9 @@ func (m *machine) getOperand_Mem_Imm32_Reg(def *backend.SSAValueDefinition) (op | ||||||
| 	return m.getOperand_Imm32_Reg(def) | 	return m.getOperand_Imm32_Reg(def) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) getOperand_Imm32_Reg(def *backend.SSAValueDefinition) (op operand) { | func (m *machine) getOperand_Imm32_Reg(def backend.SSAValueDefinition) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return newOperandReg(def.BlkParamVReg) | 		return newOperandReg(m.c.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	instr := def.Instr | 	instr := def.Instr | ||||||
|  | @ -323,24 +323,14 @@ func asImm32(val uint64, allowSignExt bool) (uint32, bool) { | ||||||
| 	return u32val, true | 	return u32val, true | ||||||
| } | } | ||||||
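Only the tail of asImm32 is visible here. As an assumption about the elided body, the usual admissibility rule is: the constant must fit in 32 bits, and when the consuming instruction would sign-extend its immediate, a set sign bit is acceptable only if the caller says so.

	// fitsInImm32Sketch is illustrative only; treat it as an assumption,
	// not the vendored asImm32 body.
	func fitsInImm32Sketch(val uint64, allowSignExt bool) (uint32, bool) {
		u32 := uint32(val)
		if uint64(u32) != val {
			return 0, false // needs more than 32 bits
		}
		if !allowSignExt && u32&0x8000_0000 != 0 {
			return 0, false // sign-extension would change the 64-bit value
		}
		return u32, true
	}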
| 
 | 
 | ||||||
| func (m *machine) getOperand_Reg(def *backend.SSAValueDefinition) (op operand) { | func (m *machine) getOperand_Reg(def backend.SSAValueDefinition) (op operand) { | ||||||
| 	var v regalloc.VReg | 	var v regalloc.VReg | ||||||
| 	if def.IsFromBlockParam() { | 	if instr := def.Instr; instr != nil && instr.Constant() { | ||||||
| 		v = def.BlkParamVReg |  | ||||||
| 	} else { |  | ||||||
| 		instr := def.Instr |  | ||||||
| 		if instr.Constant() { |  | ||||||
| 		// We inline all the constant instructions so that we can reduce register usage. | 		// We inline all the constant instructions so that we can reduce register usage. | ||||||
| 		v = m.lowerConstant(instr) | 		v = m.lowerConstant(instr) | ||||||
| 		instr.MarkLowered() | 		instr.MarkLowered() | ||||||
| 	} else { | 	} else { | ||||||
| 			if n := def.N; n == 0 { | 		v = m.c.VRegOf(def.V) | ||||||
| 				v = m.c.VRegOf(instr.Return()) |  | ||||||
| 			} else { |  | ||||||
| 				_, rs := instr.Returns() |  | ||||||
| 				v = m.c.VRegOf(rs[n-1]) |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| 	return newOperandReg(v) | 	return newOperandReg(v) | ||||||
| } | } | ||||||
|  |  | ||||||
							
								
								
									
vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/amd64/reflect.go (generated, vendored; 11 lines removed)
|  @ -1,11 +0,0 @@ | ||||||
| //go:build !tinygo |  | ||||||
| 
 |  | ||||||
| package amd64 |  | ||||||
| 
 |  | ||||||
| import "reflect" |  | ||||||
| 
 |  | ||||||
| // setSliceLimits sets both Cap and Len for the given reflected slice. |  | ||||||
| func setSliceLimits(s *reflect.SliceHeader, limit uintptr) { |  | ||||||
| 	s.Len = int(limit) |  | ||||||
| 	s.Cap = int(limit) |  | ||||||
| } |  | ||||||
|  | @ -1,11 +0,0 @@ | ||||||
| //go:build tinygo |  | ||||||
| 
 |  | ||||||
| package amd64 |  | ||||||
| 
 |  | ||||||
| import "reflect" |  | ||||||
| 
 |  | ||||||
| // setSliceLimits sets both Cap and Len for the given reflected slice. |  | ||||||
| func setSliceLimits(s *reflect.SliceHeader, limit uintptr) { |  | ||||||
| 	s.Len = limit |  | ||||||
| 	s.Cap = limit |  | ||||||
| } |  | ||||||
							
								
								
									
vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/amd64/stack.go (generated, vendored; 10 lines changed)
|  @ -9,12 +9,14 @@ import ( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func stackView(rbp, top uintptr) []byte { | func stackView(rbp, top uintptr) []byte { | ||||||
|  | 	l := int(top - rbp) | ||||||
| 	var stackBuf []byte | 	var stackBuf []byte | ||||||
| 	{ | 	{ | ||||||
| 		// TODO: use unsafe.Slice after floor version is set to Go 1.20. | 		//nolint:staticcheck | ||||||
| 		hdr := (*reflect.SliceHeader)(unsafe.Pointer(&stackBuf)) | 		hdr := (*reflect.SliceHeader)(unsafe.Pointer(&stackBuf)) | ||||||
| 		hdr.Data = rbp | 		hdr.Data = rbp | ||||||
| 		setSliceLimits(hdr, top-rbp) | 		hdr.Len = l | ||||||
|  | 		hdr.Cap = l | ||||||
| 	} | 	} | ||||||
| 	return stackBuf | 	return stackBuf | ||||||
| } | } | ||||||
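The old TODO suggested unsafe.Slice; the new code keeps reflect.SliceHeader (silenced with nolint) instead. For reference, a sketch of the unsafe.Slice equivalent, which carries the same caveat that converting a uintptr back to a pointer is invisible to go vet and the GC:

	func stackViewAlt(rbp, top uintptr) []byte {
		// Same view as the SliceHeader construction above.
		return unsafe.Slice((*byte)(unsafe.Pointer(rbp)), int(top-rbp))
	}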
|  | @ -72,9 +74,9 @@ func GoCallStackView(stackPointerBeforeGoCall *uint64) []uint64 { | ||||||
| 	//              |   SizeInBytes   | | 	//              |   SizeInBytes   | | ||||||
| 	//              +-----------------+ <---- stackPointerBeforeGoCall | 	//              +-----------------+ <---- stackPointerBeforeGoCall | ||||||
| 	//                 (low address) | 	//                 (low address) | ||||||
| 	data := unsafe.Pointer(uintptr(unsafe.Pointer(stackPointerBeforeGoCall)) + 8) | 	data := unsafe.Add(unsafe.Pointer(stackPointerBeforeGoCall), 8) | ||||||
| 	size := *stackPointerBeforeGoCall / 8 | 	size := *stackPointerBeforeGoCall / 8 | ||||||
| 	return unsafe.Slice((*uint64)(data), int(size)) | 	return unsafe.Slice((*uint64)(data), size) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func AdjustClonedStack(oldRsp, oldTop, rsp, rbp, top uintptr) { | func AdjustClonedStack(oldRsp, oldTop, rsp, rbp, top uintptr) { | ||||||
|  |  | ||||||
							
								
								
									
vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/arm64/abi.go (generated, vendored; 9 lines changed)
|  @ -182,9 +182,9 @@ func (m *machine) LowerReturns(rets []ssa.Value) { | ||||||
| 
 | 
 | ||||||
| // callerGenVRegToFunctionArg is the opposite of GenFunctionArgToVReg, which is used to generate the | // callerGenVRegToFunctionArg is the opposite of GenFunctionArgToVReg, which is used to generate the | ||||||
| // caller side of the function call. | // caller side of the function call. | ||||||
| func (m *machine) callerGenVRegToFunctionArg(a *backend.FunctionABI, argIndex int, reg regalloc.VReg, def *backend.SSAValueDefinition, slotBegin int64) { | func (m *machine) callerGenVRegToFunctionArg(a *backend.FunctionABI, argIndex int, reg regalloc.VReg, def backend.SSAValueDefinition, slotBegin int64) { | ||||||
| 	arg := &a.Args[argIndex] | 	arg := &a.Args[argIndex] | ||||||
| 	if def != nil && def.IsFromInstr() { | 	if def.IsFromInstr() { | ||||||
| 		// Constant instructions are inlined. | 		// Constant instructions are inlined. | ||||||
| 		if inst := def.Instr; inst.Constant() { | 		if inst := def.Instr; inst.Constant() { | ||||||
| 			val := inst.Return() | 			val := inst.Return() | ||||||
|  | @ -228,10 +228,9 @@ func (m *machine) callerGenFunctionReturnVReg(a *backend.FunctionABI, retIndex i | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) resolveAddressModeForOffsetAndInsert(cur *instruction, offset int64, dstBits byte, rn regalloc.VReg, allowTmpRegUse bool) (*instruction, *addressMode) { | func (m *machine) resolveAddressModeForOffsetAndInsert(cur *instruction, offset int64, dstBits byte, rn regalloc.VReg, allowTmpRegUse bool) (*instruction, *addressMode) { | ||||||
| 	exct := m.executableContext | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 	exct.PendingInstructions = exct.PendingInstructions[:0] |  | ||||||
| 	mode := m.resolveAddressModeForOffset(offset, dstBits, rn, allowTmpRegUse) | 	mode := m.resolveAddressModeForOffset(offset, dstBits, rn, allowTmpRegUse) | ||||||
| 	for _, instr := range exct.PendingInstructions { | 	for _, instr := range m.pendingInstructions { | ||||||
| 		cur = linkInstr(cur, instr) | 		cur = linkInstr(cur, instr) | ||||||
| 	} | 	} | ||||||
| 	return cur, mode | 	return cur, mode | ||||||
|  |  | ||||||
|  | @ -14,7 +14,6 @@ var calleeSavedRegistersSorted = []regalloc.VReg{ | ||||||
| 
 | 
 | ||||||
| // CompileGoFunctionTrampoline implements backend.Machine. | // CompileGoFunctionTrampoline implements backend.Machine. | ||||||
| func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig *ssa.Signature, needModuleContextPtr bool) []byte { | func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig *ssa.Signature, needModuleContextPtr bool) []byte { | ||||||
| 	exct := m.executableContext |  | ||||||
| 	argBegin := 1 // Skips exec context by default. | 	argBegin := 1 // Skips exec context by default. | ||||||
| 	if needModuleContextPtr { | 	if needModuleContextPtr { | ||||||
| 		argBegin++ | 		argBegin++ | ||||||
|  | @ -26,7 +25,7 @@ func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig * | ||||||
| 
 | 
 | ||||||
| 	cur := m.allocateInstr() | 	cur := m.allocateInstr() | ||||||
| 	cur.asNop0() | 	cur.asNop0() | ||||||
| 	exct.RootInstr = cur | 	m.rootInstr = cur | ||||||
| 
 | 
 | ||||||
| 	// Execution context is always the first argument. | 	// Execution context is always the first argument. | ||||||
| 	execCtrPtr := x0VReg | 	execCtrPtr := x0VReg | ||||||
|  | @ -244,7 +243,7 @@ func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig * | ||||||
| 	ret.asRet() | 	ret.asRet() | ||||||
| 	linkInstr(cur, ret) | 	linkInstr(cur, ret) | ||||||
| 
 | 
 | ||||||
| 	m.encode(m.executableContext.RootInstr) | 	m.encode(m.rootInstr) | ||||||
| 	return m.compiler.Buf() | 	return m.compiler.Buf() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -302,20 +301,18 @@ func (m *machine) restoreRegistersInExecutionContext(cur *instruction, regs []re | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerConstantI64AndInsert(cur *instruction, dst regalloc.VReg, v int64) *instruction { | func (m *machine) lowerConstantI64AndInsert(cur *instruction, dst regalloc.VReg, v int64) *instruction { | ||||||
| 	exct := m.executableContext | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 	exct.PendingInstructions = exct.PendingInstructions[:0] |  | ||||||
| 	m.lowerConstantI64(dst, v) | 	m.lowerConstantI64(dst, v) | ||||||
| 	for _, instr := range exct.PendingInstructions { | 	for _, instr := range m.pendingInstructions { | ||||||
| 		cur = linkInstr(cur, instr) | 		cur = linkInstr(cur, instr) | ||||||
| 	} | 	} | ||||||
| 	return cur | 	return cur | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerConstantI32AndInsert(cur *instruction, dst regalloc.VReg, v int32) *instruction { | func (m *machine) lowerConstantI32AndInsert(cur *instruction, dst regalloc.VReg, v int32) *instruction { | ||||||
| 	exct := m.executableContext | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 	exct.PendingInstructions = exct.PendingInstructions[:0] |  | ||||||
| 	m.lowerConstantI32(dst, v) | 	m.lowerConstantI32(dst, v) | ||||||
| 	for _, instr := range exct.PendingInstructions { | 	for _, instr := range m.pendingInstructions { | ||||||
| 		cur = linkInstr(cur, instr) | 		cur = linkInstr(cur, instr) | ||||||
| 	} | 	} | ||||||
| 	return cur | 	return cur | ||||||
|  |  | ||||||
							
								
								
									
vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/arm64/instr.go (generated, vendored; 27 lines changed)
|  @ -36,18 +36,6 @@ type ( | ||||||
| 	instructionKind byte | 	instructionKind byte | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func asNop0(i *instruction) { |  | ||||||
| 	i.kind = nop0 |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func setNext(i, next *instruction) { |  | ||||||
| 	i.next = next |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func setPrev(i, prev *instruction) { |  | ||||||
| 	i.prev = prev |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // IsCall implements regalloc.Instr IsCall. | // IsCall implements regalloc.Instr IsCall. | ||||||
| func (i *instruction) IsCall() bool { | func (i *instruction) IsCall() bool { | ||||||
| 	return i.kind == call | 	return i.kind == call | ||||||
|  | @ -63,21 +51,6 @@ func (i *instruction) IsReturn() bool { | ||||||
| 	return i.kind == ret | 	return i.kind == ret | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Next implements regalloc.Instr Next. |  | ||||||
| func (i *instruction) Next() regalloc.Instr { |  | ||||||
| 	return i.next |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Prev implements regalloc.Instr Prev. |  | ||||||
| func (i *instruction) Prev() regalloc.Instr { |  | ||||||
| 	return i.prev |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // AddedBeforeRegAlloc implements regalloc.Instr AddedBeforeRegAlloc. |  | ||||||
| func (i *instruction) AddedBeforeRegAlloc() bool { |  | ||||||
| 	return i.addedBeforeRegAlloc |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type defKind byte | type defKind byte | ||||||
| 
 | 
 | ||||||
| const ( | const ( | ||||||
|  |  | ||||||
|  | @ -12,7 +12,7 @@ import ( | ||||||
| // Encode implements backend.Machine Encode. | // Encode implements backend.Machine Encode. | ||||||
| func (m *machine) Encode(ctx context.Context) error { | func (m *machine) Encode(ctx context.Context) error { | ||||||
| 	m.resolveRelativeAddresses(ctx) | 	m.resolveRelativeAddresses(ctx) | ||||||
| 	m.encode(m.executableContext.RootInstr) | 	m.encode(m.rootInstr) | ||||||
| 	if l := len(m.compiler.Buf()); l > maxFunctionExecutableSize { | 	if l := len(m.compiler.Buf()); l > maxFunctionExecutableSize { | ||||||
| 		return fmt.Errorf("function size exceeds the limit: %d > %d", l, maxFunctionExecutableSize) | 		return fmt.Errorf("function size exceeds the limit: %d > %d", l, maxFunctionExecutableSize) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -17,19 +17,18 @@ import ( | ||||||
| 
 | 
 | ||||||
| // LowerSingleBranch implements backend.Machine. | // LowerSingleBranch implements backend.Machine. | ||||||
| func (m *machine) LowerSingleBranch(br *ssa.Instruction) { | func (m *machine) LowerSingleBranch(br *ssa.Instruction) { | ||||||
| 	ectx := m.executableContext |  | ||||||
| 	switch br.Opcode() { | 	switch br.Opcode() { | ||||||
| 	case ssa.OpcodeJump: | 	case ssa.OpcodeJump: | ||||||
| 		_, _, targetBlk := br.BranchData() | 		_, _, targetBlkID := br.BranchData() | ||||||
| 		if br.IsFallthroughJump() { | 		if br.IsFallthroughJump() { | ||||||
| 			return | 			return | ||||||
| 		} | 		} | ||||||
| 		b := m.allocateInstr() | 		b := m.allocateInstr() | ||||||
| 		target := ectx.GetOrAllocateSSABlockLabel(targetBlk) | 		targetBlk := m.compiler.SSABuilder().BasicBlock(targetBlkID) | ||||||
| 		if target == labelReturn { | 		if targetBlk.ReturnBlock() { | ||||||
| 			b.asRet() | 			b.asRet() | ||||||
| 		} else { | 		} else { | ||||||
| 			b.asBr(target) | 			b.asBr(ssaBlockLabel(targetBlk)) | ||||||
| 		} | 		} | ||||||
| 		m.insert(b) | 		m.insert(b) | ||||||
| 	case ssa.OpcodeBrTable: | 	case ssa.OpcodeBrTable: | ||||||
|  | @ -40,7 +39,8 @@ func (m *machine) LowerSingleBranch(br *ssa.Instruction) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerBrTable(i *ssa.Instruction) { | func (m *machine) lowerBrTable(i *ssa.Instruction) { | ||||||
| 	index, targets := i.BrTableData() | 	index, targetBlockIDs := i.BrTableData() | ||||||
|  | 	targetBlockCount := len(targetBlockIDs.View()) | ||||||
| 	indexOperand := m.getOperand_NR(m.compiler.ValueDefinition(index), extModeNone) | 	indexOperand := m.getOperand_NR(m.compiler.ValueDefinition(index), extModeNone) | ||||||
| 
 | 
 | ||||||
| 	// Firstly, we have to do the bounds check of the index, and | 	// Firstly, we have to do the bounds check of the index, and | ||||||
|  | @ -50,7 +50,7 @@ func (m *machine) lowerBrTable(i *ssa.Instruction) { | ||||||
| 	// subs wzr, index, maxIndexReg | 	// subs wzr, index, maxIndexReg | ||||||
| 	// csel adjustedIndex, maxIndexReg, index, hs ;; if index is higher than or equal to maxIndexReg. | 	// csel adjustedIndex, maxIndexReg, index, hs ;; if index is higher than or equal to maxIndexReg. | ||||||
| 	maxIndexReg := m.compiler.AllocateVReg(ssa.TypeI32) | 	maxIndexReg := m.compiler.AllocateVReg(ssa.TypeI32) | ||||||
| 	m.lowerConstantI32(maxIndexReg, int32(len(targets)-1)) | 	m.lowerConstantI32(maxIndexReg, int32(targetBlockCount-1)) | ||||||
| 	subs := m.allocateInstr() | 	subs := m.allocateInstr() | ||||||
| 	subs.asALU(aluOpSubS, xzrVReg, indexOperand, operandNR(maxIndexReg), false) | 	subs.asALU(aluOpSubS, xzrVReg, indexOperand, operandNR(maxIndexReg), false) | ||||||
| 	m.insert(subs) | 	m.insert(subs) | ||||||
|  | @ -61,24 +61,24 @@ func (m *machine) lowerBrTable(i *ssa.Instruction) { | ||||||
| 
 | 
 | ||||||
| 	brSequence := m.allocateInstr() | 	brSequence := m.allocateInstr() | ||||||
| 
 | 
 | ||||||
| 	tableIndex := m.addJmpTableTarget(targets) | 	tableIndex := m.addJmpTableTarget(targetBlockIDs) | ||||||
| 	brSequence.asBrTableSequence(adjustedIndex, tableIndex, len(targets)) | 	brSequence.asBrTableSequence(adjustedIndex, tableIndex, targetBlockCount) | ||||||
| 	m.insert(brSequence) | 	m.insert(brSequence) | ||||||
| } | } | ||||||
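The subs result is discarded into wzr purely to set flags, and csel then clamps the index so the table lookup is always in bounds (how the br_table default entry ends up at the clamped position is handled by the surrounding lowering, not this hunk). A Go model of the subs/csel pair:

	// clampIndex models `subs wzr, index, maxIndex` + `csel out, maxIndex, index, hs`:
	// hs (unsigned >=) selects maxIndex whenever index would run off the table.
	func clampIndex(index, maxIndex uint32) uint32 {
		if index >= maxIndex {
			return maxIndex
		}
		return index
	}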
| 
 | 
 | ||||||
| // LowerConditionalBranch implements backend.Machine. | // LowerConditionalBranch implements backend.Machine. | ||||||
| func (m *machine) LowerConditionalBranch(b *ssa.Instruction) { | func (m *machine) LowerConditionalBranch(b *ssa.Instruction) { | ||||||
| 	exctx := m.executableContext | 	cval, args, targetBlkID := b.BranchData() | ||||||
| 	cval, args, targetBlk := b.BranchData() |  | ||||||
| 	if len(args) > 0 { | 	if len(args) > 0 { | ||||||
| 		panic(fmt.Sprintf( | 		panic(fmt.Sprintf( | ||||||
| 			"conditional branch shouldn't have args; likely a bug in critical edge splitting: from %s to %s", | 			"conditional branch shouldn't have args; likely a bug in critical edge splitting: from %s to %s", | ||||||
| 			exctx.CurrentSSABlk, | 			m.currentLabelPos.sb, | ||||||
| 			targetBlk, | 			targetBlkID, | ||||||
| 		)) | 		)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	target := exctx.GetOrAllocateSSABlockLabel(targetBlk) | 	targetBlk := m.compiler.SSABuilder().BasicBlock(targetBlkID) | ||||||
|  | 	target := ssaBlockLabel(targetBlk) | ||||||
| 	cvalDef := m.compiler.ValueDefinition(cval) | 	cvalDef := m.compiler.ValueDefinition(cval) | ||||||
| 
 | 
 | ||||||
| 	switch { | 	switch { | ||||||
|  | @ -791,7 +791,7 @@ func (m *machine) LowerInstr(instr *ssa.Instruction) { | ||||||
| 	default: | 	default: | ||||||
| 		panic("TODO: lowering " + op.String()) | 		panic("TODO: lowering " + op.String()) | ||||||
| 	} | 	} | ||||||
| 	m.executableContext.FlushPendingInstructions() | 	m.FlushPendingInstructions() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) lowerShuffle(rd regalloc.VReg, rn, rm operand, lane1, lane2 uint64) { | func (m *machine) lowerShuffle(rd regalloc.VReg, rn, rm operand, lane1, lane2 uint64) { | ||||||
|  |  | ||||||
|  | @ -162,9 +162,9 @@ func (o operand) assignReg(v regalloc.VReg) operand { | ||||||
| // | // | ||||||
| // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | ||||||
| // If the operand can be expressed as operandKindImm12, `mode` is ignored. | // If the operand can be expressed as operandKindImm12, `mode` is ignored. | ||||||
| func (m *machine) getOperand_Imm12_ER_SR_NR(def *backend.SSAValueDefinition, mode extMode) (op operand) { | func (m *machine) getOperand_Imm12_ER_SR_NR(def backend.SSAValueDefinition, mode extMode) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return operandNR(def.BlkParamVReg) | 		return operandNR(m.compiler.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	instr := def.Instr | 	instr := def.Instr | ||||||
|  | @ -179,9 +179,9 @@ func (m *machine) getOperand_Imm12_ER_SR_NR(def *backend.SSAValueDefinition, mod | ||||||
| 
 | 
 | ||||||
| // getOperand_MaybeNegatedImm12_ER_SR_NR is almost the same as getOperand_Imm12_ER_SR_NR, but this might negate the immediate value. | // getOperand_MaybeNegatedImm12_ER_SR_NR is almost the same as getOperand_Imm12_ER_SR_NR, but this might negate the immediate value. | ||||||
| // If the immediate value is negated, the second return value is true, otherwise always false. | // If the immediate value is negated, the second return value is true, otherwise always false. | ||||||
| func (m *machine) getOperand_MaybeNegatedImm12_ER_SR_NR(def *backend.SSAValueDefinition, mode extMode) (op operand, negatedImm12 bool) { | func (m *machine) getOperand_MaybeNegatedImm12_ER_SR_NR(def backend.SSAValueDefinition, mode extMode) (op operand, negatedImm12 bool) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return operandNR(def.BlkParamVReg), false | 		return operandNR(m.compiler.VRegOf(def.V)), false | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	instr := def.Instr | 	instr := def.Instr | ||||||
|  | @ -193,7 +193,7 @@ func (m *machine) getOperand_MaybeNegatedImm12_ER_SR_NR(def *backend.SSAValueDef | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		signExtended := int64(c) | 		signExtended := int64(c) | ||||||
| 		if def.SSAValue().Type().Bits() == 32 { | 		if def.V.Type().Bits() == 32 { | ||||||
| 			signExtended = (signExtended << 32) >> 32 | 			signExtended = (signExtended << 32) >> 32 | ||||||
| 		} | 		} | ||||||
| 		negatedWithoutSign := -signExtended | 		negatedWithoutSign := -signExtended | ||||||
|  | @ -208,9 +208,9 @@ func (m *machine) getOperand_MaybeNegatedImm12_ER_SR_NR(def *backend.SSAValueDef | ||||||
| // getOperand_ER_SR_NR returns an operand of either operandKindER, operandKindSR, or operandKindNR from the given value (defined by `def`). | // getOperand_ER_SR_NR returns an operand of either operandKindER, operandKindSR, or operandKindNR from the given value (defined by `def`). | ||||||
| // | // | ||||||
| // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | ||||||
| func (m *machine) getOperand_ER_SR_NR(def *backend.SSAValueDefinition, mode extMode) (op operand) { | func (m *machine) getOperand_ER_SR_NR(def backend.SSAValueDefinition, mode extMode) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return operandNR(def.BlkParamVReg) | 		return operandNR(m.compiler.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if m.compiler.MatchInstr(def, ssa.OpcodeSExtend) || m.compiler.MatchInstr(def, ssa.OpcodeUExtend) { | 	if m.compiler.MatchInstr(def, ssa.OpcodeSExtend) || m.compiler.MatchInstr(def, ssa.OpcodeUExtend) { | ||||||
|  | @ -251,9 +251,9 @@ func (m *machine) getOperand_ER_SR_NR(def *backend.SSAValueDefinition, mode extM | ||||||
| // getOperand_SR_NR returns an operand of either operandKindSR or operandKindNR from the given value (defined by `def`). | // getOperand_SR_NR returns an operand of either operandKindSR or operandKindNR from the given value (defined by `def`). | ||||||
| // | // | ||||||
| // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | ||||||
| func (m *machine) getOperand_SR_NR(def *backend.SSAValueDefinition, mode extMode) (op operand) { | func (m *machine) getOperand_SR_NR(def backend.SSAValueDefinition, mode extMode) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return operandNR(def.BlkParamVReg) | 		return operandNR(m.compiler.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if m.compiler.MatchInstr(def, ssa.OpcodeIshl) { | 	if m.compiler.MatchInstr(def, ssa.OpcodeIshl) { | ||||||
|  | @ -273,9 +273,9 @@ func (m *machine) getOperand_SR_NR(def *backend.SSAValueDefinition, mode extMode | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // getOperand_ShiftImm_NR returns an operand of either operandKindShiftImm or operandKindNR from the given value (defined by `def`). | // getOperand_ShiftImm_NR returns an operand of either operandKindShiftImm or operandKindNR from the given value (defined by `def`). | ||||||
| func (m *machine) getOperand_ShiftImm_NR(def *backend.SSAValueDefinition, mode extMode, shiftBitWidth byte) (op operand) { | func (m *machine) getOperand_ShiftImm_NR(def backend.SSAValueDefinition, mode extMode, shiftBitWidth byte) (op operand) { | ||||||
| 	if def.IsFromBlockParam() { | 	if !def.IsFromInstr() { | ||||||
| 		return operandNR(def.BlkParamVReg) | 		return operandNR(m.compiler.VRegOf(def.V)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	instr := def.Instr | 	instr := def.Instr | ||||||
|  | @ -289,28 +289,18 @@ func (m *machine) getOperand_ShiftImm_NR(def *backend.SSAValueDefinition, mode e | ||||||
| // getOperand_NR returns an operand of operandKindNR from the given value (defined by `def`). | // getOperand_NR returns an operand of operandKindNR from the given value (defined by `def`). | ||||||
| // | // | ||||||
| // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | // `mode` is used to extend the operand if the bit length is smaller than mode.bits(). | ||||||
| func (m *machine) getOperand_NR(def *backend.SSAValueDefinition, mode extMode) (op operand) { | func (m *machine) getOperand_NR(def backend.SSAValueDefinition, mode extMode) (op operand) { | ||||||
| 	var v regalloc.VReg | 	var v regalloc.VReg | ||||||
| 	if def.IsFromBlockParam() { | 	if def.IsFromInstr() && def.Instr.Constant() { | ||||||
| 		v = def.BlkParamVReg |  | ||||||
| 	} else { |  | ||||||
| 		instr := def.Instr |  | ||||||
| 		if instr.Constant() { |  | ||||||
| 		// We inline all the constant instructions so that we can reduce the register usage. | 		// We inline all the constant instructions so that we can reduce the register usage. | ||||||
| 			v = m.lowerConstant(instr) | 		v = m.lowerConstant(def.Instr) | ||||||
| 			instr.MarkLowered() | 		def.Instr.MarkLowered() | ||||||
| 	} else { | 	} else { | ||||||
| 			if n := def.N; n == 0 { | 		v = m.compiler.VRegOf(def.V) | ||||||
| 				v = m.compiler.VRegOf(instr.Return()) |  | ||||||
| 			} else { |  | ||||||
| 				_, rs := instr.Returns() |  | ||||||
| 				v = m.compiler.VRegOf(rs[n-1]) |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	r := v | 	r := v | ||||||
| 	switch inBits := def.SSAValue().Type().Bits(); { | 	switch inBits := def.V.Type().Bits(); { | ||||||
| 	case mode == extModeNone: | 	case mode == extModeNone: | ||||||
| 	case inBits == 32 && (mode == extModeZeroExtend32 || mode == extModeSignExtend32): | 	case inBits == 32 && (mode == extModeZeroExtend32 || mode == extModeSignExtend32): | ||||||
| 	case inBits == 32 && mode == extModeZeroExtend64: | 	case inBits == 32 && mode == extModeZeroExtend64: | ||||||
|  |  | ||||||
							
								
								
									
267 vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/isa/arm64/machine.go (generated, vendored)
|  | @ -3,6 +3,7 @@ package arm64 | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | 	"math" | ||||||
| 	"strings" | 	"strings" | ||||||
| 
 | 
 | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend" | 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend" | ||||||
|  | @ -15,11 +16,32 @@ type ( | ||||||
| 	// machine implements backend.Machine. | 	// machine implements backend.Machine. | ||||||
| 	machine struct { | 	machine struct { | ||||||
| 		compiler   backend.Compiler | 		compiler   backend.Compiler | ||||||
| 		executableContext *backend.ExecutableContextT[instruction] |  | ||||||
| 		currentABI *backend.FunctionABI | 		currentABI *backend.FunctionABI | ||||||
|  | 		instrPool  wazevoapi.Pool[instruction] | ||||||
|  | 		// labelPositionPool is the pool of labelPosition. The ID is the label; | ||||||
|  | 		// if the label is no greater than maxSSABlockID, it is also the ssa.BasicBlockID. | ||||||
|  | 		labelPositionPool wazevoapi.IDedPool[labelPosition] | ||||||
| 
 | 
 | ||||||
| 		regAlloc   regalloc.Allocator | 		// nextLabel is the next label to be allocated. The first free label comes after maxSSABlockID | ||||||
| 		regAllocFn *backend.RegAllocFunction[*instruction, *machine] | 		// so that we can have an identical label for the SSA block ID, which is useful for debugging. | ||||||
|  | 		nextLabel label | ||||||
|  | 		// rootInstr is the first instruction of the function. | ||||||
|  | 		rootInstr *instruction | ||||||
|  | 		// currentLabelPos is the currently-compiled ssa.BasicBlock's labelPosition. | ||||||
|  | 		currentLabelPos *labelPosition | ||||||
|  | 		// orderedSSABlockLabelPos is the ordered list of labelPosition in the generated code for each ssa.BasicBlock. | ||||||
|  | 		orderedSSABlockLabelPos []*labelPosition | ||||||
|  | 		// returnLabelPos is the labelPosition for the return block. | ||||||
|  | 		returnLabelPos labelPosition | ||||||
|  | 		// perBlockHead and perBlockEnd are the head and tail of the instruction list per currently-compiled ssa.BasicBlock. | ||||||
|  | 		perBlockHead, perBlockEnd *instruction | ||||||
|  | 		// pendingInstructions are the instructions which are not yet emitted into the instruction list. | ||||||
|  | 		pendingInstructions []*instruction | ||||||
|  | 		// maxSSABlockID is the maximum ssa.BasicBlockID in the current function. | ||||||
|  | 		maxSSABlockID label | ||||||
|  | 
 | ||||||
|  | 		regAlloc   regalloc.Allocator[*instruction, *labelPosition, *regAllocFn] | ||||||
|  | 		regAllocFn regAllocFn | ||||||
| 
 | 
 | ||||||
| 		amodePool wazevoapi.Pool[addressMode] | 		amodePool wazevoapi.Pool[addressMode] | ||||||
| 
 | 
 | ||||||
|  | @ -35,6 +57,8 @@ type ( | ||||||
| 
 | 
 | ||||||
| 		// jmpTableTargets holds the labels of the jump table targets. | 		// jmpTableTargets holds the labels of the jump table targets. | ||||||
| 		jmpTableTargets [][]uint32 | 		jmpTableTargets [][]uint32 | ||||||
|  | 		// jmpTableTargetsNext is the index into the jmpTableTargets slice to be used for the next jump table. | ||||||
|  | 		jmpTableTargetsNext int | ||||||
| 
 | 
 | ||||||
| 		// spillSlotSize is the size of the stack slot in bytes used for spilling registers. | 		// spillSlotSize is the size of the stack slot in bytes used for spilling registers. | ||||||
| 		// During the execution of the function, the stack looks like: | 		// During the execution of the function, the stack looks like: | ||||||
|  | @ -91,45 +115,132 @@ type ( | ||||||
| 		nextLabel label | 		nextLabel label | ||||||
| 		offset    int64 | 		offset    int64 | ||||||
| 	} | 	} | ||||||
|  | ) | ||||||
| 
 | 
 | ||||||
| 	labelPosition = backend.LabelPosition[instruction] | type ( | ||||||
| 	label         = backend.Label | 	// label represents a position in the generated code which is either | ||||||
|  | 	// a real instruction or a constant pool entry (e.g. jump tables). | ||||||
|  | 	// | ||||||
|  | 	// This is exactly the same as the traditional "label" in assembly code. | ||||||
|  | 	label uint32 | ||||||
|  | 
 | ||||||
|  | 	// labelPosition represents the region of the generated code that the label refers to. | ||||||
|  | 	// This implements regalloc.Block. | ||||||
|  | 	labelPosition struct { | ||||||
|  | 		// sb is not nil if this corresponds to a ssa.BasicBlock. | ||||||
|  | 		sb ssa.BasicBlock | ||||||
|  | 		// cur is used to walk through the instructions in the block during the register allocation. | ||||||
|  | 		cur, | ||||||
|  | 		// begin and end are the first and last instructions of the block. | ||||||
|  | 		begin, end *instruction | ||||||
|  | 		// binaryOffset is the offset in the binary where the label is located. | ||||||
|  | 		binaryOffset int64 | ||||||
|  | 	} | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| const ( | const ( | ||||||
| 	labelReturn  = backend.LabelReturn | 	labelReturn  label = math.MaxUint32 | ||||||
| 	labelInvalid = backend.LabelInvalid | 	labelInvalid       = labelReturn - 1 | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | // String implements fmt.Stringer. | ||||||
|  | func (l label) String() string { | ||||||
|  | 	return fmt.Sprintf("L%d", l) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func resetLabelPosition(l *labelPosition) { | ||||||
|  | 	*l = labelPosition{} | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // NewBackend returns a new backend for arm64. | // NewBackend returns a new backend for arm64. | ||||||
| func NewBackend() backend.Machine { | func NewBackend() backend.Machine { | ||||||
| 	m := &machine{ | 	m := &machine{ | ||||||
| 		spillSlots:        make(map[regalloc.VRegID]int64), | 		spillSlots:        make(map[regalloc.VRegID]int64), | ||||||
| 		executableContext: newExecutableContext(), | 		regAlloc:          regalloc.NewAllocator[*instruction, *labelPosition, *regAllocFn](regInfo), | ||||||
| 		regAlloc:          regalloc.NewAllocator(regInfo), |  | ||||||
| 		amodePool:         wazevoapi.NewPool[addressMode](resetAddressMode), | 		amodePool:         wazevoapi.NewPool[addressMode](resetAddressMode), | ||||||
|  | 		instrPool:         wazevoapi.NewPool[instruction](resetInstruction), | ||||||
|  | 		labelPositionPool: wazevoapi.NewIDedPool[labelPosition](resetLabelPosition), | ||||||
| 	} | 	} | ||||||
|  | 	m.regAllocFn.m = m | ||||||
| 	return m | 	return m | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func newExecutableContext() *backend.ExecutableContextT[instruction] { | func ssaBlockLabel(sb ssa.BasicBlock) label { | ||||||
| 	return backend.NewExecutableContextT[instruction](resetInstruction, setNext, setPrev, asNop0) | 	if sb.ReturnBlock() { | ||||||
|  | 		return labelReturn | ||||||
|  | 	} | ||||||
|  | 	return label(sb.ID()) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // ExecutableContext implements backend.Machine. | // getOrAllocateSSABlockLabelPosition returns the labelPosition for the given basic block. | ||||||
| func (m *machine) ExecutableContext() backend.ExecutableContext { | func (m *machine) getOrAllocateSSABlockLabelPosition(sb ssa.BasicBlock) *labelPosition { | ||||||
| 	return m.executableContext | 	if sb.ReturnBlock() { | ||||||
|  | 		m.returnLabelPos.sb = sb | ||||||
|  | 		return &m.returnLabelPos | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	l := ssaBlockLabel(sb) | ||||||
|  | 	pos := m.labelPositionPool.GetOrAllocate(int(l)) | ||||||
|  | 	pos.sb = sb | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LinkAdjacentBlocks implements backend.Machine. | ||||||
|  | func (m *machine) LinkAdjacentBlocks(prev, next ssa.BasicBlock) { | ||||||
|  | 	prevPos, nextPos := m.getOrAllocateSSABlockLabelPosition(prev), m.getOrAllocateSSABlockLabelPosition(next) | ||||||
|  | 	prevPos.end.next = nextPos.begin | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StartBlock implements backend.Machine. | ||||||
|  | func (m *machine) StartBlock(blk ssa.BasicBlock) { | ||||||
|  | 	m.currentLabelPos = m.getOrAllocateSSABlockLabelPosition(blk) | ||||||
|  | 	labelPos := m.currentLabelPos | ||||||
|  | 	end := m.allocateNop() | ||||||
|  | 	m.perBlockHead, m.perBlockEnd = end, end | ||||||
|  | 	labelPos.begin, labelPos.end = end, end | ||||||
|  | 	m.orderedSSABlockLabelPos = append(m.orderedSSABlockLabelPos, labelPos) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // EndBlock implements backend.Machine. | ||||||
|  | func (m *machine) EndBlock() { | ||||||
|  | 	// Insert a nop0 at the head of the block to simplify the logic of inserting instructions. | ||||||
|  | 	m.insertAtPerBlockHead(m.allocateNop()) | ||||||
|  | 
 | ||||||
|  | 	m.currentLabelPos.begin = m.perBlockHead | ||||||
|  | 
 | ||||||
|  | 	if m.currentLabelPos.sb.EntryBlock() { | ||||||
|  | 		m.rootInstr = m.perBlockHead | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m *machine) insertAtPerBlockHead(i *instruction) { | ||||||
|  | 	if m.perBlockHead == nil { | ||||||
|  | 		m.perBlockHead = i | ||||||
|  | 		m.perBlockEnd = i | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	i.next = m.perBlockHead | ||||||
|  | 	m.perBlockHead.prev = i | ||||||
|  | 	m.perBlockHead = i | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FlushPendingInstructions implements backend.Machine. | ||||||
|  | func (m *machine) FlushPendingInstructions() { | ||||||
|  | 	l := len(m.pendingInstructions) | ||||||
|  | 	if l == 0 { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	for i := l - 1; i >= 0; i-- { // reverse because we lower instructions in reverse order. | ||||||
|  | 		m.insertAtPerBlockHead(m.pendingInstructions[i]) | ||||||
|  | 	} | ||||||
|  | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // RegAlloc implements backend.Machine Function. | // RegAlloc implements backend.Machine Function. | ||||||
| func (m *machine) RegAlloc() { | func (m *machine) RegAlloc() { | ||||||
| 	rf := m.regAllocFn |  | ||||||
| 	for _, pos := range m.executableContext.OrderedBlockLabels { |  | ||||||
| 		rf.AddBlock(pos.SB, pos.L, pos.Begin, pos.End) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	m.regAllocStarted = true | 	m.regAllocStarted = true | ||||||
| 	m.regAlloc.DoAllocation(rf) | 	m.regAlloc.DoAllocation(&m.regAllocFn) | ||||||
| 	// Now that we know the final spill slot size, we must align spillSlotSize to 16 bytes. | 	// Now that we know the final spill slot size, we must align spillSlotSize to 16 bytes. | ||||||
| 	m.spillSlotSize = (m.spillSlotSize + 15) &^ 15 | 	m.spillSlotSize = (m.spillSlotSize + 15) &^ 15 | ||||||
| } | } | ||||||
|  | @ -146,13 +257,22 @@ func (m *machine) Reset() { | ||||||
| 	m.clobberedRegs = m.clobberedRegs[:0] | 	m.clobberedRegs = m.clobberedRegs[:0] | ||||||
| 	m.regAllocStarted = false | 	m.regAllocStarted = false | ||||||
| 	m.regAlloc.Reset() | 	m.regAlloc.Reset() | ||||||
| 	m.regAllocFn.Reset() |  | ||||||
| 	m.spillSlotSize = 0 | 	m.spillSlotSize = 0 | ||||||
| 	m.unresolvedAddressModes = m.unresolvedAddressModes[:0] | 	m.unresolvedAddressModes = m.unresolvedAddressModes[:0] | ||||||
| 	m.maxRequiredStackSizeForCalls = 0 | 	m.maxRequiredStackSizeForCalls = 0 | ||||||
| 	m.executableContext.Reset() | 	m.jmpTableTargetsNext = 0 | ||||||
| 	m.jmpTableTargets = m.jmpTableTargets[:0] |  | ||||||
| 	m.amodePool.Reset() | 	m.amodePool.Reset() | ||||||
|  | 	m.instrPool.Reset() | ||||||
|  | 	m.labelPositionPool.Reset() | ||||||
|  | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
|  | 	m.perBlockHead, m.perBlockEnd, m.rootInstr = nil, nil, nil | ||||||
|  | 	m.orderedSSABlockLabelPos = m.orderedSSABlockLabelPos[:0] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StartLoweringFunction implements backend.Machine StartLoweringFunction. | ||||||
|  | func (m *machine) StartLoweringFunction(maxBlockID ssa.BasicBlockID) { | ||||||
|  | 	m.maxSSABlockID = label(maxBlockID) | ||||||
|  | 	m.nextLabel = label(maxBlockID) + 1 | ||||||
| } | } | ||||||
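With StartLoweringFunction above, labels 0..maxBlockID are reserved so they coincide with the ssa.BasicBlockIDs, and every later allocateBrTarget call hands out maxBlockID+1, maxBlockID+2, and so on. A small self-contained sketch of the numbering (the label type is redeclared here purely for the example):

package main

import "fmt"

type label uint32

func main() {
	const maxBlockID = 4 // assumed: the function has SSA blocks blk0..blk4
	next := label(maxBlockID) + 1

	// Labels L0..L4 mirror the SSA block IDs; fresh branch targets follow.
	for i := 0; i < 3; i++ {
		fmt.Printf("br target label: L%d\n", next)
		next++
	}
	// Prints L5, L6, L7.
}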
| 
 | 
 | ||||||
| // SetCurrentABI implements backend.Machine SetCurrentABI. | // SetCurrentABI implements backend.Machine SetCurrentABI. | ||||||
|  | @ -168,12 +288,11 @@ func (m *machine) DisableStackCheck() { | ||||||
| // SetCompiler implements backend.Machine. | // SetCompiler implements backend.Machine. | ||||||
| func (m *machine) SetCompiler(ctx backend.Compiler) { | func (m *machine) SetCompiler(ctx backend.Compiler) { | ||||||
| 	m.compiler = ctx | 	m.compiler = ctx | ||||||
| 	m.regAllocFn = backend.NewRegAllocFunction[*instruction, *machine](m, ctx.SSABuilder(), ctx) | 	m.regAllocFn.ssaB = ctx.SSABuilder() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) insert(i *instruction) { | func (m *machine) insert(i *instruction) { | ||||||
| 	ectx := m.executableContext | 	m.pendingInstructions = append(m.pendingInstructions, i) | ||||||
| 	ectx.PendingInstructions = append(ectx.PendingInstructions, i) |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) insertBrTargetLabel() label { | func (m *machine) insertBrTargetLabel() label { | ||||||
|  | @ -183,18 +302,18 @@ func (m *machine) insertBrTargetLabel() label { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) allocateBrTarget() (nop *instruction, l label) { | func (m *machine) allocateBrTarget() (nop *instruction, l label) { | ||||||
| 	ectx := m.executableContext | 	l = m.nextLabel | ||||||
| 	l = ectx.AllocateLabel() | 	m.nextLabel++ | ||||||
| 	nop = m.allocateInstr() | 	nop = m.allocateInstr() | ||||||
| 	nop.asNop0WithLabel(l) | 	nop.asNop0WithLabel(l) | ||||||
| 	pos := ectx.GetOrAllocateLabelPosition(l) | 	pos := m.labelPositionPool.GetOrAllocate(int(l)) | ||||||
| 	pos.Begin, pos.End = nop, nop | 	pos.begin, pos.end = nop, nop | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // allocateInstr allocates an instruction. | // allocateInstr allocates an instruction. | ||||||
| func (m *machine) allocateInstr() *instruction { | func (m *machine) allocateInstr() *instruction { | ||||||
| 	instr := m.executableContext.InstructionPool.Allocate() | 	instr := m.instrPool.Allocate() | ||||||
| 	if !m.regAllocStarted { | 	if !m.regAllocStarted { | ||||||
| 		instr.addedBeforeRegAlloc = true | 		instr.addedBeforeRegAlloc = true | ||||||
| 	} | 	} | ||||||
|  | @ -251,7 +370,6 @@ func (m *machine) resolveAddressingMode(arg0offset, ret0offset int64, i *instruc | ||||||
| 
 | 
 | ||||||
| // resolveRelativeAddresses resolves the relative addresses before encoding. | // resolveRelativeAddresses resolves the relative addresses before encoding. | ||||||
| func (m *machine) resolveRelativeAddresses(ctx context.Context) { | func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 	ectx := m.executableContext |  | ||||||
| 	for { | 	for { | ||||||
| 		if len(m.unresolvedAddressModes) > 0 { | 		if len(m.unresolvedAddressModes) > 0 { | ||||||
| 			arg0offset, ret0offset := m.arg0OffsetFromSP(), m.ret0OffsetFromSP() | 			arg0offset, ret0offset := m.arg0OffsetFromSP(), m.ret0OffsetFromSP() | ||||||
|  | @ -265,35 +383,36 @@ func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 
 | 
 | ||||||
| 		var fn string | 		var fn string | ||||||
| 		var fnIndex int | 		var fnIndex int | ||||||
| 		var labelToSSABlockID map[label]ssa.BasicBlockID | 		var labelPosToLabel map[*labelPosition]label | ||||||
| 		if wazevoapi.PerfMapEnabled { | 		if wazevoapi.PerfMapEnabled { | ||||||
| 			fn = wazevoapi.GetCurrentFunctionName(ctx) | 			labelPosToLabel = make(map[*labelPosition]label) | ||||||
| 			labelToSSABlockID = make(map[label]ssa.BasicBlockID) | 			for i := 0; i <= m.labelPositionPool.MaxIDEncountered(); i++ { | ||||||
| 			for i, l := range ectx.SsaBlockIDToLabels { | 				labelPosToLabel[m.labelPositionPool.Get(i)] = label(i) | ||||||
| 				labelToSSABlockID[l] = ssa.BasicBlockID(i) |  | ||||||
| 			} | 			} | ||||||
|  | 
 | ||||||
|  | 			fn = wazevoapi.GetCurrentFunctionName(ctx) | ||||||
| 			fnIndex = wazevoapi.GetCurrentFunctionIndex(ctx) | 			fnIndex = wazevoapi.GetCurrentFunctionIndex(ctx) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		// Next, in order to determine the offsets of relative jumps, we have to calculate the size of each label. | 		// Next, in order to determine the offsets of relative jumps, we have to calculate the size of each label. | ||||||
| 		var offset int64 | 		var offset int64 | ||||||
| 		for i, pos := range ectx.OrderedBlockLabels { | 		for i, pos := range m.orderedSSABlockLabelPos { | ||||||
| 			pos.BinaryOffset = offset | 			pos.binaryOffset = offset | ||||||
| 			var size int64 | 			var size int64 | ||||||
| 			for cur := pos.Begin; ; cur = cur.next { | 			for cur := pos.begin; ; cur = cur.next { | ||||||
| 				switch cur.kind { | 				switch cur.kind { | ||||||
| 				case nop0: | 				case nop0: | ||||||
| 					l := cur.nop0Label() | 					l := cur.nop0Label() | ||||||
| 					if pos := ectx.LabelPositions[l]; pos != nil { | 					if pos := m.labelPositionPool.Get(int(l)); pos != nil { | ||||||
| 						pos.BinaryOffset = offset + size | 						pos.binaryOffset = offset + size | ||||||
| 					} | 					} | ||||||
| 				case condBr: | 				case condBr: | ||||||
| 					if !cur.condBrOffsetResolved() { | 					if !cur.condBrOffsetResolved() { | ||||||
| 						var nextLabel label | 						var nextLabel label | ||||||
| 						if i < len(ectx.OrderedBlockLabels)-1 { | 						if i < len(m.orderedSSABlockLabelPos)-1 { | ||||||
| 							// Note: this is only used when the block ends with fallthrough, | 							// Note: this is only used when the block ends with fallthrough, | ||||||
| 							// therefore it can be safely assumed that the next block exists when it's needed. | 							// therefore it can be safely assumed that the next block exists when it's needed. | ||||||
| 							nextLabel = ectx.OrderedBlockLabels[i+1].L | 							nextLabel = ssaBlockLabel(m.orderedSSABlockLabelPos[i+1].sb) | ||||||
| 						} | 						} | ||||||
| 						m.condBrRelocs = append(m.condBrRelocs, condBrReloc{ | 						m.condBrRelocs = append(m.condBrRelocs, condBrReloc{ | ||||||
| 							cbr: cur, currentLabelPos: pos, offset: offset + size, | 							cbr: cur, currentLabelPos: pos, offset: offset + size, | ||||||
|  | @ -302,21 +421,14 @@ func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 					} | 					} | ||||||
| 				} | 				} | ||||||
| 				size += cur.size() | 				size += cur.size() | ||||||
| 				if cur == pos.End { | 				if cur == pos.end { | ||||||
| 					break | 					break | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			if wazevoapi.PerfMapEnabled { | 			if wazevoapi.PerfMapEnabled { | ||||||
| 				if size > 0 { | 				if size > 0 { | ||||||
| 					l := pos.L | 					wazevoapi.PerfMap.AddModuleEntry(fnIndex, offset, uint64(size), fmt.Sprintf("%s:::::%s", fn, labelPosToLabel[pos])) | ||||||
| 					var labelStr string |  | ||||||
| 					if blkID, ok := labelToSSABlockID[l]; ok { |  | ||||||
| 						labelStr = fmt.Sprintf("%s::SSA_Block[%s]", l, blkID) |  | ||||||
| 					} else { |  | ||||||
| 						labelStr = l.String() |  | ||||||
| 					} |  | ||||||
| 					wazevoapi.PerfMap.AddModuleEntry(fnIndex, offset, uint64(size), fmt.Sprintf("%s:::::%s", fn, labelStr)) |  | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 			offset += size | 			offset += size | ||||||
|  | @ -330,7 +442,7 @@ func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 			offset := reloc.offset | 			offset := reloc.offset | ||||||
| 
 | 
 | ||||||
| 			target := cbr.condBrLabel() | 			target := cbr.condBrLabel() | ||||||
| 			offsetOfTarget := ectx.LabelPositions[target].BinaryOffset | 			offsetOfTarget := m.labelPositionPool.Get(int(target)).binaryOffset | ||||||
| 			diff := offsetOfTarget - offset | 			diff := offsetOfTarget - offset | ||||||
| 			if divided := diff >> 2; divided < minSignedInt19 || divided > maxSignedInt19 { | 			if divided := diff >> 2; divided < minSignedInt19 || divided > maxSignedInt19 { | ||||||
| 				// In this case the conditional branch is out of range. We place the trampoline instructions at the end of the current block, | 				// In this case the conditional branch is out of range. We place the trampoline instructions at the end of the current block, | ||||||
|  | @ -351,11 +463,11 @@ func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	var currentOffset int64 | 	var currentOffset int64 | ||||||
| 	for cur := ectx.RootInstr; cur != nil; cur = cur.next { | 	for cur := m.rootInstr; cur != nil; cur = cur.next { | ||||||
| 		switch cur.kind { | 		switch cur.kind { | ||||||
| 		case br: | 		case br: | ||||||
| 			target := cur.brLabel() | 			target := cur.brLabel() | ||||||
| 			offsetOfTarget := ectx.LabelPositions[target].BinaryOffset | 			offsetOfTarget := m.labelPositionPool.Get(int(target)).binaryOffset | ||||||
| 			diff := offsetOfTarget - currentOffset | 			diff := offsetOfTarget - currentOffset | ||||||
| 			divided := diff >> 2 | 			divided := diff >> 2 | ||||||
| 			if divided < minSignedInt26 || divided > maxSignedInt26 { | 			if divided < minSignedInt26 || divided > maxSignedInt26 { | ||||||
|  | @ -366,7 +478,7 @@ func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 		case condBr: | 		case condBr: | ||||||
| 			if !cur.condBrOffsetResolved() { | 			if !cur.condBrOffsetResolved() { | ||||||
| 				target := cur.condBrLabel() | 				target := cur.condBrLabel() | ||||||
| 				offsetOfTarget := ectx.LabelPositions[target].BinaryOffset | 				offsetOfTarget := m.labelPositionPool.Get(int(target)).binaryOffset | ||||||
| 				diff := offsetOfTarget - currentOffset | 				diff := offsetOfTarget - currentOffset | ||||||
| 				if divided := diff >> 2; divided < minSignedInt19 || divided > maxSignedInt19 { | 				if divided := diff >> 2; divided < minSignedInt19 || divided > maxSignedInt19 { | ||||||
| 					panic("BUG: branch relocation for large conditional branch larger than 19-bit range must be handled properly") | 					panic("BUG: branch relocation for large conditional branch larger than 19-bit range must be handled properly") | ||||||
|  | @ -378,7 +490,7 @@ func (m *machine) resolveRelativeAddresses(ctx context.Context) { | ||||||
| 			targets := m.jmpTableTargets[tableIndex] | 			targets := m.jmpTableTargets[tableIndex] | ||||||
| 			for i := range targets { | 			for i := range targets { | ||||||
| 				l := label(targets[i]) | 				l := label(targets[i]) | ||||||
| 				offsetOfTarget := ectx.LabelPositions[l].BinaryOffset | 				offsetOfTarget := m.labelPositionPool.Get(int(l)).binaryOffset | ||||||
| 				diff := offsetOfTarget - (currentOffset + brTableSequenceOffsetTableBegin) | 				diff := offsetOfTarget - (currentOffset + brTableSequenceOffsetTableBegin) | ||||||
| 				targets[i] = uint32(diff) | 				targets[i] = uint32(diff) | ||||||
| 			} | 			} | ||||||
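The condBr handling above relies on AArch64's B.cond encoding, which stores a signed 19-bit word offset; `diff >> 2` converts a byte offset into a 4-byte-instruction offset before the range check. A hedged sketch of that check (the constants are spelled out here; the real code defines minSignedInt19/maxSignedInt19 elsewhere):

package sketch

const (
	minSignedInt19 int64 = -(1 << 18) // most negative 19-bit signed value
	maxSignedInt19 int64 = 1<<18 - 1  // most positive 19-bit signed value
)

// condBrInRange reports whether a conditional branch at brOffset can
// directly reach targetOffset; otherwise a trampoline is required.
func condBrInRange(targetOffset, brOffset int64) bool {
	diff := targetOffset - brOffset
	divided := diff >> 2 // instructions are 4 bytes each
	return divided >= minSignedInt19 && divided <= maxSignedInt19
}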
|  | @ -399,7 +511,7 @@ const ( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func (m *machine) insertConditionalJumpTrampoline(cbr *instruction, currentBlk *labelPosition, nextLabel label) { | func (m *machine) insertConditionalJumpTrampoline(cbr *instruction, currentBlk *labelPosition, nextLabel label) { | ||||||
| 	cur := currentBlk.End | 	cur := currentBlk.end | ||||||
| 	originalTarget := cbr.condBrLabel() | 	originalTarget := cbr.condBrLabel() | ||||||
| 	endNext := cur.next | 	endNext := cur.next | ||||||
| 
 | 
 | ||||||
|  | @ -422,32 +534,27 @@ func (m *machine) insertConditionalJumpTrampoline(cbr *instruction, currentBlk * | ||||||
| 	cur = linkInstr(cur, br) | 	cur = linkInstr(cur, br) | ||||||
| 
 | 
 | ||||||
| 	// Update the end of the current block. | 	// Update the end of the current block. | ||||||
| 	currentBlk.End = cur | 	currentBlk.end = cur | ||||||
| 
 | 
 | ||||||
| 	linkInstr(cur, endNext) | 	linkInstr(cur, endNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Format implements backend.Machine. | // Format implements backend.Machine. | ||||||
| func (m *machine) Format() string { | func (m *machine) Format() string { | ||||||
| 	ectx := m.executableContext |  | ||||||
| 	begins := map[*instruction]label{} | 	begins := map[*instruction]label{} | ||||||
| 	for _, pos := range ectx.LabelPositions { | 	for l := label(0); l < m.nextLabel; l++ { | ||||||
|  | 		pos := m.labelPositionPool.Get(int(l)) | ||||||
| 		if pos != nil { | 		if pos != nil { | ||||||
| 			begins[pos.Begin] = pos.L | 			begins[pos.begin] = l | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	irBlocks := map[label]ssa.BasicBlockID{} |  | ||||||
| 	for i, l := range ectx.SsaBlockIDToLabels { |  | ||||||
| 		irBlocks[l] = ssa.BasicBlockID(i) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	var lines []string | 	var lines []string | ||||||
| 	for cur := ectx.RootInstr; cur != nil; cur = cur.next { | 	for cur := m.rootInstr; cur != nil; cur = cur.next { | ||||||
| 		if l, ok := begins[cur]; ok { | 		if l, ok := begins[cur]; ok { | ||||||
| 			var labelStr string | 			var labelStr string | ||||||
| 			if blkID, ok := irBlocks[l]; ok { | 			if l <= m.maxSSABlockID { | ||||||
| 				labelStr = fmt.Sprintf("%s (SSA Block: %s):", l, blkID) | 				labelStr = fmt.Sprintf("%s (SSA Block: blk%d):", l, int(l)) | ||||||
| 			} else { | 			} else { | ||||||
| 				labelStr = fmt.Sprintf("%s:", l) | 				labelStr = fmt.Sprintf("%s:", l) | ||||||
| 			} | 			} | ||||||
|  | @ -508,13 +615,17 @@ func (m *machine) frameSize() int64 { | ||||||
| 	return s | 	return s | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) addJmpTableTarget(targets []ssa.BasicBlock) (index int) { | func (m *machine) addJmpTableTarget(targets ssa.Values) (index int) { | ||||||
| 	// TODO: reuse the slice! | 	if m.jmpTableTargetsNext == len(m.jmpTableTargets) { | ||||||
| 	labels := make([]uint32, len(targets)) | 		m.jmpTableTargets = append(m.jmpTableTargets, make([]uint32, 0, len(targets.View()))) | ||||||
| 	for j, target := range targets { | 	} | ||||||
| 		labels[j] = uint32(m.executableContext.GetOrAllocateSSABlockLabel(target)) | 
 | ||||||
|  | 	index = m.jmpTableTargetsNext | ||||||
|  | 	m.jmpTableTargetsNext++ | ||||||
|  | 	m.jmpTableTargets[index] = m.jmpTableTargets[index][:0] | ||||||
|  | 	for _, targetBlockID := range targets.View() { | ||||||
|  | 		target := m.compiler.SSABuilder().BasicBlock(ssa.BasicBlockID(targetBlockID)) | ||||||
|  | 		m.jmpTableTargets[index] = append(m.jmpTableTargets[index], uint32(target.ID())) | ||||||
| 	} | 	} | ||||||
| 	index = len(m.jmpTableTargets) |  | ||||||
| 	m.jmpTableTargets = append(m.jmpTableTargets, labels) |  | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
|  |  | ||||||
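The reworked addJmpTableTarget above replaces the old per-call allocation (note the removed `TODO: reuse the slice!`) with an index that advances through a recycled slice-of-slices; Reset only rewinds jmpTableTargetsNext, so capacity survives across compilations. A minimal sketch of the pattern with assumed names:

package sketch

// tables mimics the jmpTableTargets/jmpTableTargetsNext pair.
type tables struct {
	data [][]uint32
	next int
}

// add reuses a previously allocated inner slice when one is available,
// growing the outer slice only on first use of a given index.
func (t *tables) add(targets []uint32) (index int) {
	if t.next == len(t.data) {
		t.data = append(t.data, make([]uint32, 0, len(targets)))
	}
	index = t.next
	t.next++
	t.data[index] = append(t.data[index][:0], targets...)
	return index
}

// reset rewinds the index but keeps all inner slices for reuse.
func (t *tables) reset() { t.next = 0 }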
|  | @ -15,9 +15,7 @@ func (m *machine) PostRegAlloc() { | ||||||
| 
 | 
 | ||||||
| // setupPrologue initializes the prologue of the function. | // setupPrologue initializes the prologue of the function. | ||||||
| func (m *machine) setupPrologue() { | func (m *machine) setupPrologue() { | ||||||
| 	ectx := m.executableContext | 	cur := m.rootInstr | ||||||
| 
 |  | ||||||
| 	cur := ectx.RootInstr |  | ||||||
| 	prevInitInst := cur.next | 	prevInitInst := cur.next | ||||||
| 
 | 
 | ||||||
| 	// | 	// | ||||||
|  | @ -196,21 +194,20 @@ func (m *machine) createFrameSizeSlot(cur *instruction, s int64) *instruction { | ||||||
| // 1. Removes the redundant copy instruction. | // 1. Removes the redundant copy instruction. | ||||||
| // 2. Inserts the epilogue. | // 2. Inserts the epilogue. | ||||||
| func (m *machine) postRegAlloc() { | func (m *machine) postRegAlloc() { | ||||||
| 	ectx := m.executableContext | 	for cur := m.rootInstr; cur != nil; cur = cur.next { | ||||||
| 	for cur := ectx.RootInstr; cur != nil; cur = cur.next { |  | ||||||
| 		switch cur.kind { | 		switch cur.kind { | ||||||
| 		case ret: | 		case ret: | ||||||
| 			m.setupEpilogueAfter(cur.prev) | 			m.setupEpilogueAfter(cur.prev) | ||||||
| 		case loadConstBlockArg: | 		case loadConstBlockArg: | ||||||
| 			lc := cur | 			lc := cur | ||||||
| 			next := lc.next | 			next := lc.next | ||||||
| 			m.executableContext.PendingInstructions = m.executableContext.PendingInstructions[:0] | 			m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 			m.lowerLoadConstantBlockArgAfterRegAlloc(lc) | 			m.lowerLoadConstantBlockArgAfterRegAlloc(lc) | ||||||
| 			for _, instr := range m.executableContext.PendingInstructions { | 			for _, instr := range m.pendingInstructions { | ||||||
| 				cur = linkInstr(cur, instr) | 				cur = linkInstr(cur, instr) | ||||||
| 			} | 			} | ||||||
| 			linkInstr(cur, next) | 			linkInstr(cur, next) | ||||||
| 			m.executableContext.PendingInstructions = m.executableContext.PendingInstructions[:0] | 			m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 		default: | 		default: | ||||||
| 			// Removes the redundant copy instruction. | 			// Removes the redundant copy instruction. | ||||||
| 			if cur.IsCopy() && cur.rn.realReg() == cur.rd.RealReg() { | 			if cur.IsCopy() && cur.rn.realReg() == cur.rd.RealReg() { | ||||||
|  | @ -432,11 +429,9 @@ func (m *machine) insertStackBoundsCheck(requiredStackSize int64, cur *instructi | ||||||
| 
 | 
 | ||||||
| // CompileStackGrowCallSequence implements backend.Machine. | // CompileStackGrowCallSequence implements backend.Machine. | ||||||
| func (m *machine) CompileStackGrowCallSequence() []byte { | func (m *machine) CompileStackGrowCallSequence() []byte { | ||||||
| 	ectx := m.executableContext |  | ||||||
| 
 |  | ||||||
| 	cur := m.allocateInstr() | 	cur := m.allocateInstr() | ||||||
| 	cur.asNop0() | 	cur.asNop0() | ||||||
| 	ectx.RootInstr = cur | 	m.rootInstr = cur | ||||||
| 
 | 
 | ||||||
| 	// Save the callee saved and argument registers. | 	// Save the callee saved and argument registers. | ||||||
| 	cur = m.saveRegistersInExecutionContext(cur, saveRequiredRegs) | 	cur = m.saveRegistersInExecutionContext(cur, saveRequiredRegs) | ||||||
|  | @ -458,16 +453,14 @@ func (m *machine) CompileStackGrowCallSequence() []byte { | ||||||
| 	ret.asRet() | 	ret.asRet() | ||||||
| 	linkInstr(cur, ret) | 	linkInstr(cur, ret) | ||||||
| 
 | 
 | ||||||
| 	m.encode(ectx.RootInstr) | 	m.encode(m.rootInstr) | ||||||
| 	return m.compiler.Buf() | 	return m.compiler.Buf() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *machine) addsAddOrSubStackPointer(cur *instruction, rd regalloc.VReg, diff int64, add bool) *instruction { | func (m *machine) addsAddOrSubStackPointer(cur *instruction, rd regalloc.VReg, diff int64, add bool) *instruction { | ||||||
| 	ectx := m.executableContext | 	m.pendingInstructions = m.pendingInstructions[:0] | ||||||
| 
 |  | ||||||
| 	ectx.PendingInstructions = ectx.PendingInstructions[:0] |  | ||||||
| 	m.insertAddOrSubStackPointer(rd, diff, add) | 	m.insertAddOrSubStackPointer(rd, diff, add) | ||||||
| 	for _, inserted := range ectx.PendingInstructions { | 	for _, inserted := range m.pendingInstructions { | ||||||
| 		cur = linkInstr(cur, inserted) | 		cur = linkInstr(cur, inserted) | ||||||
| 	} | 	} | ||||||
| 	return cur | 	return cur | ||||||
|  |  | ||||||
|  | @ -3,18 +3,226 @@ package arm64 | ||||||
| // This file implements the interfaces required for register allocations. See backend.RegAllocFunctionMachine. | // This file implements the interfaces required for register allocations. See backend.RegAllocFunctionMachine. | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend" |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc" | 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc" | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" | 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // ClobberedRegisters implements backend.RegAllocFunctionMachine. | // regAllocFn implements regalloc.Function. | ||||||
| func (m *machine) ClobberedRegisters(regs []regalloc.VReg) { | type regAllocFn struct { | ||||||
| 	m.clobberedRegs = append(m.clobberedRegs[:0], regs...) | 	ssaB                   ssa.Builder | ||||||
|  | 	m                      *machine | ||||||
|  | 	loopNestingForestRoots []ssa.BasicBlock | ||||||
|  | 	blockIter              int | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Swap implements backend.RegAllocFunctionMachine. | // PostOrderBlockIteratorBegin implements regalloc.Function. | ||||||
| func (m *machine) Swap(cur *instruction, x1, x2, tmp regalloc.VReg) { | func (f *regAllocFn) PostOrderBlockIteratorBegin() *labelPosition { | ||||||
|  | 	f.blockIter = len(f.m.orderedSSABlockLabelPos) - 1 | ||||||
|  | 	return f.PostOrderBlockIteratorNext() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // PostOrderBlockIteratorNext implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) PostOrderBlockIteratorNext() *labelPosition { | ||||||
|  | 	if f.blockIter < 0 { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	b := f.m.orderedSSABlockLabelPos[f.blockIter] | ||||||
|  | 	f.blockIter-- | ||||||
|  | 	return b | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReversePostOrderBlockIteratorBegin implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReversePostOrderBlockIteratorBegin() *labelPosition { | ||||||
|  | 	f.blockIter = 0 | ||||||
|  | 	return f.ReversePostOrderBlockIteratorNext() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReversePostOrderBlockIteratorNext implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReversePostOrderBlockIteratorNext() *labelPosition { | ||||||
|  | 	if f.blockIter >= len(f.m.orderedSSABlockLabelPos) { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	b := f.m.orderedSSABlockLabelPos[f.blockIter] | ||||||
|  | 	f.blockIter++ | ||||||
|  | 	return b | ||||||
|  | } | ||||||
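Since orderedSSABlockLabelPos is appended to in reverse post order as blocks are started, the two iterator pairs above are simply opposite walks over the same slice, as the sketch below illustrates (a plain []string stands in for the block slice):

package sketch

// walkPostOrder visits a reverse-post-ordered slice backwards, which
// yields post order, exactly like PostOrderBlockIteratorBegin/Next.
func walkPostOrder(rpo []string, visit func(string)) {
	for i := len(rpo) - 1; i >= 0; i-- {
		visit(rpo[i])
	}
}

// walkReversePostOrder visits the slice forwards, like
// ReversePostOrderBlockIteratorBegin/Next.
func walkReversePostOrder(rpo []string, visit func(string)) {
	for i := 0; i < len(rpo); i++ {
		visit(rpo[i])
	}
}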
|  | 
 | ||||||
|  | // ClobberedRegisters implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ClobberedRegisters(regs []regalloc.VReg) { | ||||||
|  | 	f.m.clobberedRegs = append(f.m.clobberedRegs[:0], regs...) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestRoots implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LoopNestingForestRoots() int { | ||||||
|  | 	f.loopNestingForestRoots = f.ssaB.LoopNestingForestRoots() | ||||||
|  | 	return len(f.loopNestingForestRoots) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestRoot implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LoopNestingForestRoot(i int) *labelPosition { | ||||||
|  | 	root := f.loopNestingForestRoots[i] | ||||||
|  | 	pos := f.m.getOrAllocateSSABlockLabelPosition(root) | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LowestCommonAncestor implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LowestCommonAncestor(blk1, blk2 *labelPosition) *labelPosition { | ||||||
|  | 	sb := f.ssaB.LowestCommonAncestor(blk1.sb, blk2.sb) | ||||||
|  | 	pos := f.m.getOrAllocateSSABlockLabelPosition(sb) | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Idom implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) Idom(blk *labelPosition) *labelPosition { | ||||||
|  | 	sb := f.ssaB.Idom(blk.sb) | ||||||
|  | 	pos := f.m.getOrAllocateSSABlockLabelPosition(sb) | ||||||
|  | 	return pos | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SwapBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) SwapBefore(x1, x2, tmp regalloc.VReg, instr *instruction) { | ||||||
|  | 	f.m.swap(instr.prev, x1, x2, tmp) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StoreRegisterBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) StoreRegisterBefore(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertStoreRegisterAt(v, instr, false) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StoreRegisterAfter implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) StoreRegisterAfter(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertStoreRegisterAt(v, instr, true) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReloadRegisterBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReloadRegisterBefore(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertReloadRegisterAt(v, instr, false) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReloadRegisterAfter implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) ReloadRegisterAfter(v regalloc.VReg, instr *instruction) { | ||||||
|  | 	m := f.m | ||||||
|  | 	m.insertReloadRegisterAt(v, instr, true) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InsertMoveBefore implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) InsertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | ||||||
|  | 	f.m.insertMoveBefore(dst, src, instr) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestChild implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) LoopNestingForestChild(pos *labelPosition, i int) *labelPosition { | ||||||
|  | 	childSB := pos.sb.LoopNestingForestChildren()[i] | ||||||
|  | 	return f.m.getOrAllocateSSABlockLabelPosition(childSB) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Succ implements regalloc.Block. | ||||||
|  | func (f *regAllocFn) Succ(pos *labelPosition, i int) *labelPosition { | ||||||
|  | 	succSB := pos.sb.Succ(i) | ||||||
|  | 	if succSB.ReturnBlock() { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	return f.m.getOrAllocateSSABlockLabelPosition(succSB) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Pred implements regalloc.Block. | ||||||
|  | func (f *regAllocFn) Pred(pos *labelPosition, i int) *labelPosition { | ||||||
|  | 	predSB := pos.sb.Pred(i) | ||||||
|  | 	return f.m.getOrAllocateSSABlockLabelPosition(predSB) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlockParams implements regalloc.Function. | ||||||
|  | func (f *regAllocFn) BlockParams(pos *labelPosition, regs *[]regalloc.VReg) []regalloc.VReg { | ||||||
|  | 	c := f.m.compiler | ||||||
|  | 	*regs = (*regs)[:0] | ||||||
|  | 	for i := 0; i < pos.sb.Params(); i++ { | ||||||
|  | 		v := c.VRegOf(pos.sb.Param(i)) | ||||||
|  | 		*regs = append(*regs, v) | ||||||
|  | 	} | ||||||
|  | 	return *regs | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ID implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) ID() int32 { | ||||||
|  | 	return int32(pos.sb.ID()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrIteratorBegin implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrIteratorBegin() *instruction { | ||||||
|  | 	ret := pos.begin | ||||||
|  | 	pos.cur = ret | ||||||
|  | 	return ret | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrIteratorNext implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrIteratorNext() *instruction { | ||||||
|  | 	for { | ||||||
|  | 		if pos.cur == pos.end { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		instr := pos.cur.next | ||||||
|  | 		pos.cur = instr | ||||||
|  | 		if instr == nil { | ||||||
|  | 			return nil | ||||||
|  | 		} else if instr.addedBeforeRegAlloc { | ||||||
|  | 			// Only concerned with the instructions added before regalloc. | ||||||
|  | 			return instr | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrRevIteratorBegin implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrRevIteratorBegin() *instruction { | ||||||
|  | 	pos.cur = pos.end | ||||||
|  | 	return pos.cur | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // InstrRevIteratorNext implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) InstrRevIteratorNext() *instruction { | ||||||
|  | 	for { | ||||||
|  | 		if pos.cur == pos.begin { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		instr := pos.cur.prev | ||||||
|  | 		pos.cur = instr | ||||||
|  | 		if instr == nil { | ||||||
|  | 			return nil | ||||||
|  | 		} else if instr.addedBeforeRegAlloc { | ||||||
|  | 			// Only concerned with the instructions added before regalloc. | ||||||
|  | 			return instr | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FirstInstr implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) FirstInstr() *instruction { return pos.begin } | ||||||
|  | 
 | ||||||
|  | // LastInstrForInsertion implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) LastInstrForInsertion() *instruction { | ||||||
|  | 	return lastInstrForInsertion(pos.begin, pos.end) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Preds implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) Preds() int { return pos.sb.Preds() } | ||||||
|  | 
 | ||||||
|  | // Entry implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) Entry() bool { return pos.sb.EntryBlock() } | ||||||
|  | 
 | ||||||
|  | // Succs implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) Succs() int { return pos.sb.Succs() } | ||||||
|  | 
 | ||||||
|  | // LoopHeader implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) LoopHeader() bool { return pos.sb.LoopHeader() } | ||||||
|  | 
 | ||||||
|  | // LoopNestingForestChildren implements regalloc.Block. | ||||||
|  | func (pos *labelPosition) LoopNestingForestChildren() int { | ||||||
|  | 	return len(pos.sb.LoopNestingForestChildren()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m *machine) swap(cur *instruction, x1, x2, tmp regalloc.VReg) { | ||||||
| 	prevNext := cur.next | 	prevNext := cur.next | ||||||
| 	var mov1, mov2, mov3 *instruction | 	var mov1, mov2, mov3 *instruction | ||||||
| 	if x1.RegType() == regalloc.RegTypeInt { | 	if x1.RegType() == regalloc.RegTypeInt { | ||||||
|  | @ -32,12 +240,12 @@ func (m *machine) Swap(cur *instruction, x1, x2, tmp regalloc.VReg) { | ||||||
| 		if !tmp.Valid() { | 		if !tmp.Valid() { | ||||||
| 			r2 := x2.RealReg() | 			r2 := x2.RealReg() | ||||||
| 			// Temporarily spill x1 to stack. | 			// Temporarily spill x1 to stack. | ||||||
| 			cur = m.InsertStoreRegisterAt(x1, cur, true).prev | 			cur = m.insertStoreRegisterAt(x1, cur, true).prev | ||||||
| 			// Then move x2 to x1. | 			// Then move x2 to x1. | ||||||
| 			cur = linkInstr(cur, m.allocateInstr().asFpuMov128(x1, x2)) | 			cur = linkInstr(cur, m.allocateInstr().asFpuMov128(x1, x2)) | ||||||
| 			linkInstr(cur, prevNext) | 			linkInstr(cur, prevNext) | ||||||
| 			// Then reload the original value on x1 from stack to r2. | 			// Then reload the original value on x1 from stack to r2. | ||||||
| 			m.InsertReloadRegisterAt(x1.SetRealReg(r2), cur, true) | 			m.insertReloadRegisterAt(x1.SetRealReg(r2), cur, true) | ||||||
| 		} else { | 		} else { | ||||||
| 			mov1 = m.allocateInstr().asFpuMov128(tmp, x1) | 			mov1 = m.allocateInstr().asFpuMov128(tmp, x1) | ||||||
| 			mov2 = m.allocateInstr().asFpuMov128(x1, x2) | 			mov2 = m.allocateInstr().asFpuMov128(x1, x2) | ||||||
|  | @ -50,8 +258,7 @@ func (m *machine) Swap(cur *instruction, x1, x2, tmp regalloc.VReg) { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // InsertMoveBefore implements backend.RegAllocFunctionMachine. | func (m *machine) insertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | ||||||
| func (m *machine) InsertMoveBefore(dst, src regalloc.VReg, instr *instruction) { |  | ||||||
| 	typ := src.RegType() | 	typ := src.RegType() | ||||||
| 	if typ != dst.RegType() { | 	if typ != dst.RegType() { | ||||||
| 		panic("BUG: src and dst must have the same type") | 		panic("BUG: src and dst must have the same type") | ||||||
|  | @ -70,13 +277,7 @@ func (m *machine) InsertMoveBefore(dst, src regalloc.VReg, instr *instruction) { | ||||||
| 	linkInstr(cur, prevNext) | 	linkInstr(cur, prevNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // SSABlockLabel implements backend.RegAllocFunctionMachine. | func (m *machine) insertStoreRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { | ||||||
| func (m *machine) SSABlockLabel(id ssa.BasicBlockID) backend.Label { |  | ||||||
| 	return m.executableContext.SsaBlockIDToLabels[id] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // InsertStoreRegisterAt implements backend.RegAllocFunctionMachine. |  | ||||||
| func (m *machine) InsertStoreRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { |  | ||||||
| 	if !v.IsRealReg() { | 	if !v.IsRealReg() { | ||||||
| 		panic("BUG: VReg must be backed by real reg to be stored") | 		panic("BUG: VReg must be backed by real reg to be stored") | ||||||
| 	} | 	} | ||||||
|  | @ -100,8 +301,7 @@ func (m *machine) InsertStoreRegisterAt(v regalloc.VReg, instr *instruction, aft | ||||||
| 	return linkInstr(cur, prevNext) | 	return linkInstr(cur, prevNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // InsertReloadRegisterAt implements backend.RegAllocFunctionMachine. | func (m *machine) insertReloadRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { | ||||||
| func (m *machine) InsertReloadRegisterAt(v regalloc.VReg, instr *instruction, after bool) *instruction { |  | ||||||
| 	if !v.IsRealReg() { | 	if !v.IsRealReg() { | ||||||
| 		panic("BUG: VReg must be backed by real reg to be stored") | 		panic("BUG: VReg must be backed by real reg to be stored") | ||||||
| 	} | 	} | ||||||
|  | @ -134,8 +334,7 @@ func (m *machine) InsertReloadRegisterAt(v regalloc.VReg, instr *instruction, af | ||||||
| 	return linkInstr(cur, prevNext) | 	return linkInstr(cur, prevNext) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // LastInstrForInsertion implements backend.RegAllocFunctionMachine. | func lastInstrForInsertion(begin, end *instruction) *instruction { | ||||||
| func (m *machine) LastInstrForInsertion(begin, end *instruction) *instruction { |  | ||||||
| 	cur := end | 	cur := end | ||||||
| 	for cur.kind == nop0 { | 	for cur.kind == nop0 { | ||||||
| 		cur = cur.prev | 		cur = cur.prev | ||||||
|  |  | ||||||
|  | @ -14,7 +14,7 @@ func UnwindStack(sp, _, top uintptr, returnAddresses []uintptr) []uintptr { | ||||||
| 
 | 
 | ||||||
| 	var stackBuf []byte | 	var stackBuf []byte | ||||||
| 	{ | 	{ | ||||||
| 		// TODO: use unsafe.Slice after floor version is set to Go 1.20. | 		//nolint:staticcheck | ||||||
| 		hdr := (*reflect.SliceHeader)(unsafe.Pointer(&stackBuf)) | 		hdr := (*reflect.SliceHeader)(unsafe.Pointer(&stackBuf)) | ||||||
| 		hdr.Data = sp | 		hdr.Data = sp | ||||||
| 		hdr.Len = l | 		hdr.Len = l | ||||||
|  | @ -78,13 +78,7 @@ func GoCallStackView(stackPointerBeforeGoCall *uint64) []uint64 { | ||||||
| 	//              +-----------------+ <---- stackPointerBeforeGoCall | 	//              +-----------------+ <---- stackPointerBeforeGoCall | ||||||
| 	//                 (low address) | 	//                 (low address) | ||||||
| 	ptr := unsafe.Pointer(stackPointerBeforeGoCall) | 	ptr := unsafe.Pointer(stackPointerBeforeGoCall) | ||||||
|  | 	data := (*uint64)(unsafe.Add(ptr, 16)) // skips the (frame_size, sliceSize). | ||||||
| 	size := *(*uint64)(unsafe.Add(ptr, 8)) | 	size := *(*uint64)(unsafe.Add(ptr, 8)) | ||||||
| 	var view []uint64 | 	return unsafe.Slice(data, size) | ||||||
| 	{ |  | ||||||
| 		sh := (*reflect.SliceHeader)(unsafe.Pointer(&view)) |  | ||||||
| 		sh.Data = uintptr(unsafe.Add(ptr, 16)) // skips the (frame_size, sliceSize). |  | ||||||
| 		sh.Len = int(size) |  | ||||||
| 		sh.Cap = int(size) |  | ||||||
| 	} |  | ||||||
| 	return view |  | ||||||
| } | } | ||||||
|  |  | ||||||
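The GoCallStackView change above swaps the deprecated reflect.SliceHeader surgery for unsafe.Slice, which builds a []uint64 directly from a base pointer and a length (after skipping the 16-byte frame_size/sliceSize header). A minimal standalone sketch of the pattern:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	backing := [4]uint64{10, 20, 30, 40}

	// unsafe.Slice turns a *uint64 plus a length into a []uint64 that
	// aliases the backing memory; no SliceHeader manipulation needed.
	base := (*uint64)(unsafe.Pointer(&backing[0]))
	view := unsafe.Slice(base, 4)
	fmt.Println(view) // [10 20 30 40]
}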
							
								
								
									
19 vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/machine.go (generated, vendored)
|  | @ -11,7 +11,24 @@ import ( | ||||||
| type ( | type ( | ||||||
| 	// Machine is a backend for a specific ISA machine. | 	// Machine is a backend for a specific ISA machine. | ||||||
| 	Machine interface { | 	Machine interface { | ||||||
| 		ExecutableContext() ExecutableContext | 		// StartLoweringFunction is called when the compilation of the given function is started. | ||||||
|  | 		// The maxBlockID is the maximum ssa.BasicBlockID in the function. | ||||||
|  | 		StartLoweringFunction(maxBlockID ssa.BasicBlockID) | ||||||
|  | 
 | ||||||
|  | 		// LinkAdjacentBlocks is called after all blocks have been lowered in order to stitch them into a single instruction list. | ||||||
|  | 		LinkAdjacentBlocks(prev, next ssa.BasicBlock) | ||||||
|  | 
 | ||||||
|  | 		// StartBlock is called when the compilation of the given block is started. | ||||||
|  | 		// This is called in reverse post order of the ssa.BasicBlock(s), i.e. the order produced by | ||||||
|  | 		// ssa.Builder's BlockIteratorReversePostOrderBegin and BlockIteratorReversePostOrderEnd. | ||||||
|  | 		StartBlock(ssa.BasicBlock) | ||||||
|  | 
 | ||||||
|  | 		// EndBlock is called when the compilation of the current block is finished. | ||||||
|  | 		EndBlock() | ||||||
|  | 
 | ||||||
|  | 		// FlushPendingInstructions flushes the pending instructions to the buffer. | ||||||
|  | 		// This will be called after the lowering of each SSA Instruction. | ||||||
|  | 		FlushPendingInstructions() | ||||||
| 
 | 
 | ||||||
| 		// DisableStackCheck disables the stack check for the current compilation for debugging/testing. | 		// DisableStackCheck disables the stack check for the current compilation for debugging/testing. | ||||||
| 		DisableStackCheck() | 		DisableStackCheck() | ||||||
|  |  | ||||||
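Taken together, the new hooks describe a driving sequence that the shared backend is expected to follow. The sketch below shows one plausible shape of that loop; the driver function itself is an assumption for illustration (it lives conceptually in package backend and is not part of this interface):

// lowerAllBlocks sketches how a caller might drive the hooks above,
// assuming blocks is already in reverse post order.
func lowerAllBlocks(m Machine, maxID ssa.BasicBlockID, blocks []ssa.BasicBlock) {
	m.StartLoweringFunction(maxID)
	for _, blk := range blocks {
		m.StartBlock(blk)
		// ... lower each ssa.Instruction here, calling
		// m.FlushPendingInstructions() after every one ...
		m.FlushPendingInstructions()
		m.EndBlock()
	}
	// Finally, stitch the per-block lists into one instruction list.
	for i := 0; i+1 < len(blocks); i++ {
		m.LinkAdjacentBlocks(blocks[i], blocks[i+1])
	}
}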
							
								
								
									
321 vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc.go (generated, vendored)
|  | @ -1,321 +0,0 @@ | ||||||
| package backend |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc" |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // RegAllocFunctionMachine is the interface for the machine specific logic that will be used in RegAllocFunction. |  | ||||||
| type RegAllocFunctionMachine[I regalloc.InstrConstraint] interface { |  | ||||||
| 	// InsertMoveBefore inserts the move instruction from src to dst before the given instruction. |  | ||||||
| 	InsertMoveBefore(dst, src regalloc.VReg, instr I) |  | ||||||
| 	// InsertStoreRegisterAt inserts the instruction(s) to store the given virtual register at the given instruction. |  | ||||||
| 	// If after is true, the instruction(s) will be inserted after the given instruction, otherwise before. |  | ||||||
| 	InsertStoreRegisterAt(v regalloc.VReg, instr I, after bool) I |  | ||||||
| 	// InsertReloadRegisterAt inserts the instruction(s) to reload the given virtual register at the given instruction. |  | ||||||
| 	// If after is true, the instruction(s) will be inserted after the given instruction, otherwise before. |  | ||||||
| 	InsertReloadRegisterAt(v regalloc.VReg, instr I, after bool) I |  | ||||||
| 	// ClobberedRegisters is called when the register allocation is done and the clobbered registers are known. |  | ||||||
| 	ClobberedRegisters(regs []regalloc.VReg) |  | ||||||
| 	// Swap swaps the two virtual registers after the given instruction. |  | ||||||
| 	Swap(cur I, x1, x2, tmp regalloc.VReg) |  | ||||||
| 	// LastInstrForInsertion implements LastInstrForInsertion of regalloc.Function. See its comment for details. |  | ||||||
| 	LastInstrForInsertion(begin, end I) I |  | ||||||
| 	// SSABlockLabel returns the label of the given ssa.BasicBlockID. |  | ||||||
| 	SSABlockLabel(id ssa.BasicBlockID) Label |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type ( |  | ||||||
| 	// RegAllocFunction implements regalloc.Function. |  | ||||||
| 	RegAllocFunction[I regalloc.InstrConstraint, m RegAllocFunctionMachine[I]] struct { |  | ||||||
| 		m   m |  | ||||||
| 		ssb ssa.Builder |  | ||||||
| 		c   Compiler |  | ||||||
| 		// iter is the iterator for reversePostOrderBlocks |  | ||||||
| 		iter                   int |  | ||||||
| 		reversePostOrderBlocks []RegAllocBlock[I, m] |  | ||||||
| 		// labelToRegAllocBlockIndex maps label to the index of reversePostOrderBlocks. |  | ||||||
| 		labelToRegAllocBlockIndex [] /* Label to */ int |  | ||||||
| 		loopNestingForestRoots    []ssa.BasicBlock |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// RegAllocBlock implements regalloc.Block. |  | ||||||
| 	RegAllocBlock[I regalloc.InstrConstraint, m RegAllocFunctionMachine[I]] struct { |  | ||||||
| 		// f is the function this instruction belongs to. Used to reuse the regAllocFunctionImpl.predsSlice slice for Defs() and Uses(). |  | ||||||
| 		f                           *RegAllocFunction[I, m] |  | ||||||
| 		sb                          ssa.BasicBlock |  | ||||||
| 		l                           Label |  | ||||||
| 		begin, end                  I |  | ||||||
| 		loopNestingForestChildren   []ssa.BasicBlock |  | ||||||
| 		cur                         I |  | ||||||
| 		id                          int |  | ||||||
| 		cachedLastInstrForInsertion I |  | ||||||
| 	} |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // NewRegAllocFunction returns a new RegAllocFunction. |  | ||||||
| func NewRegAllocFunction[I regalloc.InstrConstraint, M RegAllocFunctionMachine[I]](m M, ssb ssa.Builder, c Compiler) *RegAllocFunction[I, M] { |  | ||||||
| 	return &RegAllocFunction[I, M]{ |  | ||||||
| 		m:   m, |  | ||||||
| 		ssb: ssb, |  | ||||||
| 		c:   c, |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // AddBlock adds a new block to the function. |  | ||||||
| func (f *RegAllocFunction[I, M]) AddBlock(sb ssa.BasicBlock, l Label, begin, end I) { |  | ||||||
| 	i := len(f.reversePostOrderBlocks) |  | ||||||
| 	f.reversePostOrderBlocks = append(f.reversePostOrderBlocks, RegAllocBlock[I, M]{ |  | ||||||
| 		f:     f, |  | ||||||
| 		sb:    sb, |  | ||||||
| 		l:     l, |  | ||||||
| 		begin: begin, |  | ||||||
| 		end:   end, |  | ||||||
| 		id:    int(sb.ID()), |  | ||||||
| 	}) |  | ||||||
| 	if len(f.labelToRegAllocBlockIndex) <= int(l) { |  | ||||||
| 		f.labelToRegAllocBlockIndex = append(f.labelToRegAllocBlockIndex, make([]int, int(l)-len(f.labelToRegAllocBlockIndex)+1)...) |  | ||||||
| 	} |  | ||||||
| 	f.labelToRegAllocBlockIndex[l] = i |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Reset resets the function for the next compilation. |  | ||||||
| func (f *RegAllocFunction[I, M]) Reset() { |  | ||||||
| 	f.reversePostOrderBlocks = f.reversePostOrderBlocks[:0] |  | ||||||
| 	f.iter = 0 |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // StoreRegisterAfter implements regalloc.Function StoreRegisterAfter. |  | ||||||
| func (f *RegAllocFunction[I, M]) StoreRegisterAfter(v regalloc.VReg, instr regalloc.Instr) { |  | ||||||
| 	m := f.m |  | ||||||
| 	m.InsertStoreRegisterAt(v, instr.(I), true) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ReloadRegisterBefore implements regalloc.Function ReloadRegisterBefore. |  | ||||||
| func (f *RegAllocFunction[I, M]) ReloadRegisterBefore(v regalloc.VReg, instr regalloc.Instr) { |  | ||||||
| 	m := f.m |  | ||||||
| 	m.InsertReloadRegisterAt(v, instr.(I), false) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ReloadRegisterAfter implements regalloc.Function ReloadRegisterAfter. |  | ||||||
| func (f *RegAllocFunction[I, M]) ReloadRegisterAfter(v regalloc.VReg, instr regalloc.Instr) { |  | ||||||
| 	m := f.m |  | ||||||
| 	m.InsertReloadRegisterAt(v, instr.(I), true) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // StoreRegisterBefore implements regalloc.Function StoreRegisterBefore. |  | ||||||
| func (f *RegAllocFunction[I, M]) StoreRegisterBefore(v regalloc.VReg, instr regalloc.Instr) { |  | ||||||
| 	m := f.m |  | ||||||
| 	m.InsertStoreRegisterAt(v, instr.(I), false) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ClobberedRegisters implements regalloc.Function ClobberedRegisters. |  | ||||||
| func (f *RegAllocFunction[I, M]) ClobberedRegisters(regs []regalloc.VReg) { |  | ||||||
| 	f.m.ClobberedRegisters(regs) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // SwapBefore implements regalloc.Function SwapBefore. |  | ||||||
| func (f *RegAllocFunction[I, M]) SwapBefore(x1, x2, tmp regalloc.VReg, instr regalloc.Instr) { |  | ||||||
| 	f.m.Swap(instr.Prev().(I), x1, x2, tmp) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // PostOrderBlockIteratorBegin implements regalloc.Function PostOrderBlockIteratorBegin. |  | ||||||
| func (f *RegAllocFunction[I, M]) PostOrderBlockIteratorBegin() regalloc.Block { |  | ||||||
| 	f.iter = len(f.reversePostOrderBlocks) - 1 |  | ||||||
| 	return f.PostOrderBlockIteratorNext() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // PostOrderBlockIteratorNext implements regalloc.Function PostOrderBlockIteratorNext. |  | ||||||
| func (f *RegAllocFunction[I, M]) PostOrderBlockIteratorNext() regalloc.Block { |  | ||||||
| 	if f.iter < 0 { |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 	b := &f.reversePostOrderBlocks[f.iter] |  | ||||||
| 	f.iter-- |  | ||||||
| 	return b |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ReversePostOrderBlockIteratorBegin implements regalloc.Function ReversePostOrderBlockIteratorBegin. |  | ||||||
| func (f *RegAllocFunction[I, M]) ReversePostOrderBlockIteratorBegin() regalloc.Block { |  | ||||||
| 	f.iter = 0 |  | ||||||
| 	return f.ReversePostOrderBlockIteratorNext() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ReversePostOrderBlockIteratorNext implements regalloc.Function ReversePostOrderBlockIteratorNext. |  | ||||||
| func (f *RegAllocFunction[I, M]) ReversePostOrderBlockIteratorNext() regalloc.Block { |  | ||||||
| 	if f.iter >= len(f.reversePostOrderBlocks) { |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 	b := &f.reversePostOrderBlocks[f.iter] |  | ||||||
| 	f.iter++ |  | ||||||
| 	return b |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LoopNestingForestRoots implements regalloc.Function LoopNestingForestRoots. |  | ||||||
| func (f *RegAllocFunction[I, M]) LoopNestingForestRoots() int { |  | ||||||
| 	f.loopNestingForestRoots = f.ssb.LoopNestingForestRoots() |  | ||||||
| 	return len(f.loopNestingForestRoots) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LoopNestingForestRoot implements regalloc.Function LoopNestingForestRoot. |  | ||||||
| func (f *RegAllocFunction[I, M]) LoopNestingForestRoot(i int) regalloc.Block { |  | ||||||
| 	blk := f.loopNestingForestRoots[i] |  | ||||||
| 	l := f.m.SSABlockLabel(blk.ID()) |  | ||||||
| 	index := f.labelToRegAllocBlockIndex[l] |  | ||||||
| 	return &f.reversePostOrderBlocks[index] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // InsertMoveBefore implements regalloc.Function InsertMoveBefore. |  | ||||||
| func (f *RegAllocFunction[I, M]) InsertMoveBefore(dst, src regalloc.VReg, instr regalloc.Instr) { |  | ||||||
| 	f.m.InsertMoveBefore(dst, src, instr.(I)) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LowestCommonAncestor implements regalloc.Function LowestCommonAncestor. |  | ||||||
| func (f *RegAllocFunction[I, M]) LowestCommonAncestor(blk1, blk2 regalloc.Block) regalloc.Block { |  | ||||||
| 	ret := f.ssb.LowestCommonAncestor(blk1.(*RegAllocBlock[I, M]).sb, blk2.(*RegAllocBlock[I, M]).sb) |  | ||||||
| 	l := f.m.SSABlockLabel(ret.ID()) |  | ||||||
| 	index := f.labelToRegAllocBlockIndex[l] |  | ||||||
| 	return &f.reversePostOrderBlocks[index] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Idom implements regalloc.Function Idom. |  | ||||||
| func (f *RegAllocFunction[I, M]) Idom(blk regalloc.Block) regalloc.Block { |  | ||||||
| 	builder := f.ssb |  | ||||||
| 	idom := builder.Idom(blk.(*RegAllocBlock[I, M]).sb) |  | ||||||
| 	if idom == nil { |  | ||||||
| 		panic("BUG: idom must not be nil") |  | ||||||
| 	} |  | ||||||
| 	l := f.m.SSABlockLabel(idom.ID()) |  | ||||||
| 	index := f.labelToRegAllocBlockIndex[l] |  | ||||||
| 	return &f.reversePostOrderBlocks[index] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ID implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) ID() int32 { return int32(r.id) } |  | ||||||
| 
 |  | ||||||
| // BlockParams implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) BlockParams(regs *[]regalloc.VReg) []regalloc.VReg { |  | ||||||
| 	c := r.f.c |  | ||||||
| 	*regs = (*regs)[:0] |  | ||||||
| 	for i := 0; i < r.sb.Params(); i++ { |  | ||||||
| 		v := c.VRegOf(r.sb.Param(i)) |  | ||||||
| 		*regs = append(*regs, v) |  | ||||||
| 	} |  | ||||||
| 	return *regs |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // InstrIteratorBegin implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) InstrIteratorBegin() regalloc.Instr { |  | ||||||
| 	r.cur = r.begin |  | ||||||
| 	return r.cur |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // InstrIteratorNext implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) InstrIteratorNext() regalloc.Instr { |  | ||||||
| 	for { |  | ||||||
| 		if r.cur == r.end { |  | ||||||
| 			return nil |  | ||||||
| 		} |  | ||||||
| 		instr := r.cur.Next() |  | ||||||
| 		r.cur = instr.(I) |  | ||||||
| 		if instr == nil { |  | ||||||
| 			return nil |  | ||||||
| 		} else if instr.AddedBeforeRegAlloc() { |  | ||||||
| 			// Only concerned about the instruction added before regalloc. |  | ||||||
| 			return instr |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // InstrRevIteratorBegin implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) InstrRevIteratorBegin() regalloc.Instr { |  | ||||||
| 	r.cur = r.end |  | ||||||
| 	return r.cur |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // InstrRevIteratorNext implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) InstrRevIteratorNext() regalloc.Instr { |  | ||||||
| 	for { |  | ||||||
| 		if r.cur == r.begin { |  | ||||||
| 			return nil |  | ||||||
| 		} |  | ||||||
| 		instr := r.cur.Prev() |  | ||||||
| 		r.cur = instr.(I) |  | ||||||
| 		if instr == nil { |  | ||||||
| 			return nil |  | ||||||
| 		} else if instr.AddedBeforeRegAlloc() { |  | ||||||
| 			// Only concerned about the instruction added before regalloc. |  | ||||||
| 			return instr |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // FirstInstr implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) FirstInstr() regalloc.Instr { |  | ||||||
| 	return r.begin |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // EndInstr implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) EndInstr() regalloc.Instr { |  | ||||||
| 	return r.end |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LastInstrForInsertion implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) LastInstrForInsertion() regalloc.Instr { |  | ||||||
| 	var nil I |  | ||||||
| 	if r.cachedLastInstrForInsertion == nil { |  | ||||||
| 		r.cachedLastInstrForInsertion = r.f.m.LastInstrForInsertion(r.begin, r.end) |  | ||||||
| 	} |  | ||||||
| 	return r.cachedLastInstrForInsertion |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Preds implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) Preds() int { return r.sb.Preds() } |  | ||||||
| 
 |  | ||||||
| // Pred implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) Pred(i int) regalloc.Block { |  | ||||||
| 	sb := r.sb |  | ||||||
| 	pred := sb.Pred(i) |  | ||||||
| 	l := r.f.m.SSABlockLabel(pred.ID()) |  | ||||||
| 	index := r.f.labelToRegAllocBlockIndex[l] |  | ||||||
| 	return &r.f.reversePostOrderBlocks[index] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Entry implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) Entry() bool { return r.sb.EntryBlock() } |  | ||||||
| 
 |  | ||||||
| // Succs implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) Succs() int { |  | ||||||
| 	return r.sb.Succs() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Succ implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) Succ(i int) regalloc.Block { |  | ||||||
| 	sb := r.sb |  | ||||||
| 	succ := sb.Succ(i) |  | ||||||
| 	if succ.ReturnBlock() { |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 	l := r.f.m.SSABlockLabel(succ.ID()) |  | ||||||
| 	index := r.f.labelToRegAllocBlockIndex[l] |  | ||||||
| 	return &r.f.reversePostOrderBlocks[index] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LoopHeader implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) LoopHeader() bool { |  | ||||||
| 	return r.sb.LoopHeader() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LoopNestingForestChildren implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) LoopNestingForestChildren() int { |  | ||||||
| 	r.loopNestingForestChildren = r.sb.LoopNestingForestChildren() |  | ||||||
| 	return len(r.loopNestingForestChildren) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // LoopNestingForestChild implements regalloc.Block. |  | ||||||
| func (r *RegAllocBlock[I, m]) LoopNestingForestChild(i int) regalloc.Block { |  | ||||||
| 	blk := r.loopNestingForestChildren[i] |  | ||||||
| 	l := r.f.m.SSABlockLabel(blk.ID()) |  | ||||||
| 	index := r.f.labelToRegAllocBlockIndex[l] |  | ||||||
| 	return &r.f.reversePostOrderBlocks[index] |  | ||||||
| } |  | ||||||
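The adapter deleted above boxed every block and instruction behind plain interfaces; as the api.go diff below shows, wazero 1.8.0 parameterizes the register allocator by type instead. One detail worth noting from AddBlock is the grow-on-demand dense index from Label to block position. A minimal sketch of that idiom, with illustrative names:

package main

import "fmt"

// ensureIndex extends a dense index so that position l is addressable,
// mirroring how AddBlock grows labelToRegAllocBlockIndex with zeroed entries.
func ensureIndex(m []int, l int) []int {
	if len(m) <= l {
		m = append(m, make([]int, l-len(m)+1)...)
	}
	return m
}

func main() {
	var labelToIndex []int
	labelToIndex = ensureIndex(labelToIndex, 5)
	labelToIndex[5] = 42
	fmt.Println(len(labelToIndex), labelToIndex[5]) // 6 42
}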
84  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc/api.go  (generated, vendored)
							|  | @ -4,104 +4,100 @@ import "fmt" | ||||||
| 
 | 
 | ||||||
| // These interfaces are implemented by ISA-specific backends to abstract away the details, and allow the register | // These interfaces are implemented by ISA-specific backends to abstract away the details, and allow the register | ||||||
| // allocators to work on any ISA. | // allocators to work on any ISA. | ||||||
| // |  | ||||||
| // TODO: the interfaces are not stabilized yet, especially x64 will need some changes. E.g. x64 has an addressing mode |  | ||||||
| // 	where index can be in memory. That kind of info will be useful to reduce the register pressure, and should be leveraged |  | ||||||
| // 	by the register allocators, like https://docs.rs/regalloc2/latest/regalloc2/enum.OperandConstraint.html |  | ||||||
| 
 | 
 | ||||||
| type ( | type ( | ||||||
| 	// Function is the top-level interface to do register allocation, which corresponds to a CFG containing | 	// Function is the top-level interface to do register allocation, which corresponds to a CFG containing | ||||||
| 	// Block(s). | 	// Block(s). | ||||||
| 	Function interface { | 	// | ||||||
|  | 	// I is the type of the instruction, and B is the type of the basic block. | ||||||
|  | 	Function[I Instr, B Block[I]] interface { | ||||||
| 		// PostOrderBlockIteratorBegin returns the first block in the post-order traversal of the CFG. | 		// PostOrderBlockIteratorBegin returns the first block in the post-order traversal of the CFG. | ||||||
| 		// In other words, the last blocks in the CFG will be returned first. | 		// In other words, the last blocks in the CFG will be returned first. | ||||||
| 		PostOrderBlockIteratorBegin() Block | 		PostOrderBlockIteratorBegin() B | ||||||
| 		// PostOrderBlockIteratorNext returns the next block in the post-order traversal of the CFG. | 		// PostOrderBlockIteratorNext returns the next block in the post-order traversal of the CFG. | ||||||
| 		PostOrderBlockIteratorNext() Block | 		PostOrderBlockIteratorNext() B | ||||||
| 		// ReversePostOrderBlockIteratorBegin returns the first block in the reverse post-order traversal of the CFG. | 		// ReversePostOrderBlockIteratorBegin returns the first block in the reverse post-order traversal of the CFG. | ||||||
| 		// In other words, the first blocks in the CFG will be returned first. | 		// In other words, the first blocks in the CFG will be returned first. | ||||||
| 		ReversePostOrderBlockIteratorBegin() Block | 		ReversePostOrderBlockIteratorBegin() B | ||||||
| 		// ReversePostOrderBlockIteratorNext returns the next block in the reverse post-order traversal of the CFG. | 		// ReversePostOrderBlockIteratorNext returns the next block in the reverse post-order traversal of the CFG. | ||||||
| 		ReversePostOrderBlockIteratorNext() Block | 		ReversePostOrderBlockIteratorNext() B | ||||||
| 		// ClobberedRegisters tells which registers are clobbered by this function. | 		// ClobberedRegisters tells which registers are clobbered by this function. | ||||||
| 		ClobberedRegisters([]VReg) | 		ClobberedRegisters([]VReg) | ||||||
| 		// LoopNestingForestRoots returns the number of roots of the loop nesting forest in a function. | 		// LoopNestingForestRoots returns the number of roots of the loop nesting forest in a function. | ||||||
| 		LoopNestingForestRoots() int | 		LoopNestingForestRoots() int | ||||||
| 		// LoopNestingForestRoot returns the i-th root of the loop nesting forest in a function. | 		// LoopNestingForestRoot returns the i-th root of the loop nesting forest in a function. | ||||||
| 		LoopNestingForestRoot(i int) Block | 		LoopNestingForestRoot(i int) B | ||||||
| 		// LowestCommonAncestor returns the lowest common ancestor of two blocks in the dominator tree. | 		// LowestCommonAncestor returns the lowest common ancestor of two blocks in the dominator tree. | ||||||
| 		LowestCommonAncestor(blk1, blk2 Block) Block | 		LowestCommonAncestor(blk1, blk2 B) B | ||||||
| 		// Idom returns the immediate dominator of the given block. | 		// Idom returns the immediate dominator of the given block. | ||||||
| 		Idom(blk Block) Block | 		Idom(blk B) B | ||||||
|  | 
 | ||||||
|  | 		// LoopNestingForestChild returns the i-th child of the block in the loop nesting forest. | ||||||
|  | 		LoopNestingForestChild(b B, i int) B | ||||||
|  | 		// Pred returns the i-th predecessor of the block in the CFG. | ||||||
|  | 		Pred(b B, i int) B | ||||||
|  | 		// Succ returns the i-th successor of the block in the CFG. | ||||||
|  | 		Succ(b B, i int) B | ||||||
|  | 		// BlockParams returns the virtual registers used as the parameters of this block. | ||||||
|  | 		BlockParams(B, *[]VReg) []VReg | ||||||
| 
 | 
 | ||||||
| 		// The following methods are for rewriting the function. | 		// The following methods are for rewriting the function. | ||||||
| 
 | 
 | ||||||
| 		// SwapAtEndOfBlock swaps the two virtual registers at the end of the given block. | 		// SwapBefore swaps the two virtual registers just before the given instruction. | ||||||
| 		SwapBefore(x1, x2, tmp VReg, instr Instr) | 		SwapBefore(x1, x2, tmp VReg, instr I) | ||||||
| 		// StoreRegisterBefore inserts store instruction(s) before the given instruction for the given virtual register. | 		// StoreRegisterBefore inserts store instruction(s) before the given instruction for the given virtual register. | ||||||
| 		StoreRegisterBefore(v VReg, instr Instr) | 		StoreRegisterBefore(v VReg, instr I) | ||||||
| 		// StoreRegisterAfter inserts store instruction(s) after the given instruction for the given virtual register. | 		// StoreRegisterAfter inserts store instruction(s) after the given instruction for the given virtual register. | ||||||
| 		StoreRegisterAfter(v VReg, instr Instr) | 		StoreRegisterAfter(v VReg, instr I) | ||||||
| 		// ReloadRegisterBefore inserts reload instruction(s) before the given instruction for the given virtual register. | 		// ReloadRegisterBefore inserts reload instruction(s) before the given instruction for the given virtual register. | ||||||
| 		ReloadRegisterBefore(v VReg, instr Instr) | 		ReloadRegisterBefore(v VReg, instr I) | ||||||
| 		// ReloadRegisterAfter inserts reload instruction(s) after the given instruction for the given virtual register. | 		// ReloadRegisterAfter inserts reload instruction(s) after the given instruction for the given virtual register. | ||||||
| 		ReloadRegisterAfter(v VReg, instr Instr) | 		ReloadRegisterAfter(v VReg, instr I) | ||||||
| 		// InsertMoveBefore inserts move instruction(s) before the given instruction for the given virtual registers. | 		// InsertMoveBefore inserts move instruction(s) before the given instruction for the given virtual registers. | ||||||
| 		InsertMoveBefore(dst, src VReg, instr Instr) | 		InsertMoveBefore(dst, src VReg, instr I) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Block is a basic block in the CFG of a function, and it consists of multiple instructions, and predecessor Block(s). | 	// Block is a basic block in the CFG of a function, and it consists of multiple instructions, and predecessor Block(s). | ||||||
| 	Block interface { | 	// Right now, this corresponds to a ssa.BasicBlock lowered to the machine level. | ||||||
|  | 	Block[I Instr] interface { | ||||||
|  | 		comparable | ||||||
| 		// ID returns the unique identifier of this block which is ordered in the reverse post-order traversal of the CFG. | 		// ID returns the unique identifier of this block which is ordered in the reverse post-order traversal of the CFG. | ||||||
| 		ID() int32 | 		ID() int32 | ||||||
| 		// BlockParams returns the virtual registers used as the parameters of this block. |  | ||||||
| 		BlockParams(*[]VReg) []VReg |  | ||||||
| 		// InstrIteratorBegin returns the first instruction in this block. Instructions added after lowering must be skipped. | 		// InstrIteratorBegin returns the first instruction in this block. Instructions added after lowering must be skipped. | ||||||
| 		// Note: multiple Instr(s) will not be held at the same time, so it's safe to use the same impl for the return Instr. | 		// Note: multiple Instr(s) will not be held at the same time, so it's safe to use the same impl for the return Instr. | ||||||
| 		InstrIteratorBegin() Instr | 		InstrIteratorBegin() I | ||||||
| 		// InstrIteratorNext returns the next instruction in this block. Instructions added after lowering must be skipped. | 		// InstrIteratorNext returns the next instruction in this block. Instructions added after lowering must be skipped. | ||||||
| 		// Note: multiple Instr(s) will not be held at the same time, so it's safe to use the same impl for the return Instr. | 		// Note: multiple Instr(s) will not be held at the same time, so it's safe to use the same impl for the return Instr. | ||||||
| 		InstrIteratorNext() Instr | 		InstrIteratorNext() I | ||||||
| 		// InstrRevIteratorBegin is the same as InstrIteratorBegin, but in the reverse order. | 		// InstrRevIteratorBegin is the same as InstrIteratorBegin, but in the reverse order. | ||||||
| 		InstrRevIteratorBegin() Instr | 		InstrRevIteratorBegin() I | ||||||
| 		// InstrRevIteratorNext is the same as InstrIteratorNext, but in the reverse order. | 		// InstrRevIteratorNext is the same as InstrIteratorNext, but in the reverse order. | ||||||
| 		InstrRevIteratorNext() Instr | 		InstrRevIteratorNext() I | ||||||
| 		// FirstInstr returns the first instruction in this block where instructions will be inserted after it. | 		// FirstInstr returns the first instruction in this block where instructions will be inserted after it. | ||||||
| 		FirstInstr() Instr | 		FirstInstr() I | ||||||
| 		// EndInstr returns the end instruction in this block. |  | ||||||
| 		EndInstr() Instr |  | ||||||
| 		// LastInstrForInsertion returns the last instruction in this block where instructions will be inserted before it. | 		// LastInstrForInsertion returns the last instruction in this block where instructions will be inserted before it. | ||||||
| 		// Such insertions only happen when we need to insert spill/reload instructions to adjust the merge edges. | 		// Such insertions only happen when we need to insert spill/reload instructions to adjust the merge edges. | ||||||
| 		// At the time of register allocation, all the critical edges are already split, so there is no need | 		// At the time of register allocation, all the critical edges are already split, so there is no need | ||||||
| 		// to worry about the case where branching instruction has multiple successors. | 		// to worry about the case where branching instruction has multiple successors. | ||||||
| 		// Therefore, usually, it is the nop instruction, but if the block ends with an unconditional branch, then it returns | 		// Therefore, usually, it is the nop instruction, but if the block ends with an unconditional branch, then it returns | ||||||
| 		// the branch, not the nop. In other words, it is either a nop or an unconditional branch. | 		// the branch, not the nop. In other words, it is either a nop or an unconditional branch. | ||||||
| 		LastInstrForInsertion() Instr | 		LastInstrForInsertion() I | ||||||
| 		// Preds returns the number of predecessors of this block in the CFG. | 		// Preds returns the number of predecessors of this block in the CFG. | ||||||
| 		Preds() int | 		Preds() int | ||||||
| 		// Pred returns the i-th predecessor of this block in the CFG. |  | ||||||
| 		Pred(i int) Block |  | ||||||
| 		// Entry returns true if the block is for the entry block. | 		// Entry returns true if the block is for the entry block. | ||||||
| 		Entry() bool | 		Entry() bool | ||||||
| 		// Succs returns the number of successors of this block in the CFG. | 		// Succs returns the number of successors of this block in the CFG. | ||||||
| 		Succs() int | 		Succs() int | ||||||
| 		// Succ returns the i-th successor of this block in the CFG. |  | ||||||
| 		Succ(i int) Block |  | ||||||
| 		// LoopHeader returns true if this block is a loop header. | 		// LoopHeader returns true if this block is a loop header. | ||||||
| 		LoopHeader() bool | 		LoopHeader() bool | ||||||
| 		// LoopNestingForestChildren returns the number of children of this block in the loop nesting forest. | 		// LoopNestingForestChildren returns the number of children of this block in the loop nesting forest. | ||||||
| 		LoopNestingForestChildren() int | 		LoopNestingForestChildren() int | ||||||
| 		// LoopNestingForestChild returns the i-th child of this block in the loop nesting forest. |  | ||||||
| 		LoopNestingForestChild(i int) Block |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Instr is an instruction in a block, abstracting away the underlying ISA. | 	// Instr is an instruction in a block, abstracting away the underlying ISA. | ||||||
| 	Instr interface { | 	Instr interface { | ||||||
|  | 		comparable | ||||||
| 		fmt.Stringer | 		fmt.Stringer | ||||||
| 		// Next returns the next instruction in the same block. |  | ||||||
| 		Next() Instr |  | ||||||
| 		// Prev returns the previous instruction in the same block. |  | ||||||
| 		Prev() Instr |  | ||||||
| 		// Defs returns the virtual registers defined by this instruction. | 		// Defs returns the virtual registers defined by this instruction. | ||||||
| 		Defs(*[]VReg) []VReg | 		Defs(*[]VReg) []VReg | ||||||
| 		// Uses returns the virtual registers used by this instruction. | 		// Uses returns the virtual registers used by this instruction. | ||||||
|  | @ -124,13 +120,5 @@ type ( | ||||||
| 		IsIndirectCall() bool | 		IsIndirectCall() bool | ||||||
| 		// IsReturn returns true if this instruction is a return instruction. | 		// IsReturn returns true if this instruction is a return instruction. | ||||||
| 		IsReturn() bool | 		IsReturn() bool | ||||||
| 		// AddedBeforeRegAlloc returns true if this instruction is added before register allocation. |  | ||||||
| 		AddedBeforeRegAlloc() bool |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// InstrConstraint is an interface for arch-specific instruction constraints. |  | ||||||
| 	InstrConstraint interface { |  | ||||||
| 		comparable |  | ||||||
| 		Instr |  | ||||||
| 	} | 	} | ||||||
| ) | ) | ||||||
|  |  | ||||||
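The net effect of the api.go rewrite is that Function, Block, and Instr now take type parameters, and the separate InstrConstraint (comparable plus Instr) is folded into the interfaces themselves. One practical consequence: generic code can signal "no instruction" with the typed zero value rather than a nil interface. A sketch under those assumptions, with illustrative names:

package main

import "fmt"

// Instr mirrors the shape above: comparable plus fmt.Stringer, usable only as
// a type constraint.
type Instr interface {
	comparable
	fmt.Stringer
}

type nop struct{ name string }

func (n *nop) String() string { return n.name }

// next returns the zero value of I (nil for pointer instruction types) to mean
// "no further instruction"; the comparable constraint lets callers test that
// directly instead of going through a nil interface.
func next[I Instr](instrs []I, i int) I {
	var zero I
	if i+1 >= len(instrs) {
		return zero
	}
	return instrs[i+1]
}

func main() {
	instrs := []*nop{{"a"}, {"b"}}
	if n := next(instrs, 1); n == nil {
		fmt.Println("end of block")
	}
}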
461  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc/regalloc.go  (generated, vendored; file diff suppressed because it is too large)
34  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc/regset.go  (generated, vendored)
							|  | @ -46,52 +46,50 @@ func (rs RegSet) Range(f func(allocatedRealReg RealReg)) { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| type regInUseSet [64]VReg | type regInUseSet[I Instr, B Block[I], F Function[I, B]] [64]*vrState[I, B, F] | ||||||
| 
 | 
 | ||||||
| func newRegInUseSet() regInUseSet { | func newRegInUseSet[I Instr, B Block[I], F Function[I, B]]() regInUseSet[I, B, F] { | ||||||
| 	var ret regInUseSet | 	var ret regInUseSet[I, B, F] | ||||||
| 	ret.reset() | 	ret.reset() | ||||||
| 	return ret | 	return ret | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) reset() { | func (rs *regInUseSet[I, B, F]) reset() { | ||||||
| 	for i := range rs { | 	clear(rs[:]) | ||||||
| 		rs[i] = VRegInvalid |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) format(info *RegisterInfo) string { //nolint:unused | func (rs *regInUseSet[I, B, F]) format(info *RegisterInfo) string { //nolint:unused | ||||||
| 	var ret []string | 	var ret []string | ||||||
| 	for i, vr := range rs { | 	for i, vr := range rs { | ||||||
| 		if vr != VRegInvalid { | 		if vr != nil { | ||||||
| 			ret = append(ret, fmt.Sprintf("(%s->v%d)", info.RealRegName(RealReg(i)), vr.ID())) | 			ret = append(ret, fmt.Sprintf("(%s->v%d)", info.RealRegName(RealReg(i)), vr.v.ID())) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	return strings.Join(ret, ", ") | 	return strings.Join(ret, ", ") | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) has(r RealReg) bool { | func (rs *regInUseSet[I, B, F]) has(r RealReg) bool { | ||||||
| 	return r < 64 && rs[r] != VRegInvalid | 	return r < 64 && rs[r] != nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) get(r RealReg) VReg { | func (rs *regInUseSet[I, B, F]) get(r RealReg) *vrState[I, B, F] { | ||||||
| 	return rs[r] | 	return rs[r] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) remove(r RealReg) { | func (rs *regInUseSet[I, B, F]) remove(r RealReg) { | ||||||
| 	rs[r] = VRegInvalid | 	rs[r] = nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) add(r RealReg, vr VReg) { | func (rs *regInUseSet[I, B, F]) add(r RealReg, vr *vrState[I, B, F]) { | ||||||
| 	if r >= 64 { | 	if r >= 64 { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 	rs[r] = vr | 	rs[r] = vr | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (rs *regInUseSet) range_(f func(allocatedRealReg RealReg, vr VReg)) { | func (rs *regInUseSet[I, B, F]) range_(f func(allocatedRealReg RealReg, vr *vrState[I, B, F])) { | ||||||
| 	for i, vr := range rs { | 	for i, vr := range rs { | ||||||
| 		if vr != VRegInvalid { | 		if vr != nil { | ||||||
| 			f(RealReg(i), vr) | 			f(RealReg(i), vr) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
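regInUseSet now stores *vrState pointers instead of VReg values, so a free slot is the nil zero value and reset collapses to the clear builtin (Go 1.21+), replacing the loop that wrote VRegInvalid sentinels. A stand-alone sketch of the pattern, with a stand-in state type:

package main

import "fmt"

type state struct{ id int } // stand-in for the allocator's vrState

type inUse [64]*state

func (s *inUse) reset()         { clear(s[:]) } // zeroes every slot back to nil
func (s *inUse) has(r int) bool { return r < 64 && s[r] != nil }

func (s *inUse) add(r int, v *state) {
	if r < 64 {
		s[r] = v
	}
}

func main() {
	var s inUse
	s.add(3, &state{id: 7})
	fmt.Println(s.has(3)) // true
	s.reset()
	fmt.Println(s.has(3)) // false
}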
30  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/backend/vdef.go  (generated, vendored)
							|  | @ -1,43 +1,19 @@ | ||||||
| package backend | package backend | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc" |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" | 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // SSAValueDefinition represents a definition of an SSA value. | // SSAValueDefinition represents a definition of an SSA value. | ||||||
| type SSAValueDefinition struct { | type SSAValueDefinition struct { | ||||||
| 	// BlockParamValue is valid if Instr == nil | 	V ssa.Value | ||||||
| 	BlockParamValue ssa.Value |  | ||||||
| 
 |  | ||||||
| 	// BlkParamVReg is valid if Instr == nil |  | ||||||
| 	BlkParamVReg regalloc.VReg |  | ||||||
| 
 |  | ||||||
| 	// Instr is not nil if this is a definition from an instruction. | 	// Instr is not nil if this is a definition from an instruction. | ||||||
| 	Instr *ssa.Instruction | 	Instr *ssa.Instruction | ||||||
| 	// N is the index of the return value in the instr's return values list. |  | ||||||
| 	N int |  | ||||||
| 	// RefCount is the number of references to the result. | 	// RefCount is the number of references to the result. | ||||||
| 	RefCount int | 	RefCount uint32 | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // IsFromInstr returns true if this definition is from an instruction. | ||||||
| func (d *SSAValueDefinition) IsFromInstr() bool { | func (d *SSAValueDefinition) IsFromInstr() bool { | ||||||
| 	return d.Instr != nil | 	return d.Instr != nil | ||||||
| } | } | ||||||
| 
 |  | ||||||
| func (d *SSAValueDefinition) IsFromBlockParam() bool { |  | ||||||
| 	return d.Instr == nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (d *SSAValueDefinition) SSAValue() ssa.Value { |  | ||||||
| 	if d.IsFromBlockParam() { |  | ||||||
| 		return d.BlockParamValue |  | ||||||
| 	} else { |  | ||||||
| 		r, rs := d.Instr.Returns() |  | ||||||
| 		if d.N == 0 { |  | ||||||
| 			return r |  | ||||||
| 		} else { |  | ||||||
| 			return rs[d.N-1] |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
8  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/call_engine.go  (generated, vendored)
							|  | @ -554,17 +554,21 @@ func (c *callEngine) cloneStack(l uintptr) (newSP, newFP, newTop uintptr, newSta | ||||||
| 	// Copy the existing contents in the previous Go-allocated stack into the new one. | 	// Copy the existing contents in the previous Go-allocated stack into the new one. | ||||||
| 	var prevStackAligned, newStackAligned []byte | 	var prevStackAligned, newStackAligned []byte | ||||||
| 	{ | 	{ | ||||||
|  | 		//nolint:staticcheck | ||||||
| 		sh := (*reflect.SliceHeader)(unsafe.Pointer(&prevStackAligned)) | 		sh := (*reflect.SliceHeader)(unsafe.Pointer(&prevStackAligned)) | ||||||
| 		sh.Data = c.stackTop - relSp | 		sh.Data = c.stackTop - relSp | ||||||
| 		setSliceLimits(sh, relSp, relSp) | 		sh.Len = int(relSp) | ||||||
|  | 		sh.Cap = int(relSp) | ||||||
| 	} | 	} | ||||||
| 	newTop = alignedStackTop(newStack) | 	newTop = alignedStackTop(newStack) | ||||||
| 	{ | 	{ | ||||||
| 		newSP = newTop - relSp | 		newSP = newTop - relSp | ||||||
| 		newFP = newTop - relFp | 		newFP = newTop - relFp | ||||||
|  | 		//nolint:staticcheck | ||||||
| 		sh := (*reflect.SliceHeader)(unsafe.Pointer(&newStackAligned)) | 		sh := (*reflect.SliceHeader)(unsafe.Pointer(&newStackAligned)) | ||||||
| 		sh.Data = newSP | 		sh.Data = newSP | ||||||
| 		setSliceLimits(sh, relSp, relSp) | 		sh.Len = int(relSp) | ||||||
|  | 		sh.Cap = int(relSp) | ||||||
| 	} | 	} | ||||||
| 	copy(newStackAligned, prevStackAligned) | 	copy(newStackAligned, prevStackAligned) | ||||||
| 	return | 	return | ||||||
|  |  | ||||||
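Both annotated blocks build a []byte view over a raw address range so that a plain copy can move the old Go-allocated stack into the new one; the //nolint:staticcheck markers are needed because reflect.SliceHeader is deprecated. A hedged sketch of the same view via unsafe.Slice (whether wazero could adopt it here is not established by this diff):

package main

import (
	"fmt"
	"unsafe"
)

// byteView reinterprets n bytes starting at addr as a []byte. The caller must
// guarantee the range stays valid and reachable for the lifetime of the view.
func byteView(addr uintptr, n int) []byte {
	return unsafe.Slice((*byte)(unsafe.Pointer(addr)), n)
}

func main() {
	src, dst := [4]byte{1, 2, 3, 4}, [4]byte{}
	copy(byteView(uintptr(unsafe.Pointer(&dst[0])), 4),
		byteView(uintptr(unsafe.Pointer(&src[0])), 4))
	fmt.Println(dst) // [1 2 3 4]
}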
14  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/frontend/frontend.go  (generated, vendored)
							|  | @ -275,7 +275,7 @@ func (c *Compiler) LowerToSSA() { | ||||||
| 		builder.DefineVariable(variable, value, entryBlock) | 		builder.DefineVariable(variable, value, entryBlock) | ||||||
| 		c.setWasmLocalVariable(wasm.Index(i), variable) | 		c.setWasmLocalVariable(wasm.Index(i), variable) | ||||||
| 	} | 	} | ||||||
| 	c.declareWasmLocals(entryBlock) | 	c.declareWasmLocals() | ||||||
| 	c.declareNecessaryVariables() | 	c.declareNecessaryVariables() | ||||||
| 
 | 
 | ||||||
| 	c.lowerBody(entryBlock) | 	c.lowerBody(entryBlock) | ||||||
|  | @ -295,7 +295,7 @@ func (c *Compiler) setWasmLocalVariable(index wasm.Index, variable ssa.Variable) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // declareWasmLocals declares the SSA variables for the Wasm locals. | // declareWasmLocals declares the SSA variables for the Wasm locals. | ||||||
| func (c *Compiler) declareWasmLocals(entry ssa.BasicBlock) { | func (c *Compiler) declareWasmLocals() { | ||||||
| 	localCount := wasm.Index(len(c.wasmFunctionTyp.Params)) | 	localCount := wasm.Index(len(c.wasmFunctionTyp.Params)) | ||||||
| 	for i, typ := range c.wasmFunctionLocalTypes { | 	for i, typ := range c.wasmFunctionLocalTypes { | ||||||
| 		st := WasmTypeToSSAType(typ) | 		st := WasmTypeToSSAType(typ) | ||||||
|  | @ -543,11 +543,11 @@ func (c *Compiler) initializeCurrentBlockKnownBounds() { | ||||||
| 				cb := &c.bounds[i][c.pointers[i]] | 				cb := &c.bounds[i][c.pointers[i]] | ||||||
| 				if cb.id != smallestID { | 				if cb.id != smallestID { | ||||||
| 					same = false | 					same = false | ||||||
| 					break |  | ||||||
| 				} else { | 				} else { | ||||||
| 					if cb.bound < minBound { | 					if cb.bound < minBound { | ||||||
| 						minBound = cb.bound | 						minBound = cb.bound | ||||||
| 					} | 					} | ||||||
|  | 					c.pointers[i]++ | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
|  | @ -555,14 +555,6 @@ func (c *Compiler) initializeCurrentBlockKnownBounds() { | ||||||
| 				// Absolute address cannot be used in the intersection since the value might be only defined in one of the predecessors. | 				// Absolute address cannot be used in the intersection since the value might be only defined in one of the predecessors. | ||||||
| 				c.recordKnownSafeBound(smallestID, minBound, ssa.ValueInvalid) | 				c.recordKnownSafeBound(smallestID, minBound, ssa.ValueInvalid) | ||||||
| 			} | 			} | ||||||
| 
 |  | ||||||
| 			// Move pointer(s) for the smallest ID forward (if same, move all). |  | ||||||
| 			for i := 0; i < preds; i++ { |  | ||||||
| 				cb := &c.bounds[i][c.pointers[i]] |  | ||||||
| 				if cb.id == smallestID { |  | ||||||
| 					c.pointers[i]++ |  | ||||||
| 				} |  | ||||||
| 			} |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
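The rewritten loop advances every predecessor's cursor that sits on the smallest ID in the same pass that takes the minimum bound, rather than breaking out and re-scanning in a trailing loop; the result is a standard k-way intersection of ID-sorted lists. A self-contained sketch of that merge, with illustrative types:

package main

import "fmt"

type knownBound struct{ id, bound int }

// intersect keeps the IDs present in every ID-sorted list, paired with the
// smallest bound seen for each, advancing cursors as the loop above does.
func intersect(lists [][]knownBound) []knownBound {
	cursors := make([]int, len(lists))
	var out []knownBound
	for {
		smallest := -1
		for i, c := range cursors {
			if c >= len(lists[i]) {
				return out // any exhausted list ends the intersection
			}
			if id := lists[i][c].id; smallest == -1 || id < smallest {
				smallest = id
			}
		}
		same, minBound := true, int(^uint(0)>>1)
		for i := range cursors {
			cb := lists[i][cursors[i]]
			if cb.id != smallest {
				same = false
				continue
			}
			if cb.bound < minBound {
				minBound = cb.bound
			}
			cursors[i]++ // move every cursor sitting on the smallest ID
		}
		if same {
			out = append(out, knownBound{smallest, minBound})
		}
	}
}

func main() {
	a := []knownBound{{1, 10}, {3, 5}}
	b := []knownBound{{1, 8}, {2, 9}, {3, 7}}
	fmt.Println(intersect([][]knownBound{a, b})) // [{1 8} {3 5}]
}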
17  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/frontend/lower.go  (generated, vendored)
							|  | @ -1538,8 +1538,7 @@ func (c *Compiler) lowerCurrentOpcode() { | ||||||
| 		builder.SetCurrentBlock(elseBlk) | 		builder.SetCurrentBlock(elseBlk) | ||||||
| 
 | 
 | ||||||
| 	case wasm.OpcodeBrTable: | 	case wasm.OpcodeBrTable: | ||||||
| 		labels := state.tmpForBrTable | 		labels := state.tmpForBrTable[:0] | ||||||
| 		labels = labels[:0] |  | ||||||
| 		labelCount := c.readI32u() | 		labelCount := c.readI32u() | ||||||
| 		for i := 0; i < int(labelCount); i++ { | 		for i := 0; i < int(labelCount); i++ { | ||||||
| 			labels = append(labels, c.readI32u()) | 			labels = append(labels, c.readI32u()) | ||||||
|  | @ -1557,6 +1556,7 @@ func (c *Compiler) lowerCurrentOpcode() { | ||||||
| 		} else { | 		} else { | ||||||
| 			c.lowerBrTable(labels, index) | 			c.lowerBrTable(labels, index) | ||||||
| 		} | 		} | ||||||
|  | 		state.tmpForBrTable = labels // reuse the temporary slice for next use. | ||||||
| 		state.unreachable = true | 		state.unreachable = true | ||||||
| 
 | 
 | ||||||
| 	case wasm.OpcodeNop: | 	case wasm.OpcodeNop: | ||||||
|  | @ -4068,13 +4068,14 @@ func (c *Compiler) lowerBrTable(labels []uint32, index ssa.Value) { | ||||||
| 		numArgs = len(f.blockType.Results) | 		numArgs = len(f.blockType.Results) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	targets := make([]ssa.BasicBlock, len(labels)) | 	varPool := builder.VarLengthPool() | ||||||
|  | 	trampolineBlockIDs := varPool.Allocate(len(labels)) | ||||||
| 
 | 
 | ||||||
| 	// We need trampoline blocks since depending on the target block structure, we might end up inserting moves before jumps, | 	// We need trampoline blocks since depending on the target block structure, we might end up inserting moves before jumps, | ||||||
| 	// which cannot be done with br_table. Instead, we can do such per-block moves in the trampoline blocks. | 	// which cannot be done with br_table. Instead, we can do such per-block moves in the trampoline blocks. | ||||||
| 	// At the linking phase (very end of the backend), we can remove the unnecessary jumps, and therefore no runtime overhead. | 	// At the linking phase (very end of the backend), we can remove the unnecessary jumps, and therefore no runtime overhead. | ||||||
| 	currentBlk := builder.CurrentBlock() | 	currentBlk := builder.CurrentBlock() | ||||||
| 	for i, l := range labels { | 	for _, l := range labels { | ||||||
| 		// Args are always on the top of the stack. Note that we should not share the args slice | 		// Args are always on the top of the stack. Note that we should not share the args slice | ||||||
| 		// among the jump instructions since the args are modified during passes (e.g. redundant phi elimination). | 		// among the jump instructions since the args are modified during passes (e.g. redundant phi elimination). | ||||||
| 		args := c.nPeekDup(numArgs) | 		args := c.nPeekDup(numArgs) | ||||||
|  | @ -4082,17 +4083,17 @@ func (c *Compiler) lowerBrTable(labels []uint32, index ssa.Value) { | ||||||
| 		trampoline := builder.AllocateBasicBlock() | 		trampoline := builder.AllocateBasicBlock() | ||||||
| 		builder.SetCurrentBlock(trampoline) | 		builder.SetCurrentBlock(trampoline) | ||||||
| 		c.insertJumpToBlock(args, targetBlk) | 		c.insertJumpToBlock(args, targetBlk) | ||||||
| 		targets[i] = trampoline | 		trampolineBlockIDs = trampolineBlockIDs.Append(builder.VarLengthPool(), ssa.Value(trampoline.ID())) | ||||||
| 	} | 	} | ||||||
| 	builder.SetCurrentBlock(currentBlk) | 	builder.SetCurrentBlock(currentBlk) | ||||||
| 
 | 
 | ||||||
| 	// If the target block has no arguments, we can just jump to the target block. | 	// If the target block has no arguments, we can just jump to the target block. | ||||||
| 	brTable := builder.AllocateInstruction() | 	brTable := builder.AllocateInstruction() | ||||||
| 	brTable.AsBrTable(index, targets) | 	brTable.AsBrTable(index, trampolineBlockIDs) | ||||||
| 	builder.InsertInstruction(brTable) | 	builder.InsertInstruction(brTable) | ||||||
| 
 | 
 | ||||||
| 	for _, trampoline := range targets { | 	for _, trampolineID := range trampolineBlockIDs.View() { | ||||||
| 		builder.Seal(trampoline) | 		builder.Seal(builder.BasicBlock(ssa.BasicBlockID(trampolineID))) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
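Two things happen in this hunk: trampoline blocks give br_table somewhere to perform per-edge argument moves (the branch table itself cannot carry them), and the per-call []ssa.BasicBlock allocation is replaced by a block-ID list drawn from the builder's variable-length pool. A rough sketch of such a pool; this is illustrative, not wazero's actual API:

package main

import (
	"fmt"
	"slices"
)

// varLengthPool hands out append-only lists that share one backing slice, so a
// hot path like lowerBrTable reuses memory across calls instead of allocating.
type varLengthPool[T any] struct{ backing []T }

type varLength[T any] struct{ begin, end int }

func (p *varLengthPool[T]) allocate(capacityHint int) varLength[T] {
	p.backing = slices.Grow(p.backing, capacityHint)
	begin := len(p.backing)
	return varLength[T]{begin, begin}
}

// appendTo is only valid while l is the most recent allocation from p.
func (l varLength[T]) appendTo(p *varLengthPool[T], v T) varLength[T] {
	p.backing = append(p.backing, v)
	l.end = len(p.backing)
	return l
}

func (l varLength[T]) view(p *varLengthPool[T]) []T { return p.backing[l.begin:l.end] }

func main() {
	var pool varLengthPool[int]
	ids := pool.allocate(4)
	for _, id := range []int{10, 11, 12} {
		ids = ids.appendTo(&pool, id)
	}
	fmt.Println(ids.view(&pool)) // [10 11 12]
}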
2  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/frontend/sort_id.go  (generated, vendored)
							|  | @ -1,5 +1,3 @@ | ||||||
| //go:build go1.21 |  | ||||||
| 
 |  | ||||||
| package frontend | package frontend | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  |  | ||||||
17  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/frontend/sort_id_old.go  (generated, vendored)
							|  | @ -1,17 +0,0 @@ | ||||||
| //go:build !go1.21 |  | ||||||
| 
 |  | ||||||
| // TODO: delete after the floor Go version is 1.21 |  | ||||||
| 
 |  | ||||||
| package frontend |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"sort" |  | ||||||
| 
 |  | ||||||
| 	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| func sortSSAValueIDs(IDs []ssa.ValueID) { |  | ||||||
| 	sort.SliceStable(IDs, func(i, j int) bool { |  | ||||||
| 		return int(IDs[i]) < int(IDs[j]) |  | ||||||
| 	}) |  | ||||||
| } |  | ||||||
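With the pre-1.21 fallback deleted, the surviving sort_id.go can assume a Go 1.21+ toolchain, which also explains the dropped go1.21 build tag above. At that floor, the generic standard-library sort is the natural replacement for sort.SliceStable; the file's final contents are not shown in this diff, so the following is only a sketch:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

// sortIDs sorts any ordered ID type in place. Stability is irrelevant when the
// IDs are distinct, so slices.Sort can stand in for sort.SliceStable.
func sortIDs[T cmp.Ordered](ids []T) {
	slices.Sort(ids)
}

func main() {
	ids := []uint32{3, 1, 2}
	sortIDs(ids)
	fmt.Println(ids) // [1 2 3]
}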
8  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/hostmodule.go  (generated, vendored)
							|  | @ -16,6 +16,7 @@ func buildHostModuleOpaque(m *wasm.Module, listeners []experimental.FunctionList | ||||||
| 	binary.LittleEndian.PutUint64(ret[0:], uint64(uintptr(unsafe.Pointer(m)))) | 	binary.LittleEndian.PutUint64(ret[0:], uint64(uintptr(unsafe.Pointer(m)))) | ||||||
| 
 | 
 | ||||||
| 	if len(listeners) > 0 { | 	if len(listeners) > 0 { | ||||||
|  | 		//nolint:staticcheck | ||||||
| 		sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&listeners)) | 		sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&listeners)) | ||||||
| 		binary.LittleEndian.PutUint64(ret[8:], uint64(sliceHeader.Data)) | 		binary.LittleEndian.PutUint64(ret[8:], uint64(sliceHeader.Data)) | ||||||
| 		binary.LittleEndian.PutUint64(ret[16:], uint64(sliceHeader.Len)) | 		binary.LittleEndian.PutUint64(ret[16:], uint64(sliceHeader.Len)) | ||||||
|  | @ -33,6 +34,7 @@ func buildHostModuleOpaque(m *wasm.Module, listeners []experimental.FunctionList | ||||||
| 
 | 
 | ||||||
| func hostModuleFromOpaque(opaqueBegin uintptr) *wasm.Module { | func hostModuleFromOpaque(opaqueBegin uintptr) *wasm.Module { | ||||||
| 	var opaqueViewOverSlice []byte | 	var opaqueViewOverSlice []byte | ||||||
|  | 	//nolint:staticcheck | ||||||
| 	sh := (*reflect.SliceHeader)(unsafe.Pointer(&opaqueViewOverSlice)) | 	sh := (*reflect.SliceHeader)(unsafe.Pointer(&opaqueViewOverSlice)) | ||||||
| 	sh.Data = opaqueBegin | 	sh.Data = opaqueBegin | ||||||
| 	sh.Len = 32 | 	sh.Len = 32 | ||||||
|  | @ -42,6 +44,7 @@ func hostModuleFromOpaque(opaqueBegin uintptr) *wasm.Module { | ||||||
| 
 | 
 | ||||||
| func hostModuleListenersSliceFromOpaque(opaqueBegin uintptr) []experimental.FunctionListener { | func hostModuleListenersSliceFromOpaque(opaqueBegin uintptr) []experimental.FunctionListener { | ||||||
| 	var opaqueViewOverSlice []byte | 	var opaqueViewOverSlice []byte | ||||||
|  | 	//nolint:staticcheck | ||||||
| 	sh := (*reflect.SliceHeader)(unsafe.Pointer(&opaqueViewOverSlice)) | 	sh := (*reflect.SliceHeader)(unsafe.Pointer(&opaqueViewOverSlice)) | ||||||
| 	sh.Data = opaqueBegin | 	sh.Data = opaqueBegin | ||||||
| 	sh.Len = 32 | 	sh.Len = 32 | ||||||
|  | @ -51,9 +54,11 @@ func hostModuleListenersSliceFromOpaque(opaqueBegin uintptr) []experimental.Func | ||||||
| 	l := binary.LittleEndian.Uint64(opaqueViewOverSlice[16:]) | 	l := binary.LittleEndian.Uint64(opaqueViewOverSlice[16:]) | ||||||
| 	c := binary.LittleEndian.Uint64(opaqueViewOverSlice[24:]) | 	c := binary.LittleEndian.Uint64(opaqueViewOverSlice[24:]) | ||||||
| 	var ret []experimental.FunctionListener | 	var ret []experimental.FunctionListener | ||||||
|  | 	//nolint:staticcheck | ||||||
| 	sh = (*reflect.SliceHeader)(unsafe.Pointer(&ret)) | 	sh = (*reflect.SliceHeader)(unsafe.Pointer(&ret)) | ||||||
| 	sh.Data = uintptr(b) | 	sh.Data = uintptr(b) | ||||||
| 	setSliceLimits(sh, uintptr(l), uintptr(c)) | 	sh.Len = int(l) | ||||||
|  | 	sh.Cap = int(c) | ||||||
| 	return ret | 	return ret | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -62,6 +67,7 @@ func hostModuleGoFuncFromOpaque[T any](index int, opaqueBegin uintptr) T { | ||||||
| 	ptr := opaqueBegin + offset | 	ptr := opaqueBegin + offset | ||||||
| 
 | 
 | ||||||
| 	var opaqueViewOverFunction []byte | 	var opaqueViewOverFunction []byte | ||||||
|  | 	//nolint:staticcheck | ||||||
| 	sh := (*reflect.SliceHeader)(unsafe.Pointer(&opaqueViewOverFunction)) | 	sh := (*reflect.SliceHeader)(unsafe.Pointer(&opaqueViewOverFunction)) | ||||||
| 	sh.Data = ptr | 	sh.Data = ptr | ||||||
| 	sh.Len = 16 | 	sh.Len = 16 | ||||||
|  |  | ||||||
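Each annotated site reads or writes a fixed little-endian layout: the module pointer at offset 0, then the listener slice's data pointer, length, and capacity at offsets 8, 16, and 24. A round-trip sketch of packing a slice header into opaque bytes and rebuilding it; the GC cannot see through the bytes, so the original slice must stay reachable elsewhere for this to be safe:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// pack stores s's data pointer, length, and capacity at offsets 0/8/16,
// mirroring the slice-header portion of the opaque layout above.
func pack(s []int, out []byte) {
	binary.LittleEndian.PutUint64(out[0:], uint64(uintptr(unsafe.Pointer(unsafe.SliceData(s)))))
	binary.LittleEndian.PutUint64(out[8:], uint64(len(s)))
	binary.LittleEndian.PutUint64(out[16:], uint64(cap(s)))
}

// unpack rebuilds the slice from the stored pointer and length, as the
// hostModule*FromOpaque functions above do via reflect.SliceHeader.
func unpack(in []byte) []int {
	data := uintptr(binary.LittleEndian.Uint64(in[0:]))
	n := int(binary.LittleEndian.Uint64(in[8:]))
	return unsafe.Slice((*int)(unsafe.Pointer(data)), n)
}

func main() {
	s := []int{1, 2, 3}
	var opaque [24]byte
	pack(s, opaque[:])
	fmt.Println(unpack(opaque[:])) // [1 2 3]
}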
11  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/reflect.go  (generated, vendored)
							|  | @ -1,11 +0,0 @@ | ||||||
| //go:build !tinygo |  | ||||||
| 
 |  | ||||||
| package wazevo |  | ||||||
| 
 |  | ||||||
| import "reflect" |  | ||||||
| 
 |  | ||||||
| // setSliceLimits sets both Cap and Len for the given reflected slice. |  | ||||||
| func setSliceLimits(s *reflect.SliceHeader, l, c uintptr) { |  | ||||||
| 	s.Len = int(l) |  | ||||||
| 	s.Cap = int(c) |  | ||||||
| } |  | ||||||
11  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/reflect_tinygo.go  (generated, vendored)
							|  | @ -1,11 +0,0 @@ | ||||||
| //go:build tinygo |  | ||||||
| 
 |  | ||||||
| package wazevo |  | ||||||
| 
 |  | ||||||
| import "reflect" |  | ||||||
| 
 |  | ||||||
| // setSliceLimits sets both Cap and Len for the given reflected slice. |  | ||||||
| func setSliceLimits(s *reflect.SliceHeader, l, c uintptr) { |  | ||||||
| 	s.Len = l |  | ||||||
| 	s.Cap = c |  | ||||||
| } |  | ||||||
43  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block.go  (generated, vendored)
							|  | @ -34,9 +34,6 @@ type BasicBlock interface { | ||||||
| 	// The returned Value is the definition of the param in this block. | 	// The returned Value is the definition of the param in this block. | ||||||
| 	Param(i int) Value | 	Param(i int) Value | ||||||
| 
 | 
 | ||||||
| 	// InsertInstruction inserts an instruction that implements Value into the tail of this block. |  | ||||||
| 	InsertInstruction(raw *Instruction) |  | ||||||
| 
 |  | ||||||
| 	// Root returns the root instruction of this block. | 	// Root returns the root instruction of this block. | ||||||
| 	Root() *Instruction | 	Root() *Instruction | ||||||
| 
 | 
 | ||||||
|  | @ -81,7 +78,7 @@ type ( | ||||||
| 		rootInstr, currentInstr *Instruction | 		rootInstr, currentInstr *Instruction | ||||||
| 		// params are Values that represent parameters to a basicBlock. | 		// params are Values that represent parameters to a basicBlock. | ||||||
| 		// Each parameter can be considered as an output of PHI instruction in traditional SSA. | 		// Each parameter can be considered as an output of PHI instruction in traditional SSA. | ||||||
| 		params  []Value | 		params  Values | ||||||
| 		preds   []basicBlockPredecessorInfo | 		preds   []basicBlockPredecessorInfo | ||||||
| 		success []*basicBlock | 		success []*basicBlock | ||||||
| 		// singlePred is the alias to preds[0] for fast lookup, and only set after Seal is called. | 		// singlePred is the alias to preds[0] for fast lookup, and only set after Seal is called. | ||||||
|  | @ -179,23 +176,23 @@ func (bb *basicBlock) ReturnBlock() bool { | ||||||
| // AddParam implements BasicBlock.AddParam. | // AddParam implements BasicBlock.AddParam. | ||||||
| func (bb *basicBlock) AddParam(b Builder, typ Type) Value { | func (bb *basicBlock) AddParam(b Builder, typ Type) Value { | ||||||
| 	paramValue := b.allocateValue(typ) | 	paramValue := b.allocateValue(typ) | ||||||
| 	bb.params = append(bb.params, paramValue) | 	bb.params = bb.params.Append(&b.(*builder).varLengthPool, paramValue) | ||||||
| 	return paramValue | 	return paramValue | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // addParamOn adds a parameter to this block whose value is already allocated. | // addParamOn adds a parameter to this block whose value is already allocated. | ||||||
| func (bb *basicBlock) addParamOn(value Value) { | func (bb *basicBlock) addParamOn(b *builder, value Value) { | ||||||
| 	bb.params = append(bb.params, value) | 	bb.params = bb.params.Append(&b.varLengthPool, value) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Params implements BasicBlock.Params. | // Params implements BasicBlock.Params. | ||||||
| func (bb *basicBlock) Params() int { | func (bb *basicBlock) Params() int { | ||||||
| 	return len(bb.params) | 	return len(bb.params.View()) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Param implements BasicBlock.Param. | // Param implements BasicBlock.Param. | ||||||
| func (bb *basicBlock) Param(i int) Value { | func (bb *basicBlock) Param(i int) Value { | ||||||
| 	return bb.params[i] | 	return bb.params.View()[i] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Valid implements BasicBlock.Valid. | // Valid implements BasicBlock.Valid. | ||||||
|  | @ -208,8 +205,8 @@ func (bb *basicBlock) Sealed() bool { | ||||||
| 	return bb.sealed | 	return bb.sealed | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // InsertInstruction implements BasicBlock.InsertInstruction. | // insertInstruction inserts the given instruction into the tail of this block. | ||||||
| func (bb *basicBlock) InsertInstruction(next *Instruction) { | func (bb *basicBlock) insertInstruction(b *builder, next *Instruction) { | ||||||
| 	current := bb.currentInstr | 	current := bb.currentInstr | ||||||
| 	if current != nil { | 	if current != nil { | ||||||
| 		current.next = next | 		current.next = next | ||||||
|  | @ -221,12 +218,12 @@ func (bb *basicBlock) InsertInstruction(next *Instruction) { | ||||||
| 
 | 
 | ||||||
| 	switch next.opcode { | 	switch next.opcode { | ||||||
| 	case OpcodeJump, OpcodeBrz, OpcodeBrnz: | 	case OpcodeJump, OpcodeBrz, OpcodeBrnz: | ||||||
| 		target := next.blk.(*basicBlock) | 		target := BasicBlockID(next.rValue) | ||||||
| 		target.addPred(bb, next) | 		b.basicBlock(target).addPred(bb, next) | ||||||
| 	case OpcodeBrTable: | 	case OpcodeBrTable: | ||||||
| 		for _, _target := range next.targets { | 		for _, _target := range next.rValues.View() { | ||||||
| 			target := _target.(*basicBlock) | 			target := BasicBlockID(_target) | ||||||
| 			target.addPred(bb, next) | 			b.basicBlock(target).addPred(bb, next) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  | @ -268,7 +265,7 @@ func (bb *basicBlock) Tail() *Instruction { | ||||||
| 
 | 
 | ||||||
| // reset resets the basicBlock to its initial state so that it can be reused for another function. | // reset resets the basicBlock to its initial state so that it can be reused for another function. | ||||||
| func resetBasicBlock(bb *basicBlock) { | func resetBasicBlock(bb *basicBlock) { | ||||||
| 	bb.params = bb.params[:0] | 	bb.params = ValuesNil | ||||||
| 	bb.rootInstr, bb.currentInstr = nil, nil | 	bb.rootInstr, bb.currentInstr = nil, nil | ||||||
| 	bb.preds = bb.preds[:0] | 	bb.preds = bb.preds[:0] | ||||||
| 	bb.success = bb.success[:0] | 	bb.success = bb.success[:0] | ||||||
|  | @ -310,8 +307,8 @@ func (bb *basicBlock) addPred(blk BasicBlock, branch *Instruction) { | ||||||
| 
 | 
 | ||||||
| // formatHeader returns the string representation of the header of the basicBlock. | // formatHeader returns the string representation of the header of the basicBlock. | ||||||
| func (bb *basicBlock) formatHeader(b Builder) string { | func (bb *basicBlock) formatHeader(b Builder) string { | ||||||
| 	ps := make([]string, len(bb.params)) | 	ps := make([]string, len(bb.params.View())) | ||||||
| 	for i, p := range bb.params { | 	for i, p := range bb.params.View() { | ||||||
| 		ps[i] = p.formatWithType(b) | 		ps[i] = p.formatWithType(b) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -339,7 +336,9 @@ func (bb *basicBlock) validate(b *builder) { | ||||||
| 	if len(bb.preds) > 0 { | 	if len(bb.preds) > 0 { | ||||||
| 		for _, pred := range bb.preds { | 		for _, pred := range bb.preds { | ||||||
| 			if pred.branch.opcode != OpcodeBrTable { | 			if pred.branch.opcode != OpcodeBrTable { | ||||||
| 				if target := pred.branch.blk; target != bb { | 				blockID := int(pred.branch.rValue) | ||||||
|  | 				target := b.basicBlocksPool.View(blockID) | ||||||
|  | 				if target != bb { | ||||||
| 					panic(fmt.Sprintf("BUG: '%s' is not branch to %s, but to %s", | 					panic(fmt.Sprintf("BUG: '%s' is not branch to %s, but to %s", | ||||||
| 						pred.branch.Format(b), bb.Name(), target.Name())) | 						pred.branch.Format(b), bb.Name(), target.Name())) | ||||||
| 				} | 				} | ||||||
|  | @ -349,14 +348,14 @@ func (bb *basicBlock) validate(b *builder) { | ||||||
| 			if bb.ReturnBlock() { | 			if bb.ReturnBlock() { | ||||||
| 				exp = len(b.currentSignature.Results) | 				exp = len(b.currentSignature.Results) | ||||||
| 			} else { | 			} else { | ||||||
| 				exp = len(bb.params) | 				exp = len(bb.params.View()) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			if len(pred.branch.vs.View()) != exp { | 			if len(pred.branch.vs.View()) != exp { | ||||||
| 				panic(fmt.Sprintf( | 				panic(fmt.Sprintf( | ||||||
| 					"BUG: len(argument at %s) != len(params at %s): %d != %d: %s", | 					"BUG: len(argument at %s) != len(params at %s): %d != %d: %s", | ||||||
| 					pred.blk.Name(), bb.Name(), | 					pred.blk.Name(), bb.Name(), | ||||||
| 					len(pred.branch.vs.View()), len(bb.params), pred.branch.Format(b), | 					len(pred.branch.vs.View()), len(bb.params.View()), pred.branch.Format(b), | ||||||
| 				)) | 				)) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
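The params field changing from []Value to the pooled Values type is the recurring theme of this file: block parameter lists become cheap handles into a per-builder arena (b.varLengthPool) instead of individually allocated slices, so they can be reset wholesale between compiled functions. A minimal, self-contained sketch of that idea follows; varLengthPool and values are simplified stand-ins, and the real wazevoapi.VarLengthPool differs in detail (its Values carries its own backing, so View and Cut take no pool argument).

package main

import "fmt"

type Value uint64

// varLengthPool is a toy stand-in for wazevoapi.VarLengthPool[Value].
type varLengthPool struct{ backing [][]Value }

// values is a handle into the pool, playing the role of ssa.Values.
type values struct{ idx int }

func (p *varLengthPool) allocate(capHint int) values {
	p.backing = append(p.backing, make([]Value, 0, capHint))
	return values{idx: len(p.backing) - 1}
}

func (v values) append(p *varLengthPool, val Value) values {
	p.backing[v.idx] = append(p.backing[v.idx], val)
	return v
}

func (v values) view(p *varLengthPool) []Value { return p.backing[v.idx] }

// cut truncates the list in place, like blk.params.Cut in the phi pass below.
func (v values) cut(p *varLengthPool, n int) { p.backing[v.idx] = p.backing[v.idx][:n] }

func main() {
	var pool varLengthPool
	params := pool.allocate(2)
	params = params.append(&pool, 10).append(&pool, 11)
	fmt.Println(params.view(&pool)) // [10 11]
	params.cut(&pool, 1)
	fmt.Println(params.view(&pool)) // [10]
}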
							
								
								
									
2  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort.go (generated, vendored)
|  | @ -1,5 +1,3 @@ | ||||||
							|  | @ -1,5 +1,3 @@ | ||||||
| //go:build go1.21 |  | ||||||
| 
 |  | ||||||
| package ssa | package ssa | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  |  | ||||||
							
								
								
									
24  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort_old.go (generated, vendored)
|  | @ -1,24 +0,0 @@ | ||||||
| //go:build !go1.21 |  | ||||||
| 
 |  | ||||||
| // TODO: delete after the floor Go version is 1.21 |  | ||||||
| 
 |  | ||||||
| package ssa |  | ||||||
| 
 |  | ||||||
| import "sort" |  | ||||||
| 
 |  | ||||||
| func sortBlocks(blocks []*basicBlock) { |  | ||||||
| 	sort.SliceStable(blocks, func(i, j int) bool { |  | ||||||
| 		iBlk, jBlk := blocks[i], blocks[j] |  | ||||||
| 		if jBlk.ReturnBlock() { |  | ||||||
| 			return true |  | ||||||
| 		} |  | ||||||
| 		if iBlk.ReturnBlock() { |  | ||||||
| 			return false |  | ||||||
| 		} |  | ||||||
| 		iRoot, jRoot := iBlk.rootInstr, jBlk.rootInstr |  | ||||||
| 		if iRoot == nil || jRoot == nil { // For testing. |  | ||||||
| 			return true |  | ||||||
| 		} |  | ||||||
| 		return iBlk.rootInstr.id < jBlk.rootInstr.id |  | ||||||
| 	}) |  | ||||||
| } |  | ||||||
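With the build floor at go1.21, the pre-1.21 fallback above can be deleted and block sorting can rely on the standard slices package. The surviving basic_block_sort.go is not shown in full in this diff; the following is a sketch of the same comparator expressed with slices.SortStableFunc, assuming it is a direct translation of the deleted sort.SliceStable logic (the basicBlock stub and its rootID field are simplified stand-ins for the real types).

package main

import (
	"fmt"
	"slices"
)

// basicBlock carries just enough to mirror the deleted comparator;
// rootID stands in for rootInstr.id, with -1 meaning "no root" (tests).
type basicBlock struct {
	id        int
	returnBlk bool
	rootID    int
}

func sortBlocks(blocks []*basicBlock) {
	slices.SortStableFunc(blocks, func(i, j *basicBlock) int {
		if j.returnBlk {
			return -1 // keep the return block last, as before
		}
		if i.returnBlk {
			return 1
		}
		if i.rootID < 0 || j.rootID < 0 {
			return -1 // the "for testing" branch of the deleted code
		}
		return i.rootID - j.rootID // order by the ID of the first instruction
	})
}

func main() {
	blks := []*basicBlock{
		{id: 2, rootID: 9},
		{id: 0, returnBlk: true, rootID: -1},
		{id: 1, rootID: 3},
	}
	sortBlocks(blks)
	for _, b := range blks {
		fmt.Print(b.id, " ") // 1 2 0: the return block sinks to the end
	}
	fmt.Println()
}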
							
								
								
									
149  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/builder.go (generated, vendored)
|  | @ -94,9 +94,9 @@ type Builder interface { | ||||||
| 	// Returns nil if there's no unseen BasicBlock. | 	// Returns nil if there's no unseen BasicBlock. | ||||||
| 	BlockIteratorNext() BasicBlock | 	BlockIteratorNext() BasicBlock | ||||||
| 
 | 
 | ||||||
| 	// ValueRefCounts returns the map of ValueID to its reference count. | 	// ValuesInfo returns the data per Value used to lower the SSA in backend. | ||||||
| 	// The returned slice must not be modified. | 	// This is indexed by ValueID. | ||||||
| 	ValueRefCounts() []int | 	ValuesInfo() []ValueInfo | ||||||
| 
 | 
 | ||||||
| 	// BlockIteratorReversePostOrderBegin is almost the same as BlockIteratorBegin except it returns the BasicBlock in the reverse post-order. | 	// BlockIteratorReversePostOrderBegin is almost the same as BlockIteratorBegin except it returns the BasicBlock in the reverse post-order. | ||||||
| 	// This is available after RunPasses is run. | 	// This is available after RunPasses is run. | ||||||
|  | @ -129,6 +129,12 @@ type Builder interface { | ||||||
| 
 | 
 | ||||||
| 	// InsertZeroValue inserts a zero value constant instruction of the given type. | 	// InsertZeroValue inserts a zero value constant instruction of the given type. | ||||||
| 	InsertZeroValue(t Type) | 	InsertZeroValue(t Type) | ||||||
|  | 
 | ||||||
|  | 	// BasicBlock returns the BasicBlock of the given ID. | ||||||
|  | 	BasicBlock(id BasicBlockID) BasicBlock | ||||||
|  | 
 | ||||||
|  | 	// InstructionOfValue returns the Instruction that produces the given Value or nil if the Value is not produced by any Instruction. | ||||||
|  | 	InstructionOfValue(v Value) *Instruction | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // NewBuilder returns a new Builder implementation. | // NewBuilder returns a new Builder implementation. | ||||||
|  | @ -140,8 +146,6 @@ func NewBuilder() Builder { | ||||||
| 		varLengthPool:           wazevoapi.NewVarLengthPool[Value](), | 		varLengthPool:           wazevoapi.NewVarLengthPool[Value](), | ||||||
| 		valueAnnotations:        make(map[ValueID]string), | 		valueAnnotations:        make(map[ValueID]string), | ||||||
| 		signatures:              make(map[SignatureID]*Signature), | 		signatures:              make(map[SignatureID]*Signature), | ||||||
| 		valueIDAliases:                 make(map[ValueID]Value), |  | ||||||
| 		redundantParameterIndexToValue: make(map[int]Value), |  | ||||||
| 		returnBlk:               &basicBlock{id: basicBlockIDReturnBlock}, | 		returnBlk:               &basicBlock{id: basicBlockIDReturnBlock}, | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  | @ -159,19 +163,16 @@ type builder struct { | ||||||
| 	currentBB                     *basicBlock | 	currentBB                     *basicBlock | ||||||
| 	returnBlk                     *basicBlock | 	returnBlk                     *basicBlock | ||||||
| 
 | 
 | ||||||
| 	// variables track the types for Variable with the index regarded Variable. |  | ||||||
| 	variables []Type |  | ||||||
| 	// nextValueID is used by builder.AllocateValue. | 	// nextValueID is used by builder.AllocateValue. | ||||||
| 	nextValueID ValueID | 	nextValueID ValueID | ||||||
| 	// nextVariable is used by builder.AllocateVariable. | 	// nextVariable is used by builder.AllocateVariable. | ||||||
| 	nextVariable Variable | 	nextVariable Variable | ||||||
| 
 | 
 | ||||||
| 	valueIDAliases   map[ValueID]Value | 	// valueAnnotations contains the annotations for each Value, only used for debugging. | ||||||
| 	valueAnnotations map[ValueID]string | 	valueAnnotations map[ValueID]string | ||||||
| 
 | 
 | ||||||
| 	// valueRefCounts is used to lower the SSA in backend, and will be calculated | 	// valuesInfo contains the data per Value used to lower the SSA in backend. This is indexed by ValueID. | ||||||
| 	// by the last SSA-level optimization pass. | 	valuesInfo []ValueInfo | ||||||
| 	valueRefCounts []int |  | ||||||
| 
 | 
 | ||||||
| 	// dominators stores the immediate dominator of each BasicBlock. | 	// dominators stores the immediate dominator of each BasicBlock. | ||||||
| 	// The index is blockID of the BasicBlock. | 	// The index is blockID of the BasicBlock. | ||||||
|  | @ -185,11 +186,9 @@ type builder struct { | ||||||
| 
 | 
 | ||||||
| 	// The following fields are used for optimization passes/deterministic compilation. | 	// The following fields are used for optimization passes/deterministic compilation. | ||||||
| 	instStack       []*Instruction | 	instStack       []*Instruction | ||||||
| 	valueIDToInstruction           []*Instruction |  | ||||||
| 	blkStack        []*basicBlock | 	blkStack        []*basicBlock | ||||||
| 	blkStack2       []*basicBlock | 	blkStack2       []*basicBlock | ||||||
| 	ints                           []int | 	redundantParams []redundantParam | ||||||
| 	redundantParameterIndexToValue map[int]Value |  | ||||||
| 
 | 
 | ||||||
| 	// blockIterCur is used to implement blockIteratorBegin and blockIteratorNext. | 	// blockIterCur is used to implement blockIteratorBegin and blockIteratorNext. | ||||||
| 	blockIterCur int | 	blockIterCur int | ||||||
|  | @ -207,6 +206,34 @@ type builder struct { | ||||||
| 	zeros [typeEnd]Value | 	zeros [typeEnd]Value | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // ValueInfo contains the data per Value used to lower the SSA in backend. | ||||||
|  | type ValueInfo struct { | ||||||
|  | 	// RefCount is the reference count of the Value. | ||||||
|  | 	RefCount uint32 | ||||||
|  | 	alias    Value | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // redundantParam is a pair of the index of the redundant parameter and the Value. | ||||||
|  | // This is used to eliminate the redundant parameters in the optimization pass. | ||||||
|  | type redundantParam struct { | ||||||
|  | 	// index is the index of the redundant parameter in the basicBlock. | ||||||
|  | 	index int | ||||||
|  | 	// uniqueValue is the Value which is passed to the redundant parameter. | ||||||
|  | 	uniqueValue Value | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BasicBlock implements Builder.BasicBlock. | ||||||
|  | func (b *builder) BasicBlock(id BasicBlockID) BasicBlock { | ||||||
|  | 	return b.basicBlock(id) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *builder) basicBlock(id BasicBlockID) *basicBlock { | ||||||
|  | 	if id == basicBlockIDReturnBlock { | ||||||
|  | 		return b.returnBlk | ||||||
|  | 	} | ||||||
|  | 	return b.basicBlocksPool.View(int(id)) | ||||||
|  | } | ||||||
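The lookup above special-cases the synthetic return block, whose ID does not index into the block pool. A compact model of that sentinel dispatch; blockID, returnBlockID, and the max-value sentinel are illustrative guesses rather than the upstream constants.

package main

import "fmt"

type blockID uint32

// returnBlockID plays the role of basicBlockIDReturnBlock: an ID reserved
// outside the pool's index range (the actual upstream value may differ).
const returnBlockID = ^blockID(0)

type block struct{ name string }

type builder struct {
	pool      []*block
	returnBlk *block
}

func (b *builder) basicBlock(id blockID) *block {
	if id == returnBlockID {
		return b.returnBlk // one shared, non-pooled block per builder
	}
	return b.pool[int(id)]
}

func main() {
	b := &builder{pool: []*block{{name: "blk0"}}, returnBlk: &block{name: "ret"}}
	fmt.Println(b.basicBlock(0).name, b.basicBlock(returnBlockID).name) // blk0 ret
}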
|  | 
 | ||||||
| // InsertZeroValue implements Builder.InsertZeroValue. | // InsertZeroValue implements Builder.InsertZeroValue. | ||||||
| func (b *builder) InsertZeroValue(t Type) { | func (b *builder) InsertZeroValue(t Type) { | ||||||
| 	if b.zeros[t].Valid() { | 	if b.zeros[t].Valid() { | ||||||
|  | @ -256,7 +283,7 @@ func (b *builder) Init(s *Signature) { | ||||||
| 		sig.used = false | 		sig.used = false | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	b.ints = b.ints[:0] | 	b.redundantParams = b.redundantParams[:0] | ||||||
| 	b.blkStack = b.blkStack[:0] | 	b.blkStack = b.blkStack[:0] | ||||||
| 	b.blkStack2 = b.blkStack2[:0] | 	b.blkStack2 = b.blkStack2[:0] | ||||||
| 	b.dominators = b.dominators[:0] | 	b.dominators = b.dominators[:0] | ||||||
|  | @ -265,17 +292,11 @@ func (b *builder) Init(s *Signature) { | ||||||
| 
 | 
 | ||||||
| 	for v := ValueID(0); v < b.nextValueID; v++ { | 	for v := ValueID(0); v < b.nextValueID; v++ { | ||||||
| 		delete(b.valueAnnotations, v) | 		delete(b.valueAnnotations, v) | ||||||
| 		delete(b.valueIDAliases, v) | 		b.valuesInfo[v] = ValueInfo{alias: ValueInvalid} | ||||||
| 		b.valueRefCounts[v] = 0 |  | ||||||
| 		b.valueIDToInstruction[v] = nil |  | ||||||
| 	} | 	} | ||||||
| 	b.nextValueID = 0 | 	b.nextValueID = 0 | ||||||
| 	b.reversePostOrderedBasicBlocks = b.reversePostOrderedBasicBlocks[:0] | 	b.reversePostOrderedBasicBlocks = b.reversePostOrderedBasicBlocks[:0] | ||||||
| 	b.doneBlockLayout = false | 	b.doneBlockLayout = false | ||||||
| 	for i := range b.valueRefCounts { |  | ||||||
| 		b.valueRefCounts[i] = 0 |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	b.currentSourceOffset = sourceOffsetUnknown | 	b.currentSourceOffset = sourceOffsetUnknown | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -355,7 +376,7 @@ func (b *builder) Idom(blk BasicBlock) BasicBlock { | ||||||
| 
 | 
 | ||||||
| // InsertInstruction implements Builder.InsertInstruction. | // InsertInstruction implements Builder.InsertInstruction. | ||||||
| func (b *builder) InsertInstruction(instr *Instruction) { | func (b *builder) InsertInstruction(instr *Instruction) { | ||||||
| 	b.currentBB.InsertInstruction(instr) | 	b.currentBB.insertInstruction(b, instr) | ||||||
| 
 | 
 | ||||||
| 	if l := b.currentSourceOffset; l.Valid() { | 	if l := b.currentSourceOffset; l.Valid() { | ||||||
| 		// Emit the source offset info only when the instruction has side effect because | 		// Emit the source offset info only when the instruction has side effect because | ||||||
|  | @ -377,7 +398,7 @@ func (b *builder) InsertInstruction(instr *Instruction) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	r1 := b.allocateValue(t1) | 	r1 := b.allocateValue(t1) | ||||||
| 	instr.rValue = r1 | 	instr.rValue = r1.setInstructionID(instr.id) | ||||||
| 
 | 
 | ||||||
| 	tsl := len(ts) | 	tsl := len(ts) | ||||||
| 	if tsl == 0 { | 	if tsl == 0 { | ||||||
|  | @ -386,20 +407,14 @@ func (b *builder) InsertInstruction(instr *Instruction) { | ||||||
| 
 | 
 | ||||||
| 	rValues := b.varLengthPool.Allocate(tsl) | 	rValues := b.varLengthPool.Allocate(tsl) | ||||||
| 	for i := 0; i < tsl; i++ { | 	for i := 0; i < tsl; i++ { | ||||||
| 		rValues = rValues.Append(&b.varLengthPool, b.allocateValue(ts[i])) | 		rn := b.allocateValue(ts[i]) | ||||||
|  | 		rValues = rValues.Append(&b.varLengthPool, rn.setInstructionID(instr.id)) | ||||||
| 	} | 	} | ||||||
| 	instr.rValues = rValues | 	instr.rValues = rValues | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // DefineVariable implements Builder.DefineVariable. | // DefineVariable implements Builder.DefineVariable. | ||||||
| func (b *builder) DefineVariable(variable Variable, value Value, block BasicBlock) { | func (b *builder) DefineVariable(variable Variable, value Value, block BasicBlock) { | ||||||
| 	if b.variables[variable].invalid() { |  | ||||||
| 		panic("BUG: trying to define variable " + variable.String() + " but is not declared yet") |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if b.variables[variable] != value.Type() { |  | ||||||
| 		panic(fmt.Sprintf("BUG: inconsistent type for variable %d: expected %s but got %s", variable, b.variables[variable], value.Type())) |  | ||||||
| 	} |  | ||||||
| 	bb := block.(*basicBlock) | 	bb := block.(*basicBlock) | ||||||
| 	bb.lastDefinitions[variable] = value | 	bb.lastDefinitions[variable] = value | ||||||
| } | } | ||||||
|  | @ -426,20 +441,9 @@ func (b *builder) EntryBlock() BasicBlock { | ||||||
| 
 | 
 | ||||||
| // DeclareVariable implements Builder.DeclareVariable. | // DeclareVariable implements Builder.DeclareVariable. | ||||||
| func (b *builder) DeclareVariable(typ Type) Variable { | func (b *builder) DeclareVariable(typ Type) Variable { | ||||||
| 	v := b.allocateVariable() | 	v := b.nextVariable | ||||||
| 	iv := int(v) |  | ||||||
| 	if l := len(b.variables); l <= iv { |  | ||||||
| 		b.variables = append(b.variables, make([]Type, 2*(l+1))...) |  | ||||||
| 	} |  | ||||||
| 	b.variables[v] = typ |  | ||||||
| 	return v |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // allocateVariable allocates a new variable. |  | ||||||
| func (b *builder) allocateVariable() (ret Variable) { |  | ||||||
| 	ret = b.nextVariable |  | ||||||
| 	b.nextVariable++ | 	b.nextVariable++ | ||||||
| 	return | 	return v.setType(typ) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // allocateValue implements Builder.AllocateValue. | // allocateValue implements Builder.AllocateValue. | ||||||
|  | @ -475,8 +479,7 @@ func (b *builder) findValueInLinearPath(variable Variable, blk *basicBlock) Valu | ||||||
| 
 | 
 | ||||||
| // MustFindValue implements Builder.MustFindValue. | // MustFindValue implements Builder.MustFindValue. | ||||||
| func (b *builder) MustFindValue(variable Variable) Value { | func (b *builder) MustFindValue(variable Variable) Value { | ||||||
| 	typ := b.definedVariableType(variable) | 	return b.findValue(variable.getType(), variable, b.currentBB) | ||||||
| 	return b.findValue(typ, variable, b.currentBB) |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // findValue recursively tries to find the latest definition of a `variable`. The algorithm is described in | // findValue recursively tries to find the latest definition of a `variable`. The algorithm is described in | ||||||
|  | @ -504,7 +507,7 @@ func (b *builder) findValue(typ Type, variable Variable, blk *basicBlock) Value | ||||||
| 		return value | 		return value | ||||||
| 	} else if blk.EntryBlock() { | 	} else if blk.EntryBlock() { | ||||||
| 		// If this is the entry block, we reach the uninitialized variable which has zero value. | 		// If this is the entry block, we reach the uninitialized variable which has zero value. | ||||||
| 		return b.zeros[b.definedVariableType(variable)] | 		return b.zeros[variable.getType()] | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if pred := blk.singlePred; pred != nil { | 	if pred := blk.singlePred; pred != nil { | ||||||
|  | @ -536,14 +539,13 @@ func (b *builder) findValue(typ Type, variable Variable, blk *basicBlock) Value | ||||||
| 
 | 
 | ||||||
| 	if uniqueValue != ValueInvalid { | 	if uniqueValue != ValueInvalid { | ||||||
| 		// If all the predecessors have the same definition, we can use that value. | 		// If all the predecessors have the same definition, we can use that value. | ||||||
| 		b.DefineVariable(variable, uniqueValue, blk) |  | ||||||
| 		b.alias(tmpValue, uniqueValue) | 		b.alias(tmpValue, uniqueValue) | ||||||
| 		return uniqueValue | 		return uniqueValue | ||||||
| 	} else { | 	} else { | ||||||
| 		// Otherwise, add the tmpValue to this block as a parameter which may or may not be redundant, but | 		// Otherwise, add the tmpValue to this block as a parameter which may or may not be redundant, but | ||||||
| 		// later we eliminate trivial params in an optimization pass. This must be done before finding the | 		// later we eliminate trivial params in an optimization pass. This must be done before finding the | ||||||
| 		// definitions in the predecessors so that we can break the cycle. | 		// definitions in the predecessors so that we can break the cycle. | ||||||
| 		blk.addParamOn(tmpValue) | 		blk.addParamOn(b, tmpValue) | ||||||
| 		// After the new param is added, we have to manipulate the original branching instructions | 		// After the new param is added, we have to manipulate the original branching instructions | ||||||
| 		// in predecessors so that they would pass the definition of `variable` as the argument to | 		// in predecessors so that they would pass the definition of `variable` as the argument to | ||||||
| 		// the newly added PHI. | 		// the newly added PHI. | ||||||
|  | @ -566,8 +568,8 @@ func (b *builder) Seal(raw BasicBlock) { | ||||||
| 
 | 
 | ||||||
| 	for _, v := range blk.unknownValues { | 	for _, v := range blk.unknownValues { | ||||||
| 		variable, phiValue := v.variable, v.value | 		variable, phiValue := v.variable, v.value | ||||||
| 		typ := b.definedVariableType(variable) | 		typ := variable.getType() | ||||||
| 		blk.addParamOn(phiValue) | 		blk.addParamOn(b, phiValue) | ||||||
| 		for i := range blk.preds { | 		for i := range blk.preds { | ||||||
| 			pred := &blk.preds[i] | 			pred := &blk.preds[i] | ||||||
| 			predValue := b.findValue(typ, variable, pred.blk) | 			predValue := b.findValue(typ, variable, pred.blk) | ||||||
|  | @ -579,15 +581,6 @@ func (b *builder) Seal(raw BasicBlock) { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // definedVariableType returns the type of the given variable. If the variable is not defined yet, it panics. |  | ||||||
| func (b *builder) definedVariableType(variable Variable) Type { |  | ||||||
| 	typ := b.variables[variable] |  | ||||||
| 	if typ.invalid() { |  | ||||||
| 		panic(fmt.Sprintf("%s is not defined yet", variable)) |  | ||||||
| 	} |  | ||||||
| 	return typ |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Format implements Builder.Format. | // Format implements Builder.Format. | ||||||
| func (b *builder) Format() string { | func (b *builder) Format() string { | ||||||
| 	str := strings.Builder{} | 	str := strings.Builder{} | ||||||
|  | @ -689,15 +682,24 @@ func (b *builder) blockIteratorReversePostOrderNext() *basicBlock { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // ValueRefCounts implements Builder.ValueRefCounts. | // ValuesInfo implements Builder.ValuesInfo. | ||||||
| func (b *builder) ValueRefCounts() []int { | func (b *builder) ValuesInfo() []ValueInfo { | ||||||
| 	return b.valueRefCounts | 	return b.valuesInfo | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // alias records the alias of the given values. The alias(es) will be | // alias records the alias of the given values. The alias(es) will be | ||||||
| // eliminated in the optimization pass via resolveArgumentAlias. | // eliminated in the optimization pass via resolveArgumentAlias. | ||||||
| func (b *builder) alias(dst, src Value) { | func (b *builder) alias(dst, src Value) { | ||||||
| 	b.valueIDAliases[dst.ID()] = src | 	did := int(dst.ID()) | ||||||
|  | 	if did >= len(b.valuesInfo) { | ||||||
|  | 		l := did + 1 - len(b.valuesInfo) | ||||||
|  | 		b.valuesInfo = append(b.valuesInfo, make([]ValueInfo, l)...) | ||||||
|  | 		view := b.valuesInfo[len(b.valuesInfo)-l:] | ||||||
|  | 		for i := range view { | ||||||
|  | 			view[i].alias = ValueInvalid | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	b.valuesInfo[did].alias = src | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // resolveArgumentAlias resolves the alias of the arguments of the given instruction. | // resolveArgumentAlias resolves the alias of the arguments of the given instruction. | ||||||
|  | @ -722,10 +724,13 @@ func (b *builder) resolveArgumentAlias(instr *Instruction) { | ||||||
| 
 | 
 | ||||||
| // resolveAlias resolves the alias of the given value. | // resolveAlias resolves the alias of the given value. | ||||||
| func (b *builder) resolveAlias(v Value) Value { | func (b *builder) resolveAlias(v Value) Value { | ||||||
|  | 	info := b.valuesInfo | ||||||
|  | 	l := ValueID(len(info)) | ||||||
| 	// Some aliases are chained, so we need to resolve them recursively. | 	// Some aliases are chained, so we need to resolve them recursively. | ||||||
| 	for { | 	for { | ||||||
| 		if src, ok := b.valueIDAliases[v.ID()]; ok { | 		vid := v.ID() | ||||||
| 			v = src | 		if vid < l && info[vid].alias.Valid() { | ||||||
|  | 			v = info[vid].alias | ||||||
| 		} else { | 		} else { | ||||||
| 			break | 			break | ||||||
| 		} | 		} | ||||||
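Both alias bookkeeping and reference counts now live in the single valuesInfo table indexed by ValueID, replacing the valueIDAliases map. The table is grown on demand, and fresh slots must be initialized to ValueInvalid so that "no alias" stays distinguishable from "alias of v0". A self-contained sketch of that pattern with simplified types:

package main

import "fmt"

type Value uint32

const ValueInvalid = ^Value(0)

type ValueInfo struct {
	RefCount uint32
	alias    Value
}

type builder struct{ valuesInfo []ValueInfo }

// setAlias mirrors builder.alias above: grow, then mark new slots invalid,
// because the zero value of Value would otherwise read as "alias of v0".
func (b *builder) setAlias(dst, src Value) {
	if n := int(dst) + 1 - len(b.valuesInfo); n > 0 {
		b.valuesInfo = append(b.valuesInfo, make([]ValueInfo, n)...)
		fresh := b.valuesInfo[len(b.valuesInfo)-n:]
		for i := range fresh {
			fresh[i].alias = ValueInvalid
		}
	}
	b.valuesInfo[dst].alias = src
}

// resolveAlias walks chained aliases to the representative value.
func (b *builder) resolveAlias(v Value) Value {
	for int(v) < len(b.valuesInfo) && b.valuesInfo[v].alias != ValueInvalid {
		v = b.valuesInfo[v].alias
	}
	return v
}

func main() {
	b := &builder{}
	b.setAlias(5, 3) // v5 -> v3
	b.setAlias(3, 1) // v3 -> v1, so v5 resolves through the chain
	fmt.Println(b.resolveAlias(5)) // 1
}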
|  | @ -773,3 +778,13 @@ func (b *builder) LoopNestingForestRoots() []BasicBlock { | ||||||
| func (b *builder) LowestCommonAncestor(blk1, blk2 BasicBlock) BasicBlock { | func (b *builder) LowestCommonAncestor(blk1, blk2 BasicBlock) BasicBlock { | ||||||
| 	return b.sparseTree.findLCA(blk1.ID(), blk2.ID()) | 	return b.sparseTree.findLCA(blk1.ID(), blk2.ID()) | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | // InstructionOfValue returns the instruction that produces the given Value, or nil | ||||||
|  | // if the Value is not produced by any instruction. | ||||||
|  | func (b *builder) InstructionOfValue(v Value) *Instruction { | ||||||
|  | 	instrID := v.instructionID() | ||||||
|  | 	if instrID <= 0 { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	return b.instructionsPool.View(instrID - 1) | ||||||
|  | } | ||||||
|  |  | ||||||
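InstructionOfValue is what replaces the old valueIDToInstruction side table: each result Value records its producing instruction's ID at creation (see the rValue = r1.setInstructionID(instr.id) change earlier), and the -1 here implies the stored field is id+1, reserving zero for "no producer" such as block parameters. A schematic sketch under that assumption:

package main

import "fmt"

type instruction struct{ id int }

// builder keeps instructions in a flat pool where pool[i].id == i,
// matching pools that hand out IDs in allocation order.
type builder struct{ pool []*instruction }

// instructionOfValue takes the 28-bit field unpacked from a Value;
// 0 is the "no producer" sentinel, otherwise id+1 was stored.
func (b *builder) instructionOfValue(packedInstrID int) *instruction {
	if packedInstrID <= 0 {
		return nil
	}
	return b.pool[packedInstrID-1]
}

func main() {
	b := &builder{pool: []*instruction{{id: 0}, {id: 1}}}
	fmt.Println(b.instructionOfValue(0))    // <nil>: e.g. a block parameter
	fmt.Println(b.instructionOfValue(2).id) // 1: the stored field was id+1
}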
							
								
								
									
45  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/instructions.go (generated, vendored)
|  | @ -25,11 +25,13 @@ type Instruction struct { | ||||||
| 	v3         Value | 	v3         Value | ||||||
| 	vs         Values | 	vs         Values | ||||||
| 	typ        Type | 	typ        Type | ||||||
| 	blk        BasicBlock |  | ||||||
| 	targets    []BasicBlock |  | ||||||
| 	prev, next *Instruction | 	prev, next *Instruction | ||||||
| 
 | 
 | ||||||
|  | 	// rValue is the (first) return value of this instruction. | ||||||
|  | 	// For branching instructions other than OpcodeBrTable, it holds the target BasicBlockID to jump to, cast to Value. | ||||||
| 	rValue Value | 	rValue Value | ||||||
|  | 	// rValues are the rest of the return values of this instruction. | ||||||
|  | 	// For OpcodeBrTable, it holds the list of target BasicBlockIDs, each cast to Value. | ||||||
| 	rValues        Values | 	rValues        Values | ||||||
| 	gid            InstructionGroupID | 	gid            InstructionGroupID | ||||||
| 	sourceOffset   SourceOffset | 	sourceOffset   SourceOffset | ||||||
|  | @ -105,6 +107,9 @@ type InstructionGroupID uint32 | ||||||
| // Returns Value(s) produced by this instruction if any. | // Returns Value(s) produced by this instruction if any. | ||||||
| // The `first` is the first return value, and `rest` is the rest of the values. | // The `first` is the first return value, and `rest` is the rest of the values. | ||||||
| func (i *Instruction) Returns() (first Value, rest []Value) { | func (i *Instruction) Returns() (first Value, rest []Value) { | ||||||
|  | 	if i.IsBranching() { | ||||||
|  | 		return ValueInvalid, nil | ||||||
|  | 	} | ||||||
| 	return i.rValue, i.rValues.View() | 	return i.rValue, i.rValues.View() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -2077,7 +2082,7 @@ func (i *Instruction) InvertBrx() { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // BranchData returns the branch data for this instruction necessary for backends. | // BranchData returns the branch data for this instruction necessary for backends. | ||||||
| func (i *Instruction) BranchData() (condVal Value, blockArgs []Value, target BasicBlock) { | func (i *Instruction) BranchData() (condVal Value, blockArgs []Value, target BasicBlockID) { | ||||||
| 	switch i.opcode { | 	switch i.opcode { | ||||||
| 	case OpcodeJump: | 	case OpcodeJump: | ||||||
| 		condVal = ValueInvalid | 		condVal = ValueInvalid | ||||||
|  | @ -2087,17 +2092,17 @@ func (i *Instruction) BranchData() (condVal Value, blockArgs []Value, target Bas | ||||||
| 		panic("BUG") | 		panic("BUG") | ||||||
| 	} | 	} | ||||||
| 	blockArgs = i.vs.View() | 	blockArgs = i.vs.View() | ||||||
| 	target = i.blk | 	target = BasicBlockID(i.rValue) | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // BrTableData returns the branch table data for this instruction necessary for backends. | // BrTableData returns the branch table data for this instruction necessary for backends. | ||||||
| func (i *Instruction) BrTableData() (index Value, targets []BasicBlock) { | func (i *Instruction) BrTableData() (index Value, targets Values) { | ||||||
| 	if i.opcode != OpcodeBrTable { | 	if i.opcode != OpcodeBrTable { | ||||||
| 		panic("BUG: BrTableData only available for OpcodeBrTable") | 		panic("BUG: BrTableData only available for OpcodeBrTable") | ||||||
| 	} | 	} | ||||||
| 	index = i.v | 	index = i.v | ||||||
| 	targets = i.targets | 	targets = i.rValues | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -2105,7 +2110,7 @@ func (i *Instruction) BrTableData() (index Value, targets []BasicBlock) { | ||||||
| func (i *Instruction) AsJump(vs Values, target BasicBlock) *Instruction { | func (i *Instruction) AsJump(vs Values, target BasicBlock) *Instruction { | ||||||
| 	i.opcode = OpcodeJump | 	i.opcode = OpcodeJump | ||||||
| 	i.vs = vs | 	i.vs = vs | ||||||
| 	i.blk = target | 	i.rValue = Value(target.ID()) | ||||||
| 	return i | 	return i | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -2130,7 +2135,7 @@ func (i *Instruction) AsBrz(v Value, args Values, target BasicBlock) { | ||||||
| 	i.opcode = OpcodeBrz | 	i.opcode = OpcodeBrz | ||||||
| 	i.v = v | 	i.v = v | ||||||
| 	i.vs = args | 	i.vs = args | ||||||
| 	i.blk = target | 	i.rValue = Value(target.ID()) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // AsBrnz initializes this instruction as a branch-if-not-zero instruction with OpcodeBrnz. | // AsBrnz initializes this instruction as a branch-if-not-zero instruction with OpcodeBrnz. | ||||||
|  | @ -2138,15 +2143,16 @@ func (i *Instruction) AsBrnz(v Value, args Values, target BasicBlock) *Instructi | ||||||
| 	i.opcode = OpcodeBrnz | 	i.opcode = OpcodeBrnz | ||||||
| 	i.v = v | 	i.v = v | ||||||
| 	i.vs = args | 	i.vs = args | ||||||
| 	i.blk = target | 	i.rValue = Value(target.ID()) | ||||||
| 	return i | 	return i | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // AsBrTable initializes this instruction as a branch-table instruction with OpcodeBrTable. | // AsBrTable initializes this instruction as a branch-table instruction with OpcodeBrTable. | ||||||
| func (i *Instruction) AsBrTable(index Value, targets []BasicBlock) { | // targets is a list of basic block IDs cast to Values. | ||||||
|  | func (i *Instruction) AsBrTable(index Value, targets Values) { | ||||||
| 	i.opcode = OpcodeBrTable | 	i.opcode = OpcodeBrTable | ||||||
| 	i.v = index | 	i.v = index | ||||||
| 	i.targets = targets | 	i.rValues = targets | ||||||
| } | } | ||||||
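Branch instructions never produce an SSA result, so the otherwise-unused rValue slot can carry the target's BasicBlockID cast to Value, and BranchData simply casts it back; that is also why Returns() above now reports nothing for branching opcodes, so the packed ID is never mistaken for a real value. A tiny model of the encode/decode roundtrip with simplified types:

package main

import "fmt"

type (
	Value        uint64
	BasicBlockID uint32
)

type instruction struct {
	opcode string
	rValue Value // for branches: the target BasicBlockID, cast to Value
}

// asJump mirrors AsJump above: the target travels inside rValue.
func (i *instruction) asJump(target BasicBlockID) {
	i.opcode = "jump"
	i.rValue = Value(target)
}

// branchTarget mirrors the BasicBlockID(i.rValue) casts in BranchData.
func (i *instruction) branchTarget() BasicBlockID {
	return BasicBlockID(i.rValue)
}

func main() {
	var i instruction
	i.asJump(3)
	fmt.Println(i.branchTarget()) // 3: resolved to a block via b.basicBlock(id)
}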
| 
 | 
 | ||||||
| // AsCall initializes this instruction as a call instruction with OpcodeCall. | // AsCall initializes this instruction as a call instruction with OpcodeCall. | ||||||
|  | @ -2531,7 +2537,8 @@ func (i *Instruction) Format(b Builder) string { | ||||||
| 		if i.IsFallthroughJump() { | 		if i.IsFallthroughJump() { | ||||||
| 			vs[0] = " fallthrough" | 			vs[0] = " fallthrough" | ||||||
| 		} else { | 		} else { | ||||||
| 			vs[0] = " " + i.blk.(*basicBlock).Name() | 			blockId := BasicBlockID(i.rValue) | ||||||
|  | 			vs[0] = " " + b.BasicBlock(blockId).Name() | ||||||
| 		} | 		} | ||||||
| 		for idx := range view { | 		for idx := range view { | ||||||
| 			vs[idx+1] = view[idx].Format(b) | 			vs[idx+1] = view[idx].Format(b) | ||||||
|  | @ -2542,7 +2549,8 @@ func (i *Instruction) Format(b Builder) string { | ||||||
| 		view := i.vs.View() | 		view := i.vs.View() | ||||||
| 		vs := make([]string, len(view)+2) | 		vs := make([]string, len(view)+2) | ||||||
| 		vs[0] = " " + i.v.Format(b) | 		vs[0] = " " + i.v.Format(b) | ||||||
| 		vs[1] = i.blk.(*basicBlock).Name() | 		blockId := BasicBlockID(i.rValue) | ||||||
|  | 		vs[1] = b.BasicBlock(blockId).Name() | ||||||
| 		for idx := range view { | 		for idx := range view { | ||||||
| 			vs[idx+2] = view[idx].Format(b) | 			vs[idx+2] = view[idx].Format(b) | ||||||
| 		} | 		} | ||||||
|  | @ -2551,8 +2559,8 @@ func (i *Instruction) Format(b Builder) string { | ||||||
| 		// `BrTable index, [label1, label2, ... labelN]` | 		// `BrTable index, [label1, label2, ... labelN]` | ||||||
| 		instSuffix = fmt.Sprintf(" %s", i.v.Format(b)) | 		instSuffix = fmt.Sprintf(" %s", i.v.Format(b)) | ||||||
| 		instSuffix += ", [" | 		instSuffix += ", [" | ||||||
| 		for i, target := range i.targets { | 		for i, target := range i.rValues.View() { | ||||||
| 			blk := target.(*basicBlock) | 			blk := b.BasicBlock(BasicBlockID(target)) | ||||||
| 			if i == 0 { | 			if i == 0 { | ||||||
| 				instSuffix += blk.Name() | 				instSuffix += blk.Name() | ||||||
| 			} else { | 			} else { | ||||||
|  | @ -2621,11 +2629,12 @@ func (i *Instruction) Format(b Builder) string { | ||||||
| 	instr := i.opcode.String() + instSuffix | 	instr := i.opcode.String() + instSuffix | ||||||
| 
 | 
 | ||||||
| 	var rvs []string | 	var rvs []string | ||||||
| 	if rv := i.rValue; rv.Valid() { | 	r1, rs := i.Returns() | ||||||
| 		rvs = append(rvs, rv.formatWithType(b)) | 	if r1.Valid() { | ||||||
|  | 		rvs = append(rvs, r1.formatWithType(b)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for _, v := range i.rValues.View() { | 	for _, v := range rs { | ||||||
| 		rvs = append(rvs, v.formatWithType(b)) | 		rvs = append(rvs, v.formatWithType(b)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
							
								
								
									
103  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass.go (generated, vendored)
|  | @ -112,7 +112,7 @@ func passDeadBlockEliminationOpt(b *builder) { | ||||||
| // This requires the reverse post-order traversal to be calculated before calling this function, | // This requires the reverse post-order traversal to be calculated before calling this function, | ||||||
| // hence passCalculateImmediateDominators must be called before this. | // hence passCalculateImmediateDominators must be called before this. | ||||||
| func passRedundantPhiEliminationOpt(b *builder) { | func passRedundantPhiEliminationOpt(b *builder) { | ||||||
| 	redundantParameterIndexes := b.ints[:0] // reuse the slice from previous iterations. | 	redundantParams := b.redundantParams[:0] // reuse the slice from previous iterations. | ||||||
| 
 | 
 | ||||||
| 	// TODO: this might be costly for large programs, but at least, as far as I did the experiment, it's almost the | 	// TODO: this might be costly for large programs, but at least, as far as I did the experiment, it's almost the | ||||||
| 	//  same as the single iteration version in terms of the overall compilation time. That *might be* mostly thanks to the fact | 	//  same as the single iteration version in terms of the overall compilation time. That *might be* mostly thanks to the fact | ||||||
|  | @ -128,10 +128,11 @@ func passRedundantPhiEliminationOpt(b *builder) { | ||||||
| 		_ = b.blockIteratorReversePostOrderBegin() // skip entry block! | 		_ = b.blockIteratorReversePostOrderBegin() // skip entry block! | ||||||
| 		// Below, we intentionally use the named iteration variable name, as this comes with inevitable nested for loops! | 		// Below, we intentionally use the named iteration variable name, as this comes with inevitable nested for loops! | ||||||
| 		for blk := b.blockIteratorReversePostOrderNext(); blk != nil; blk = b.blockIteratorReversePostOrderNext() { | 		for blk := b.blockIteratorReversePostOrderNext(); blk != nil; blk = b.blockIteratorReversePostOrderNext() { | ||||||
| 			paramNum := len(blk.params) | 			params := blk.params.View() | ||||||
|  | 			paramNum := len(params) | ||||||
| 
 | 
 | ||||||
| 			for paramIndex := 0; paramIndex < paramNum; paramIndex++ { | 			for paramIndex := 0; paramIndex < paramNum; paramIndex++ { | ||||||
| 				phiValue := blk.params[paramIndex] | 				phiValue := params[paramIndex] | ||||||
| 				redundant := true | 				redundant := true | ||||||
| 
 | 
 | ||||||
| 				nonSelfReferencingValue := ValueInvalid | 				nonSelfReferencingValue := ValueInvalid | ||||||
|  | @ -162,55 +163,58 @@ func passRedundantPhiEliminationOpt(b *builder) { | ||||||
| 				} | 				} | ||||||
| 
 | 
 | ||||||
| 				if redundant { | 				if redundant { | ||||||
| 					b.redundantParameterIndexToValue[paramIndex] = nonSelfReferencingValue | 					redundantParams = append(redundantParams, redundantParam{ | ||||||
| 					redundantParameterIndexes = append(redundantParameterIndexes, paramIndex) | 						index: paramIndex, uniqueValue: nonSelfReferencingValue, | ||||||
|  | 					}) | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			if len(b.redundantParameterIndexToValue) == 0 { | 			if len(redundantParams) == 0 { | ||||||
| 				continue | 				continue | ||||||
| 			} | 			} | ||||||
| 			changed = true | 			changed = true | ||||||
| 
 | 
 | ||||||
| 			// Remove the redundant PHIs from the argument list of branching instructions. | 			// Remove the redundant PHIs from the argument list of branching instructions. | ||||||
| 			for predIndex := range blk.preds { | 			for predIndex := range blk.preds { | ||||||
| 				var cur int | 				redundantParamsCur, predParamCur := 0, 0 | ||||||
| 				predBlk := blk.preds[predIndex] | 				predBlk := blk.preds[predIndex] | ||||||
| 				branchInst := predBlk.branch | 				branchInst := predBlk.branch | ||||||
| 				view := branchInst.vs.View() | 				view := branchInst.vs.View() | ||||||
| 				for argIndex, value := range view { | 				for argIndex, value := range view { | ||||||
| 					if _, ok := b.redundantParameterIndexToValue[argIndex]; !ok { | 					if len(redundantParams) == redundantParamsCur || | ||||||
| 						view[cur] = value | 						redundantParams[redundantParamsCur].index != argIndex { | ||||||
| 						cur++ | 						view[predParamCur] = value | ||||||
|  | 						predParamCur++ | ||||||
|  | 					} else { | ||||||
|  | 						redundantParamsCur++ | ||||||
| 					} | 					} | ||||||
| 				} | 				} | ||||||
| 				branchInst.vs.Cut(cur) | 				branchInst.vs.Cut(predParamCur) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// Still need to have the definition of the value of the PHI (previously as the parameter). | 			// Still need to have the definition of the value of the PHI (previously as the parameter). | ||||||
| 			for _, redundantParamIndex := range redundantParameterIndexes { | 			for i := range redundantParams { | ||||||
| 				phiValue := blk.params[redundantParamIndex] | 				redundantValue := &redundantParams[i] | ||||||
| 				onlyValue := b.redundantParameterIndexToValue[redundantParamIndex] | 				phiValue := params[redundantValue.index] | ||||||
| 				// Create an alias in this block from the only phi argument to the phi value. | 				// Create an alias in this block from the only phi argument to the phi value. | ||||||
| 				b.alias(phiValue, onlyValue) | 				b.alias(phiValue, redundantValue.uniqueValue) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// Finally, remove the param from the blk. | 			// Finally, remove the param from the blk. | ||||||
| 			var cur int | 			paramsCur, redundantParamsCur := 0, 0 | ||||||
| 			for paramIndex := 0; paramIndex < paramNum; paramIndex++ { | 			for paramIndex := 0; paramIndex < paramNum; paramIndex++ { | ||||||
| 				param := blk.params[paramIndex] | 				param := params[paramIndex] | ||||||
| 				if _, ok := b.redundantParameterIndexToValue[paramIndex]; !ok { | 				if len(redundantParams) == redundantParamsCur || redundantParams[redundantParamsCur].index != paramIndex { | ||||||
| 					blk.params[cur] = param | 					params[paramsCur] = param | ||||||
| 					cur++ | 					paramsCur++ | ||||||
|  | 				} else { | ||||||
|  | 					redundantParamsCur++ | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 			blk.params = blk.params[:cur] | 			blk.params.Cut(paramsCur) | ||||||
| 
 | 
 | ||||||
| 			// Clears the map for the next iteration. | 			// Clears the map for the next iteration. | ||||||
| 			for _, paramIndex := range redundantParameterIndexes { | 			redundantParams = redundantParams[:0] | ||||||
| 				delete(b.redundantParameterIndexToValue, paramIndex) |  | ||||||
| 			} |  | ||||||
| 			redundantParameterIndexes = redundantParameterIndexes[:0] |  | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if !changed { | 		if !changed { | ||||||
|  | @ -219,7 +223,7 @@ func passRedundantPhiEliminationOpt(b *builder) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Reuse the slice for the future passes. | 	// Reuse the slice for the future passes. | ||||||
| 	b.ints = redundantParameterIndexes | 	b.redundantParams = redundantParams | ||||||
| } | } | ||||||
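Because the parameter scan above visits indexes in ascending order, redundantParams is always sorted by index, which is what lets both compaction loops walk it with a cursor instead of doing a map lookup per argument as the old code did. The filtering pattern in isolation, with plain ints standing in for ssa.Value:

package main

import "fmt"

type redundantParam struct {
	index       int
	uniqueValue int
}

// compact drops the arguments whose positions appear in redundant (sorted by
// index) and returns the kept prefix, mirroring the in-place view rewrite
// followed by branchInst.vs.Cut / blk.params.Cut above.
func compact(args []int, redundant []redundantParam) []int {
	redCur, keep := 0, 0
	for i, a := range args {
		if redCur == len(redundant) || redundant[redCur].index != i {
			args[keep] = a
			keep++
		} else {
			redCur++
		}
	}
	return args[:keep]
}

func main() {
	args := []int{100, 101, 102, 103}
	red := []redundantParam{{index: 1, uniqueValue: 101}, {index: 3, uniqueValue: 103}}
	fmt.Println(compact(args, red)) // [100 102]
}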
| 
 | 
 | ||||||
| // passDeadCodeEliminationOpt traverses all the instructions, and calculates the reference count of each Value, and | // passDeadCodeEliminationOpt traverses all the instructions, and calculates the reference count of each Value, and | ||||||
|  | @ -231,11 +235,13 @@ func passRedundantPhiEliminationOpt(b *builder) { | ||||||
| // TODO: the algorithm here might not be efficient. Get back to this later. | // TODO: the algorithm here might not be efficient. Get back to this later. | ||||||
| func passDeadCodeEliminationOpt(b *builder) { | func passDeadCodeEliminationOpt(b *builder) { | ||||||
| 	nvid := int(b.nextValueID) | 	nvid := int(b.nextValueID) | ||||||
| 	if nvid >= len(b.valueRefCounts) { | 	if nvid >= len(b.valuesInfo) { | ||||||
| 		b.valueRefCounts = append(b.valueRefCounts, make([]int, nvid-len(b.valueRefCounts)+1)...) | 		l := nvid - len(b.valuesInfo) + 1 | ||||||
|  | 		b.valuesInfo = append(b.valuesInfo, make([]ValueInfo, l)...) | ||||||
|  | 		view := b.valuesInfo[len(b.valuesInfo)-l:] | ||||||
|  | 		for i := range view { | ||||||
|  | 			view[i].alias = ValueInvalid | ||||||
| 		} | 		} | ||||||
| 	if nvid >= len(b.valueIDToInstruction) { |  | ||||||
| 		b.valueIDToInstruction = append(b.valueIDToInstruction, make([]*Instruction, nvid-len(b.valueIDToInstruction)+1)...) |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// First, we gather all the instructions with side effects. | 	// First, we gather all the instructions with side effects. | ||||||
|  | @ -255,14 +261,6 @@ func passDeadCodeEliminationOpt(b *builder) { | ||||||
| 				// The strict side effect should create different instruction groups. | 				// The strict side effect should create different instruction groups. | ||||||
| 				gid++ | 				gid++ | ||||||
| 			} | 			} | ||||||
| 
 |  | ||||||
| 			r1, rs := cur.Returns() |  | ||||||
| 			if r1.Valid() { |  | ||||||
| 				b.valueIDToInstruction[r1.ID()] = cur |  | ||||||
| 			} |  | ||||||
| 			for _, r := range rs { |  | ||||||
| 				b.valueIDToInstruction[r.ID()] = cur |  | ||||||
| 			} |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -283,28 +281,28 @@ func passDeadCodeEliminationOpt(b *builder) { | ||||||
| 
 | 
 | ||||||
| 		v1, v2, v3, vs := live.Args() | 		v1, v2, v3, vs := live.Args() | ||||||
| 		if v1.Valid() { | 		if v1.Valid() { | ||||||
| 			producingInst := b.valueIDToInstruction[v1.ID()] | 			producingInst := b.InstructionOfValue(v1) | ||||||
| 			if producingInst != nil { | 			if producingInst != nil { | ||||||
| 				liveInstructions = append(liveInstructions, producingInst) | 				liveInstructions = append(liveInstructions, producingInst) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if v2.Valid() { | 		if v2.Valid() { | ||||||
| 			producingInst := b.valueIDToInstruction[v2.ID()] | 			producingInst := b.InstructionOfValue(v2) | ||||||
| 			if producingInst != nil { | 			if producingInst != nil { | ||||||
| 				liveInstructions = append(liveInstructions, producingInst) | 				liveInstructions = append(liveInstructions, producingInst) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if v3.Valid() { | 		if v3.Valid() { | ||||||
| 			producingInst := b.valueIDToInstruction[v3.ID()] | 			producingInst := b.InstructionOfValue(v3) | ||||||
| 			if producingInst != nil { | 			if producingInst != nil { | ||||||
| 				liveInstructions = append(liveInstructions, producingInst) | 				liveInstructions = append(liveInstructions, producingInst) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		for _, v := range vs { | 		for _, v := range vs { | ||||||
| 			producingInst := b.valueIDToInstruction[v.ID()] | 			producingInst := b.InstructionOfValue(v) | ||||||
| 			if producingInst != nil { | 			if producingInst != nil { | ||||||
| 				liveInstructions = append(liveInstructions, producingInst) | 				liveInstructions = append(liveInstructions, producingInst) | ||||||
| 			} | 			} | ||||||
|  | @ -352,34 +350,19 @@ func (b *builder) incRefCount(id ValueID, from *Instruction) { | ||||||
| 	if wazevoapi.SSALoggingEnabled { | 	if wazevoapi.SSALoggingEnabled { | ||||||
| 		fmt.Printf("v%d referenced from %v\n", id, from.Format(b)) | 		fmt.Printf("v%d referenced from %v\n", id, from.Format(b)) | ||||||
| 	} | 	} | ||||||
| 	b.valueRefCounts[id]++ | 	info := &b.valuesInfo[id] | ||||||
|  | 	info.RefCount++ | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // passNopInstElimination eliminates the instructions which are essentially no-ops. | // passNopInstElimination eliminates the instructions which are essentially no-ops. | ||||||
| func passNopInstElimination(b *builder) { | func passNopInstElimination(b *builder) { | ||||||
| 	if int(b.nextValueID) >= len(b.valueIDToInstruction) { |  | ||||||
| 		b.valueIDToInstruction = append(b.valueIDToInstruction, make([]*Instruction, int(b.nextValueID)-len(b.valueIDToInstruction)+1)...) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() { |  | ||||||
| 		for cur := blk.rootInstr; cur != nil; cur = cur.next { |  | ||||||
| 			r1, rs := cur.Returns() |  | ||||||
| 			if r1.Valid() { |  | ||||||
| 				b.valueIDToInstruction[r1.ID()] = cur |  | ||||||
| 			} |  | ||||||
| 			for _, r := range rs { |  | ||||||
| 				b.valueIDToInstruction[r.ID()] = cur |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() { | 	for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() { | ||||||
| 		for cur := blk.rootInstr; cur != nil; cur = cur.next { | 		for cur := blk.rootInstr; cur != nil; cur = cur.next { | ||||||
| 			switch cur.Opcode() { | 			switch cur.Opcode() { | ||||||
| 			// TODO: add more logics here. | 			// TODO: add more logics here. | ||||||
| 			case OpcodeIshl, OpcodeSshr, OpcodeUshr: | 			case OpcodeIshl, OpcodeSshr, OpcodeUshr: | ||||||
| 				x, amount := cur.Arg2() | 				x, amount := cur.Arg2() | ||||||
| 				definingInst := b.valueIDToInstruction[amount.ID()] | 				definingInst := b.InstructionOfValue(amount) | ||||||
| 				if definingInst == nil { | 				if definingInst == nil { | ||||||
| 					// If there's no defining instruction, that means the amount is coming from the parameter. | 					// If there's no defining instruction, that means the amount is coming from the parameter. | ||||||
| 					continue | 					continue | ||||||
|  |  | ||||||
							
								
								
									
19  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_blk_layouts.go (generated, vendored)
|  | @ -33,7 +33,7 @@ func passLayoutBlocks(b *builder) { | ||||||
| 		} | 		} | ||||||
| 		nonSplitBlocks = append(nonSplitBlocks, blk) | 		nonSplitBlocks = append(nonSplitBlocks, blk) | ||||||
| 		if i != len(b.reversePostOrderedBasicBlocks)-1 { | 		if i != len(b.reversePostOrderedBasicBlocks)-1 { | ||||||
| 			_ = maybeInvertBranches(blk, b.reversePostOrderedBasicBlocks[i+1]) | 			_ = maybeInvertBranches(b, blk, b.reversePostOrderedBasicBlocks[i+1]) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -111,7 +111,7 @@ func passLayoutBlocks(b *builder) { | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			fallthroughBranch := blk.currentInstr | 			fallthroughBranch := blk.currentInstr | ||||||
| 			if fallthroughBranch.opcode == OpcodeJump && fallthroughBranch.blk == trampoline { | 			if fallthroughBranch.opcode == OpcodeJump && BasicBlockID(fallthroughBranch.rValue) == trampoline.id { | ||||||
| 				// This can be lowered as fallthrough at the end of the block. | 				// This can be lowered as fallthrough at the end of the block. | ||||||
| 				b.reversePostOrderedBasicBlocks = append(b.reversePostOrderedBasicBlocks, trampoline) | 				b.reversePostOrderedBasicBlocks = append(b.reversePostOrderedBasicBlocks, trampoline) | ||||||
| 				trampoline.visited = 1 // mark as inserted. | 				trampoline.visited = 1 // mark as inserted. | ||||||
|  | @ -157,7 +157,7 @@ func (b *builder) markFallthroughJumps() { | ||||||
| 	for i, blk := range b.reversePostOrderedBasicBlocks { | 	for i, blk := range b.reversePostOrderedBasicBlocks { | ||||||
| 		if i < l { | 		if i < l { | ||||||
| 			cur := blk.currentInstr | 			cur := blk.currentInstr | ||||||
| 			if cur.opcode == OpcodeJump && cur.blk == b.reversePostOrderedBasicBlocks[i+1] { | 			if cur.opcode == OpcodeJump && BasicBlockID(cur.rValue) == b.reversePostOrderedBasicBlocks[i+1].id { | ||||||
| 				cur.AsFallthroughJump() | 				cur.AsFallthroughJump() | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | @ -168,7 +168,7 @@ func (b *builder) markFallthroughJumps() { | ||||||
| // nextInRPO is the next block in the reverse post-order. | // nextInRPO is the next block in the reverse post-order. | ||||||
| // | // | ||||||
| // Returns true if the branch is inverted for testing purpose. | // Returns true if the branch is inverted for testing purpose. | ||||||
| func maybeInvertBranches(now *basicBlock, nextInRPO *basicBlock) bool { | func maybeInvertBranches(b *builder, now *basicBlock, nextInRPO *basicBlock) bool { | ||||||
| 	fallthroughBranch := now.currentInstr | 	fallthroughBranch := now.currentInstr | ||||||
| 	if fallthroughBranch.opcode == OpcodeBrTable { | 	if fallthroughBranch.opcode == OpcodeBrTable { | ||||||
| 		return false | 		return false | ||||||
|  | @ -187,7 +187,8 @@ func maybeInvertBranches(now *basicBlock, nextInRPO *basicBlock) bool { | ||||||
| 	// So this block has two branches (a conditional branch followed by an unconditional branch) at the end. | 	// So this block has two branches (a conditional branch followed by an unconditional branch) at the end. | ||||||
| 	// We can invert the condition of the branch if it makes the fallthrough more likely. | 	// We can invert the condition of the branch if it makes the fallthrough more likely. | ||||||
| 
 | 
 | ||||||
| 	fallthroughTarget, condTarget := fallthroughBranch.blk.(*basicBlock), condBranch.blk.(*basicBlock) | 	fallthroughTarget := b.basicBlock(BasicBlockID(fallthroughBranch.rValue)) | ||||||
|  | 	condTarget := b.basicBlock(BasicBlockID(condBranch.rValue)) | ||||||
| 
 | 
 | ||||||
| 	if fallthroughTarget.loopHeader { | 	if fallthroughTarget.loopHeader { | ||||||
| 		// First, if the tail's target is loopHeader, we don't need to do anything here, | 		// First, if the tail's target is loopHeader, we don't need to do anything here, | ||||||
|  | @ -231,8 +232,8 @@ invert: | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	condBranch.InvertBrx() | 	condBranch.InvertBrx() | ||||||
| 	condBranch.blk = fallthroughTarget | 	condBranch.rValue = Value(fallthroughTarget.ID()) | ||||||
| 	fallthroughBranch.blk = condTarget | 	fallthroughBranch.rValue = Value(condTarget.ID()) | ||||||
| 	if wazevoapi.SSALoggingEnabled { | 	if wazevoapi.SSALoggingEnabled { | ||||||
| 		fmt.Printf("inverting branches at %d->%d and %d->%d\n", | 		fmt.Printf("inverting branches at %d->%d and %d->%d\n", | ||||||
| 			now.ID(), fallthroughTarget.ID(), now.ID(), condTarget.ID()) | 			now.ID(), fallthroughTarget.ID(), now.ID(), condTarget.ID()) | ||||||
|  | @ -275,7 +276,7 @@ func (b *builder) splitCriticalEdge(pred, succ *basicBlock, predInfo *basicBlock | ||||||
| 	// Replace originalBranch with the newBranch. | 	// Replace originalBranch with the newBranch. | ||||||
| 	newBranch := b.AllocateInstruction() | 	newBranch := b.AllocateInstruction() | ||||||
| 	newBranch.opcode = originalBranch.opcode | 	newBranch.opcode = originalBranch.opcode | ||||||
| 	newBranch.blk = trampoline | 	newBranch.rValue = Value(trampoline.ID()) | ||||||
| 	switch originalBranch.opcode { | 	switch originalBranch.opcode { | ||||||
| 	case OpcodeJump: | 	case OpcodeJump: | ||||||
| 	case OpcodeBrz, OpcodeBrnz: | 	case OpcodeBrz, OpcodeBrnz: | ||||||
|  | @ -303,7 +304,7 @@ func (b *builder) splitCriticalEdge(pred, succ *basicBlock, predInfo *basicBlock | ||||||
| 		trampoline.validate(b) | 		trampoline.validate(b) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if len(trampoline.params) > 0 { | 	if len(trampoline.params.View()) > 0 { | ||||||
| 		panic("trampoline should not have params") | 		panic("trampoline should not have params") | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
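With branch targets stored as IDs rather than *basicBlock pointers, the fallthrough checks in this file reduce to an integer comparison between a jump's decoded target and the ID of the next block in layout order. A minimal model of the markFallthroughJumps logic (simplified types; the real code decodes the target from rValue):

package main

import "fmt"

type BasicBlockID uint32

type instruction struct {
	opcode        string
	target        BasicBlockID // decoded from rValue in the real code
	isFallthrough bool
}

type block struct {
	id   BasicBlockID
	tail *instruction
}

// markFallthroughJumps marks tail jumps whose target is simply the next
// block in the final layout, so no branch needs to be emitted for them.
func markFallthroughJumps(layout []*block) {
	for i := 0; i < len(layout)-1; i++ {
		t := layout[i].tail
		if t.opcode == "jump" && t.target == layout[i+1].id {
			t.isFallthrough = true
		}
	}
}

func main() {
	b0 := &block{id: 0, tail: &instruction{opcode: "jump", target: 1}}
	b1 := &block{id: 1, tail: &instruction{opcode: "jump", target: 0}}
	markFallthroughJumps([]*block{b0, b1})
	fmt.Println(b0.tail.isFallthrough, b1.tail.isFallthrough) // true false
}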
							
								
								
									
39  vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/vs.go (generated, vendored)
|  | @ -15,17 +15,31 @@ import ( | ||||||
							|  | @ -15,17 +15,31 @@ import ( | ||||||
| // | // | ||||||
| // Variable is useful to track the SSA Values of a variable in the source program, and | // Variable is useful to track the SSA Values of a variable in the source program, and | ||||||
| // can be used to find the corresponding latest SSA Value via Builder.FindValue. | // can be used to find the corresponding latest SSA Value via Builder.FindValue. | ||||||
|  | // | ||||||
|  | // The higher 4 bits are used to store the Type of this variable. | ||||||
| type Variable uint32 | type Variable uint32 | ||||||
| 
 | 
 | ||||||
| // String implements fmt.Stringer. | // String implements fmt.Stringer. | ||||||
| func (v Variable) String() string { | func (v Variable) String() string { | ||||||
| 	return fmt.Sprintf("var%d", v) | 	return fmt.Sprintf("var%d", v&0x0fffffff) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (v Variable) setType(typ Type) Variable { | ||||||
|  | 	if v >= 1<<28 { | ||||||
|  | 		panic(fmt.Sprintf("Too large variable: %d", v)) | ||||||
|  | 	} | ||||||
|  | 	return Variable(typ)<<28 | v | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (v Variable) getType() Type { | ||||||
|  | 	return Type(v >> 28) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Value represents an SSA value with a type information. The relationship with Variable is 1: N (including 0), | // Value represents an SSA value with a type information. The relationship with Variable is 1: N (including 0), | ||||||
| // that means there might be multiple Variable(s) for a Value. | // that means there might be multiple Variable(s) for a Value. | ||||||
| // | // | ||||||
| // Higher 32-bit is used to store Type for this value. | // Bits 32 to 59 are used to store the unique identifier of the Instruction that generates this value, if any. | ||||||
|  | // Bits 60 to 63 are used to store the Type of this value. | ||||||
| type Value uint64 | type Value uint64 | ||||||
| 
 | 
 | ||||||
| // ValueID is the lower 32bit of Value, which is the pure identifier of Value without type info. | // ValueID is the lower 32bit of Value, which is the pure identifier of Value without type info. | ||||||
|  | @ -33,7 +47,7 @@ type ValueID uint32 | ||||||
| 
 | 
 | ||||||
| const ( | const ( | ||||||
| 	valueIDInvalid ValueID = math.MaxUint32 | 	valueIDInvalid ValueID = math.MaxUint32 | ||||||
| 	ValueInvalid   Value   = Value(valueIDInvalid) | 	ValueInvalid           = Value(valueIDInvalid) | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // Format creates a debug string for this Value using the data stored in Builder. | // Format creates a debug string for this Value using the data stored in Builder. | ||||||
|  | @ -54,7 +68,7 @@ func (v Value) formatWithType(b Builder) (ret string) { | ||||||
| 	if wazevoapi.SSALoggingEnabled { // This is useful to check live value analysis bugs. | 	if wazevoapi.SSALoggingEnabled { // This is useful to check live value analysis bugs. | ||||||
| 		if bd := b.(*builder); bd.donePostBlockLayoutPasses { | 		if bd := b.(*builder); bd.donePostBlockLayoutPasses { | ||||||
| 			id := v.ID() | 			id := v.ID() | ||||||
| 			ret += fmt.Sprintf("(ref=%d)", bd.valueRefCounts[id]) | 			ret += fmt.Sprintf("(ref=%d)", bd.valuesInfo[id].RefCount) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	return ret | 	return ret | ||||||
|  | @ -67,7 +81,7 @@ func (v Value) Valid() bool { | ||||||
| 
 | 
 | ||||||
| // Type returns the Type of this value. | // Type returns the Type of this value. | ||||||
| func (v Value) Type() Type { | func (v Value) Type() Type { | ||||||
| 	return Type(v >> 32) | 	return Type(v >> 60) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // ID returns the valueID of this value. | // ID returns the valueID of this value. | ||||||
|  | @ -77,7 +91,20 @@ func (v Value) ID() ValueID { | ||||||
| 
 | 
 | ||||||
| // setType sets a type to this Value and returns the updated Value. | // setType sets a type to this Value and returns the updated Value. | ||||||
| func (v Value) setType(typ Type) Value { | func (v Value) setType(typ Type) Value { | ||||||
| 	return v | Value(typ)<<32 | 	return v | Value(typ)<<60 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // setInstructionID sets an Instruction.id to this Value and returns the updated Value. | ||||||
|  | func (v Value) setInstructionID(id int) Value { | ||||||
|  | 	if id < 0 || uint(id) >= 1<<28 { | ||||||
|  | 		panic(fmt.Sprintf("Too large instruction ID: %d", id)) | ||||||
|  | 	} | ||||||
|  | 	return v | Value(id)<<32 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // instructionID() returns the Instruction.id of this Value. | ||||||
|  | func (v Value) instructionID() int { | ||||||
|  | 	return int(v>>32) & 0x0fffffff | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Values is a slice of Value. Use this instead of []Value to reuse the underlying memory. | // Values is a slice of Value. Use this instead of []Value to reuse the underlying memory. | ||||||
|  |  | ||||||
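The new encodings above are plain bit packing: Variable keeps its 4-bit Type in the top bits of a uint32, while Value keeps its 32-bit ID low, the generating Instruction's ID in bits 32 to 59, and its 4-bit Type in bits 60 to 63. A self-contained sketch of the Value half; the shifts and mask mirror the diff, but the demo values and the type code are made up:

    package main

    import "fmt"

    type (
        Type    uint8
        Value   uint64
        ValueID uint32
    )

    func (v Value) setType(t Type) Value          { return v | Value(t)<<60 }
    func (v Value) Type() Type                    { return Type(v >> 60) }
    func (v Value) setInstructionID(id int) Value { return v | Value(id)<<32 }
    func (v Value) instructionID() int            { return int(v>>32) & 0x0fffffff }
    func (v Value) ID() ValueID                   { return ValueID(v) }

    func main() {
        const typeI64 Type = 3 // illustrative type code, not wazevo's real encoding
        v := Value(42).setType(typeI64).setInstructionID(7)
        fmt.Println(v.ID(), v.Type(), v.instructionID()) // 42 3 7
    }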
4 changes: vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi/resetmap.go (generated, vendored)
|  | @ -5,9 +5,7 @@ func ResetMap[K comparable, V any](m map[K]V) map[K]V { | ||||||
| 	if m == nil { | 	if m == nil { | ||||||
| 		m = make(map[K]V) | 		m = make(map[K]V) | ||||||
| 	} else { | 	} else { | ||||||
| 		for v := range m { | 		clear(m) | ||||||
| 			delete(m, v) |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| 	return m | 	return m | ||||||
| } | } | ||||||
|  |  | ||||||
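The ResetMap change swaps a manual delete loop for the clear builtin, added in Go 1.21 (matching the module's new go 1.21 floor, visible in modules.txt below). clear empties a map in place without releasing its allocated buckets:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        clear(m)                      // Go 1.21 builtin: removes all entries in place
        fmt.Println(m == nil, len(m)) // false 0 -- the map stays allocated and reusable
    }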
6 changes: vendor/github.com/tetratelabs/wazero/internal/expctxkeys/importresolver.go (generated, vendored, new file)
|  | @ -0,0 +1,6 @@ | ||||||
|  | package expctxkeys | ||||||
|  | 
 | ||||||
|  | // ImportResolverKey is a context.Context Value key. | ||||||
|  | // Its associated value should be an ImportResolver. | ||||||
|  | // See issue 2294. | ||||||
|  | type ImportResolverKey struct{} | ||||||
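ImportResolverKey follows the standard context-key idiom: a unique empty struct type used as the key into a context.Context. A hedged sketch of how such a key is typically used; the resolver type below is a stand-in, not the real experimental.ImportResolver signature:

    package main

    import (
        "context"
        "fmt"
    )

    // importResolverKey mirrors the empty-struct context key above.
    type importResolverKey struct{}

    // importResolver is an illustrative stand-in for the real resolver type.
    type importResolver func(name string) any

    func main() {
        r := importResolver(func(name string) any { return nil })
        ctx := context.WithValue(context.Background(), importResolverKey{}, r)
        got, ok := ctx.Value(importResolverKey{}).(importResolver)
        fmt.Println(ok, got != nil) // true true
    }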
23 changes: vendor/github.com/tetratelabs/wazero/internal/platform/mremap_other.go (generated, vendored)
|  | @ -1,23 +0,0 @@ | ||||||
| //go:build !(darwin || linux || freebsd) || tinygo |  | ||||||
| 
 |  | ||||||
| package platform |  | ||||||
| 
 |  | ||||||
| func remapCodeSegmentAMD64(code []byte, size int) ([]byte, error) { |  | ||||||
| 	b, err := mmapCodeSegmentAMD64(size) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} |  | ||||||
| 	copy(b, code) |  | ||||||
| 	mustMunmapCodeSegment(code) |  | ||||||
| 	return b, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func remapCodeSegmentARM64(code []byte, size int) ([]byte, error) { |  | ||||||
| 	b, err := mmapCodeSegmentARM64(size) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} |  | ||||||
| 	copy(b, code) |  | ||||||
| 	mustMunmapCodeSegment(code) |  | ||||||
| 	return b, nil |  | ||||||
| } |  | ||||||
21 changes: vendor/github.com/tetratelabs/wazero/internal/platform/mremap_unix.go (generated, vendored)
|  | @ -1,21 +0,0 @@ | ||||||
| //go:build (darwin || linux || freebsd) && !tinygo |  | ||||||
| 
 |  | ||||||
| package platform |  | ||||||
| 
 |  | ||||||
| func remapCodeSegmentAMD64(code []byte, size int) ([]byte, error) { |  | ||||||
| 	return remapCodeSegment(code, size, mmapProtAMD64) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func remapCodeSegmentARM64(code []byte, size int) ([]byte, error) { |  | ||||||
| 	return remapCodeSegment(code, size, mmapProtARM64) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func remapCodeSegment(code []byte, size, prot int) ([]byte, error) { |  | ||||||
| 	b, err := mmapCodeSegment(size, prot) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} |  | ||||||
| 	copy(b, code) |  | ||||||
| 	mustMunmapCodeSegment(code) |  | ||||||
| 	return b, nil |  | ||||||
| } |  | ||||||
36 changes: vendor/github.com/tetratelabs/wazero/internal/platform/platform.go (generated, vendored)
|  | @ -36,28 +36,6 @@ func MmapCodeSegment(size int) ([]byte, error) { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // RemapCodeSegment reallocates the memory mapping of an existing code segment |  | ||||||
| // to increase its size. The previous code mapping is unmapped and must not be |  | ||||||
| // reused after the function returns. |  | ||||||
| // |  | ||||||
| // This is similar to mremap(2) on linux, and emulated on platforms which do not |  | ||||||
| // have this syscall. |  | ||||||
| // |  | ||||||
| // See https://man7.org/linux/man-pages/man2/mremap.2.html |  | ||||||
| func RemapCodeSegment(code []byte, size int) ([]byte, error) { |  | ||||||
| 	if size < len(code) { |  | ||||||
| 		panic("BUG: RemapCodeSegment with size less than code") |  | ||||||
| 	} |  | ||||||
| 	if code == nil { |  | ||||||
| 		return MmapCodeSegment(size) |  | ||||||
| 	} |  | ||||||
| 	if runtime.GOARCH == "amd64" { |  | ||||||
| 		return remapCodeSegmentAMD64(code, size) |  | ||||||
| 	} else { |  | ||||||
| 		return remapCodeSegmentARM64(code, size) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // MunmapCodeSegment unmaps the given memory region. | // MunmapCodeSegment unmaps the given memory region. | ||||||
| func MunmapCodeSegment(code []byte) error { | func MunmapCodeSegment(code []byte) error { | ||||||
| 	if len(code) == 0 { | 	if len(code) == 0 { | ||||||
|  | @ -65,17 +43,3 @@ func MunmapCodeSegment(code []byte) error { | ||||||
| 	} | 	} | ||||||
| 	return munmapCodeSegment(code) | 	return munmapCodeSegment(code) | ||||||
| } | } | ||||||
| 
 |  | ||||||
| // mustMunmapCodeSegment panics instead of returning an error to the |  | ||||||
| // application. |  | ||||||
| // |  | ||||||
| // # Why panic? |  | ||||||
| // |  | ||||||
| // It is less disruptive to the application to leak the previous block if it |  | ||||||
| // could be unmapped than to leak the new block and return an error. |  | ||||||
| // Realistically, either scenarios are pretty hard to debug, so we panic. |  | ||||||
| func mustMunmapCodeSegment(code []byte) { |  | ||||||
| 	if err := munmapCodeSegment(code); err != nil { |  | ||||||
| 		panic(err) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
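All of the remap helpers deleted here (and the RemapCodeSegment entry point removed from platform.go just above) shared one emulation of mremap(2): map a fresh, larger region, copy the old code over, then unmap the original. A Unix-only sketch of that pattern on anonymous read/write memory; the real code also set PROT_EXEC and per-architecture flags, which are omitted here:

    package main

    import (
        "fmt"
        "syscall"
    )

    // remap emulates mremap(2): map a larger region, copy, unmap the original.
    func remap(old []byte, size int) ([]byte, error) {
        b, err := syscall.Mmap(-1, 0, size,
            syscall.PROT_READ|syscall.PROT_WRITE,
            syscall.MAP_PRIVATE|syscall.MAP_ANON)
        if err != nil {
            return nil, err
        }
        copy(b, old)
        if old != nil {
            if err := syscall.Munmap(old); err != nil {
                return nil, err
            }
        }
        return b, nil
    }

    func main() {
        a, _ := syscall.Mmap(-1, 0, 8,
            syscall.PROT_READ|syscall.PROT_WRITE,
            syscall.MAP_PRIVATE|syscall.MAP_ANON)
        copy(a, "old code")
        b, err := remap(a, 16)
        fmt.Println(err, string(b[:8])) // <nil> old code
    }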
3 changes: vendor/github.com/tetratelabs/wazero/internal/wasm/binary/value.go (generated, vendored)
|  | @ -54,7 +54,6 @@ func decodeUTF8(r *bytes.Reader, contextFormat string, contextArgs ...interface{ | ||||||
| 		return "", 0, fmt.Errorf("%s is not valid UTF-8", fmt.Sprintf(contextFormat, contextArgs...)) | 		return "", 0, fmt.Errorf("%s is not valid UTF-8", fmt.Sprintf(contextFormat, contextArgs...)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// TODO: use unsafe.String after flooring Go 1.20. | 	ret := unsafe.String(&buf[0], int(size)) | ||||||
| 	ret := *(*string)(unsafe.Pointer(&buf)) |  | ||||||
| 	return ret, size + uint32(sizeOfSize), nil | 	return ret, size + uint32(sizeOfSize), nil | ||||||
| } | } | ||||||
|  |  | ||||||
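With the raised Go floor, the old TODO is resolved: unsafe.String (added in Go 1.20) builds a string from a byte pointer and length without copying, replacing the previous slice-header cast that relied on string/slice header layout. A small demonstration:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        buf := []byte("hello")
        // Zero-copy view of buf as a string; buf must not be modified afterwards.
        s := unsafe.String(&buf[0], len(buf))
        fmt.Println(s, len(s)) // hello 5
    }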
9 changes: vendor/github.com/tetratelabs/wazero/internal/wasm/func_validation.go (generated, vendored)
|  | @ -451,14 +451,14 @@ func (m *Module) validateFunctionWithMaxStackValues( | ||||||
| 				return fmt.Errorf("read immediate: %w", err) | 				return fmt.Errorf("read immediate: %w", err) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			list := make([]uint32, nl) | 			sts.ls = sts.ls[:0] | ||||||
| 			for i := uint32(0); i < nl; i++ { | 			for i := uint32(0); i < nl; i++ { | ||||||
| 				l, n, err := leb128.DecodeUint32(br) | 				l, n, err := leb128.DecodeUint32(br) | ||||||
| 				if err != nil { | 				if err != nil { | ||||||
| 					return fmt.Errorf("read immediate: %w", err) | 					return fmt.Errorf("read immediate: %w", err) | ||||||
| 				} | 				} | ||||||
| 				num += n | 				num += n | ||||||
| 				list[i] = l | 				sts.ls = append(sts.ls, l) | ||||||
| 			} | 			} | ||||||
| 			ln, n, err := leb128.DecodeUint32(br) | 			ln, n, err := leb128.DecodeUint32(br) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
|  | @ -511,7 +511,7 @@ func (m *Module) validateFunctionWithMaxStackValues( | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			for _, l := range list { | 			for _, l := range sts.ls { | ||||||
| 				if int(l) >= len(controlBlockStack.stack) { | 				if int(l) >= len(controlBlockStack.stack) { | ||||||
| 					return fmt.Errorf("invalid l param given for %s", OpcodeBrTableName) | 					return fmt.Errorf("invalid l param given for %s", OpcodeBrTableName) | ||||||
| 				} | 				} | ||||||
|  | @ -2003,6 +2003,8 @@ var vecSplatValueTypes = [...]ValueType{ | ||||||
| type stacks struct { | type stacks struct { | ||||||
| 	vs valueTypeStack | 	vs valueTypeStack | ||||||
| 	cs controlBlockStack | 	cs controlBlockStack | ||||||
|  | 	// ls is the label slice that is reused for each br_table instruction. | ||||||
|  | 	ls []uint32 | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (sts *stacks) reset(functionType *FunctionType) { | func (sts *stacks) reset(functionType *FunctionType) { | ||||||
|  | @ -2012,6 +2014,7 @@ func (sts *stacks) reset(functionType *FunctionType) { | ||||||
| 	sts.vs.maximumStackPointer = 0 | 	sts.vs.maximumStackPointer = 0 | ||||||
| 	sts.cs.stack = sts.cs.stack[:0] | 	sts.cs.stack = sts.cs.stack[:0] | ||||||
| 	sts.cs.stack = append(sts.cs.stack, controlBlock{blockType: functionType}) | 	sts.cs.stack = append(sts.cs.stack, controlBlock{blockType: functionType}) | ||||||
|  | 	sts.ls = sts.ls[:0] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| type controlBlockStack struct { | type controlBlockStack struct { | ||||||
|  |  | ||||||
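The br_table change above replaces a per-instruction make([]uint32, nl) with a scratch slice on the reused stacks struct: truncate to length zero, then append, so the backing array survives across instructions. The pattern in isolation:

    package main

    import "fmt"

    // stacks carries a reusable label slice, like the ls field added above.
    type stacks struct{ ls []uint32 }

    // decode simulates reading a br_table's label vector into the scratch slice.
    func (s *stacks) decode(labels ...uint32) []uint32 {
        s.ls = s.ls[:0] // drop old contents, keep the backing array
        s.ls = append(s.ls, labels...)
        return s.ls
    }

    func main() {
        var s stacks
        fmt.Println(s.decode(1, 2, 3)) // [1 2 3]
        fmt.Println(s.decode(9))       // [9], no fresh allocation needed
    }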
10 changes: vendor/github.com/tetratelabs/wazero/internal/wasm/memory.go (generated, vendored)
|  | @ -52,7 +52,8 @@ type MemoryInstance struct { | ||||||
| 	definition api.MemoryDefinition | 	definition api.MemoryDefinition | ||||||
| 
 | 
 | ||||||
| 	// Mux is used in interpreter mode to prevent overlapping calls to atomic instructions, | 	// Mux is used in interpreter mode to prevent overlapping calls to atomic instructions, | ||||||
| 	// introduced with WebAssembly threads proposal. | 	// introduced with WebAssembly threads proposal, and in compiler mode to make memory modifications | ||||||
|  | 	// within Grow non-racy for the Go race detector. | ||||||
| 	Mux sync.Mutex | 	Mux sync.Mutex | ||||||
| 
 | 
 | ||||||
| 	// waiters implements atomic wait and notify. It is implemented similarly to golang.org/x/sync/semaphore, | 	// waiters implements atomic wait and notify. It is implemented similarly to golang.org/x/sync/semaphore, | ||||||
|  | @ -227,6 +228,11 @@ func MemoryPagesToBytesNum(pages uint32) (bytesNum uint64) { | ||||||
| 
 | 
 | ||||||
| // Grow implements the same method as documented on api.Memory. | // Grow implements the same method as documented on api.Memory. | ||||||
| func (m *MemoryInstance) Grow(delta uint32) (result uint32, ok bool) { | func (m *MemoryInstance) Grow(delta uint32) (result uint32, ok bool) { | ||||||
|  | 	if m.Shared { | ||||||
|  | 		m.Mux.Lock() | ||||||
|  | 		defer m.Mux.Unlock() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	currentPages := m.Pages() | 	currentPages := m.Pages() | ||||||
| 	if delta == 0 { | 	if delta == 0 { | ||||||
| 		return currentPages, true | 		return currentPages, true | ||||||
|  | @ -299,6 +305,7 @@ func PagesToUnitOfBytes(pages uint32) string { | ||||||
| 
 | 
 | ||||||
| // Uses atomic write to update the length of a slice. | // Uses atomic write to update the length of a slice. | ||||||
| func atomicStoreLengthAndCap(slice *[]byte, length uintptr, cap uintptr) { | func atomicStoreLengthAndCap(slice *[]byte, length uintptr, cap uintptr) { | ||||||
|  | 	//nolint:staticcheck | ||||||
| 	slicePtr := (*reflect.SliceHeader)(unsafe.Pointer(slice)) | 	slicePtr := (*reflect.SliceHeader)(unsafe.Pointer(slice)) | ||||||
| 	capPtr := (*uintptr)(unsafe.Pointer(&slicePtr.Cap)) | 	capPtr := (*uintptr)(unsafe.Pointer(&slicePtr.Cap)) | ||||||
| 	atomic.StoreUintptr(capPtr, cap) | 	atomic.StoreUintptr(capPtr, cap) | ||||||
|  | @ -308,6 +315,7 @@ func atomicStoreLengthAndCap(slice *[]byte, length uintptr, cap uintptr) { | ||||||
| 
 | 
 | ||||||
| // Uses atomic write to update the length of a slice. | // Uses atomic write to update the length of a slice. | ||||||
| func atomicStoreLength(slice *[]byte, length uintptr) { | func atomicStoreLength(slice *[]byte, length uintptr) { | ||||||
|  | 	//nolint:staticcheck | ||||||
| 	slicePtr := (*reflect.SliceHeader)(unsafe.Pointer(slice)) | 	slicePtr := (*reflect.SliceHeader)(unsafe.Pointer(slice)) | ||||||
| 	lenPtr := (*uintptr)(unsafe.Pointer(&slicePtr.Len)) | 	lenPtr := (*uintptr)(unsafe.Pointer(&slicePtr.Len)) | ||||||
| 	atomic.StoreUintptr(lenPtr, length) | 	atomic.StoreUintptr(lenPtr, length) | ||||||
|  |  | ||||||
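Grow now takes the mutex only for shared memories, which keeps unshared (single-goroutine) growth lock-free while making concurrent growth of shared memory visible to the Go race detector. A sketch of the same guard, with illustrative field names rather than the real MemoryInstance layout:

    package main

    import (
        "fmt"
        "sync"
    )

    // memory is an illustrative stand-in for MemoryInstance.
    type memory struct {
        mux    sync.Mutex
        shared bool
        pages  uint32
    }

    // grow locks only when the memory is shared, mirroring the diff above.
    func (m *memory) grow(delta uint32) (previous uint32) {
        if m.shared {
            m.mux.Lock()
            defer m.mux.Unlock()
        }
        previous = m.pages
        m.pages += delta
        return previous
    }

    func main() {
        m := &memory{shared: true}
        fmt.Println(m.grow(2), m.pages) // 0 2
    }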
25 changes: vendor/github.com/tetratelabs/wazero/internal/wasm/store.go (generated, vendored)
|  | @ -3,6 +3,7 @@ package wasm | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"encoding/binary" | 	"encoding/binary" | ||||||
|  | 	"errors" | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"sync" | 	"sync" | ||||||
| 	"sync/atomic" | 	"sync/atomic" | ||||||
|  | @ -352,7 +353,7 @@ func (s *Store) instantiate( | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err = m.resolveImports(module); err != nil { | 	if err = m.resolveImports(ctx, module); err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -410,13 +411,23 @@ func (s *Store) instantiate( | ||||||
| 	return | 	return | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (m *ModuleInstance) resolveImports(module *Module) (err error) { | func (m *ModuleInstance) resolveImports(ctx context.Context, module *Module) (err error) { | ||||||
|  | 	// Check if ctx contains an ImportResolver. | ||||||
|  | 	resolveImport, _ := ctx.Value(expctxkeys.ImportResolverKey{}).(experimental.ImportResolver) | ||||||
|  | 
 | ||||||
| 	for moduleName, imports := range module.ImportPerModule { | 	for moduleName, imports := range module.ImportPerModule { | ||||||
| 		var importedModule *ModuleInstance | 		var importedModule *ModuleInstance | ||||||
|  | 		if resolveImport != nil { | ||||||
|  | 			if v := resolveImport(moduleName); v != nil { | ||||||
|  | 				importedModule = v.(*ModuleInstance) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		if importedModule == nil { | ||||||
| 			importedModule, err = m.s.module(moduleName) | 			importedModule, err = m.s.module(moduleName) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return err | 				return err | ||||||
| 			} | 			} | ||||||
|  | 		} | ||||||
| 
 | 
 | ||||||
| 		for _, i := range imports { | 		for _, i := range imports { | ||||||
| 			var imported *Export | 			var imported *Export | ||||||
|  | @ -649,20 +660,20 @@ func (s *Store) GetFunctionTypeID(t *FunctionType) (FunctionTypeID, error) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // CloseWithExitCode implements the same method as documented on wazero.Runtime. | // CloseWithExitCode implements the same method as documented on wazero.Runtime. | ||||||
| func (s *Store) CloseWithExitCode(ctx context.Context, exitCode uint32) (err error) { | func (s *Store) CloseWithExitCode(ctx context.Context, exitCode uint32) error { | ||||||
| 	s.mux.Lock() | 	s.mux.Lock() | ||||||
| 	defer s.mux.Unlock() | 	defer s.mux.Unlock() | ||||||
| 	// Close modules in reverse initialization order. | 	// Close modules in reverse initialization order. | ||||||
|  | 	var errs []error | ||||||
| 	for m := s.moduleList; m != nil; m = m.next { | 	for m := s.moduleList; m != nil; m = m.next { | ||||||
| 		// If closing this module errs, proceed anyway to close the others. | 		// If closing this module errs, proceed anyway to close the others. | ||||||
| 		if e := m.closeWithExitCode(ctx, exitCode); e != nil && err == nil { | 		if err := m.closeWithExitCode(ctx, exitCode); err != nil { | ||||||
| 			// TODO: use multiple errors handling in Go 1.20. | 			errs = append(errs, err) | ||||||
| 			err = e // first error |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	s.moduleList = nil | 	s.moduleList = nil | ||||||
| 	s.nameToModule = nil | 	s.nameToModule = nil | ||||||
| 	s.nameToModuleCap = 0 | 	s.nameToModuleCap = 0 | ||||||
| 	s.typeIDs = nil | 	s.typeIDs = nil | ||||||
| 	return | 	return errors.Join(errs...) | ||||||
| } | } | ||||||
|  |  | ||||||
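CloseWithExitCode previously kept only the first close error (the old TODO); with the raised Go floor it can collect them all and return errors.Join (Go 1.20+), which wraps every non-nil error and cooperates with errors.Is:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        e1 := errors.New("module a failed")
        e2 := errors.New("module b failed")

        var errs []error
        for _, e := range []error{e1, nil, e2} {
            if e != nil { // proceed past nil results, as the close loop above does
                errs = append(errs, e)
            }
        }
        err := errors.Join(errs...)
        fmt.Println(errors.Is(err, e1), errors.Is(err, e2)) // true true
    }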
4 changes: vendor/github.com/tetratelabs/wazero/internal/wasm/store_module_list.go (generated, vendored)
|  | @ -3,8 +3,6 @@ package wasm | ||||||
| import ( | import ( | ||||||
| 	"errors" | 	"errors" | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 
 |  | ||||||
| 	"github.com/tetratelabs/wazero/api" |  | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // deleteModule makes the moduleName available for instantiation again. | // deleteModule makes the moduleName available for instantiation again. | ||||||
|  | @ -88,7 +86,7 @@ func (s *Store) registerModule(m *ModuleInstance) error { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Module implements wazero.Runtime Module | // Module implements wazero.Runtime Module | ||||||
| func (s *Store) Module(moduleName string) api.Module { | func (s *Store) Module(moduleName string) *ModuleInstance { | ||||||
| 	m, err := s.module(moduleName) | 	m, err := s.module(moduleName) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil | 		return nil | ||||||
|  |  | ||||||
1 change: vendor/github.com/tetratelabs/wazero/internal/wasmdebug/dwarf.go (generated, vendored)
|  | @ -171,7 +171,6 @@ entry: | ||||||
| 	// Advance the line reader for the found position. | 	// Advance the line reader for the found position. | ||||||
| 	lineReader.Seek(ln.pos) | 	lineReader.Seek(ln.pos) | ||||||
| 	err = lineReader.Next(&le) | 	err = lineReader.Next(&le) | ||||||
| 
 |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		// If we reach this block, that means there's a bug in the []line creation logic above. | 		// If we reach this block, that means there's a bug in the []line creation logic above. | ||||||
| 		panic("BUG: stored dwarf.LineReaderPos is invalid") | 		panic("BUG: stored dwarf.LineReaderPos is invalid") | ||||||
|  |  | ||||||
8 changes: vendor/github.com/tetratelabs/wazero/runtime.go (generated, vendored)
|  | @ -197,7 +197,13 @@ func (r *runtime) Module(moduleName string) api.Module { | ||||||
| 	if len(moduleName) == 0 { | 	if len(moduleName) == 0 { | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 	return r.store.Module(moduleName) | 	m := r.store.Module(moduleName) | ||||||
|  | 	if m == nil { | ||||||
|  | 		return nil | ||||||
|  | 	} else if m.Source.IsHostModule { | ||||||
|  | 		return hostModuleInstance{m} | ||||||
|  | 	} | ||||||
|  | 	return m | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // CompileModule implements Runtime.CompileModule | // CompileModule implements Runtime.CompileModule | ||||||
|  |  | ||||||
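The nil check added here matters because store.Module now returns the concrete *ModuleInstance: passing a nil pointer straight through the api.Module interface would yield a non-nil interface value, the classic typed-nil pitfall. An illustration with stand-in types:

    package main

    import "fmt"

    type Module interface{ Name() string }

    type moduleInstance struct{}

    func (*moduleInstance) Name() string { return "m" }

    // lookup mimics store.Module: returns a typed nil pointer when not found.
    func lookup(found bool) *moduleInstance {
        if found {
            return &moduleInstance{}
        }
        return nil
    }

    // bad returns the pointer straight through the interface, so a nil
    // *moduleInstance becomes a non-nil Module.
    func bad(found bool) Module { return lookup(found) }

    // good checks the concrete value first, as the new runtime.Module does.
    func good(found bool) Module {
        if m := lookup(found); m != nil {
            return m
        }
        return nil
    }

    func main() {
        fmt.Println(bad(false) == nil)  // false: interface holds (*moduleInstance)(nil)
        fmt.Println(good(false) == nil) // true
    }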
3 changes: vendor/github.com/tetratelabs/wazero/sys/stat_unsupported.go (generated, vendored)
|  | @ -7,9 +7,6 @@ import "io/fs" | ||||||
| // sysParseable is only used here as we define "supported" as being able to | // sysParseable is only used here as we define "supported" as being able to | ||||||
| // parse `info.Sys()`. The above `go:build` constraints exclude 32-bit until | // parse `info.Sys()`. The above `go:build` constraints exclude 32-bit until | ||||||
| // that's requested. | // that's requested. | ||||||
| // |  | ||||||
| // TODO: When Go 1.21 is out, use the "unix" build constraint (as 1.21 makes |  | ||||||
| // our floor Go version 1.19. |  | ||||||
| const sysParseable = false | const sysParseable = false | ||||||
| 
 | 
 | ||||||
| func statFromFileInfo(info fs.FileInfo) Stat_t { | func statFromFileInfo(info fs.FileInfo) Stat_t { | ||||||
|  |  | ||||||
6 changes: vendor/modules.txt (vendored)
|  | @ -30,7 +30,7 @@ codeberg.org/gruf/go-fastcopy | ||||||
| # codeberg.org/gruf/go-fastpath/v2 v2.0.0 | # codeberg.org/gruf/go-fastpath/v2 v2.0.0 | ||||||
| ## explicit; go 1.14 | ## explicit; go 1.14 | ||||||
| codeberg.org/gruf/go-fastpath/v2 | codeberg.org/gruf/go-fastpath/v2 | ||||||
| # codeberg.org/gruf/go-ffmpreg v0.2.4 | # codeberg.org/gruf/go-ffmpreg v0.2.5 | ||||||
| ## explicit; go 1.22.0 | ## explicit; go 1.22.0 | ||||||
| codeberg.org/gruf/go-ffmpreg/embed/ffmpeg | codeberg.org/gruf/go-ffmpreg/embed/ffmpeg | ||||||
| codeberg.org/gruf/go-ffmpreg/embed/ffprobe | codeberg.org/gruf/go-ffmpreg/embed/ffprobe | ||||||
|  | @ -842,8 +842,8 @@ github.com/tdewolff/parse/v2/strconv | ||||||
| # github.com/technologize/otel-go-contrib v1.1.1 | # github.com/technologize/otel-go-contrib v1.1.1 | ||||||
| ## explicit; go 1.17 | ## explicit; go 1.17 | ||||||
| github.com/technologize/otel-go-contrib/otelginmetrics | github.com/technologize/otel-go-contrib/otelginmetrics | ||||||
| # github.com/tetratelabs/wazero v1.7.3 | # github.com/tetratelabs/wazero v1.8.0 | ||||||
| ## explicit; go 1.20 | ## explicit; go 1.21 | ||||||
| github.com/tetratelabs/wazero | github.com/tetratelabs/wazero | ||||||
| github.com/tetratelabs/wazero/api | github.com/tetratelabs/wazero/api | ||||||
| github.com/tetratelabs/wazero/experimental | github.com/tetratelabs/wazero/experimental | ||||||
|  |  | ||||||