Mirror of https://github.com/superseriousbusiness/gotosocial.git (synced 2025-10-31 14:42:26 -05:00)
[chore]: Bump github.com/KimMachineGun/automemlimit from 0.2.4 to 0.2.5 (#1666)

Bumps [github.com/KimMachineGun/automemlimit](https://github.com/KimMachineGun/automemlimit) from 0.2.4 to 0.2.5.

- [Release notes](https://github.com/KimMachineGun/automemlimit/releases)
- [Commits](https://github.com/KimMachineGun/automemlimit/compare/v0.2.4...v0.2.5)

---
updated-dependencies:
- dependency-name: github.com/KimMachineGun/automemlimit
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
This commit is contained in:

parent 3f9b2336c0
commit 57dc742c76

200 changed files with 16392 additions and 38190 deletions
							
								
								
									
go.mod (7 lines changed)
							|  | @ -15,7 +15,7 @@ require ( | |||
| 	codeberg.org/gruf/go-runners v1.6.1 | ||||
| 	codeberg.org/gruf/go-sched v1.2.3 | ||||
| 	codeberg.org/gruf/go-store/v2 v2.2.2 | ||||
| 	github.com/KimMachineGun/automemlimit v0.2.4 | ||||
| 	github.com/KimMachineGun/automemlimit v0.2.5 | ||||
| 	github.com/abema/go-mp4 v0.10.1 | ||||
| 	github.com/buckket/go-blurhash v1.1.0 | ||||
| 	github.com/coreos/go-oidc/v3 v3.5.0 | ||||
|  | @ -79,8 +79,8 @@ require ( | |||
| 	github.com/aymerick/douceur v0.2.0 // indirect | ||||
| 	github.com/bytedance/sonic v1.8.0 // indirect | ||||
| 	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect | ||||
| 	github.com/cilium/ebpf v0.4.0 // indirect | ||||
| 	github.com/containerd/cgroups v1.0.4 // indirect | ||||
| 	github.com/cilium/ebpf v0.9.1 // indirect | ||||
| 	github.com/containerd/cgroups/v3 v3.0.1 // indirect | ||||
| 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect | ||||
| 	github.com/davecgh/go-spew v1.1.1 // indirect | ||||
| 	github.com/docker/go-units v0.4.0 // indirect | ||||
|  | @ -100,7 +100,6 @@ require ( | |||
| 	github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 // indirect | ||||
| 	github.com/goccy/go-json v0.10.0 // indirect | ||||
| 	github.com/godbus/dbus/v5 v5.0.4 // indirect | ||||
| 	github.com/gogo/protobuf v1.3.2 // indirect | ||||
| 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect | ||||
| 	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect | ||||
| 	github.com/golang/protobuf v1.5.2 // indirect | ||||
|  |  | |||
							
								
								
									
go.sum (19 lines changed)
							|  | @ -88,8 +88,8 @@ codeberg.org/gruf/go-store/v2 v2.2.2/go.mod h1:QRM3LUAfYyoGMWLTqA1WzohxQgYqPFiVv | |||
| dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= | ||||
| github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | ||||
| github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= | ||||
| github.com/KimMachineGun/automemlimit v0.2.4 h1:GBty8TK8k0aJer1Pq5/3Vdt2ef+YpLhcqNo+PSD5CoI= | ||||
| github.com/KimMachineGun/automemlimit v0.2.4/go.mod h1:38QAnnnNhnFuAIW3+aPlaVUHqzE9buJYZK3m/jsra8E= | ||||
| github.com/KimMachineGun/automemlimit v0.2.5 h1:+wWu5hu/dVqWWauY2rZmlGN+z7SnP02gMVQpcP7xvrk= | ||||
| github.com/KimMachineGun/automemlimit v0.2.5/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0= | ||||
| github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= | ||||
| github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= | ||||
| github.com/abema/go-mp4 v0.10.1 h1:wOhZgNxjduc8r4FJdwPa5x/gdBSSX+8MTnfNj/xkJaE= | ||||
|  | @ -113,8 +113,8 @@ github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583j | |||
| github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= | ||||
| github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= | ||||
| github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= | ||||
| github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA= | ||||
| github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= | ||||
| github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= | ||||
| github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= | ||||
| github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= | ||||
| github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= | ||||
| github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= | ||||
|  | @ -122,8 +122,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht | |||
| github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ= | ||||
| github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= | ||||
| github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= | ||||
| github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= | ||||
| github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= | ||||
| github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE= | ||||
| github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw= | ||||
| github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw= | ||||
| github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM= | ||||
| github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= | ||||
|  | @ -172,7 +172,6 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 | |||
| github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= | ||||
| github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= | ||||
| github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= | ||||
| github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= | ||||
| github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= | ||||
| github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | ||||
| github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= | ||||
|  | @ -231,8 +230,6 @@ github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= | |||
| github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= | ||||
| github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= | ||||
| github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= | ||||
| github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= | ||||
| github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= | ||||
| github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= | ||||
| github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= | ||||
| github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= | ||||
|  | @ -396,7 +393,6 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV | |||
| github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= | ||||
| github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= | ||||
| github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= | ||||
| github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= | ||||
| github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= | ||||
| github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= | ||||
| github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= | ||||
|  | @ -823,7 +819,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w | |||
| golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
|  | @ -904,7 +899,6 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY | |||
| golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||
| golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||
| golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||
| golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||
| golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= | ||||
| golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= | ||||
| golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= | ||||
|  | @ -913,7 +907,6 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f | |||
| golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= | ||||
| golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= | ||||
|  |  | |||
							
								
								
									
vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go (15 lines changed, generated, vendored)
							|  | @ -4,8 +4,9 @@ | |||
| package memlimit | ||||
| 
 | ||||
| import ( | ||||
| 	"github.com/containerd/cgroups" | ||||
| 	v2 "github.com/containerd/cgroups/v2" | ||||
| 	"github.com/containerd/cgroups/v3" | ||||
| 	"github.com/containerd/cgroups/v3/cgroup1" | ||||
| 	"github.com/containerd/cgroups/v3/cgroup2" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
|  | @ -25,12 +26,14 @@ func FromCgroup() (uint64, error) { | |||
| 
 | ||||
| // FromCgroupV1 returns the memory limit from the cgroup v1. | ||||
| func FromCgroupV1() (uint64, error) { | ||||
| 	cg, err := cgroups.Load(cgroups.SingleSubsystem(cgroups.V1, cgroups.Memory), cgroups.RootPath) | ||||
| 	cg, err := cgroup1.Load(cgroup1.RootPath, cgroup1.WithHiearchy( | ||||
| 		cgroup1.SingleSubsystem(cgroup1.Default, cgroup1.Memory), | ||||
| 	)) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	metrics, err := cg.Stat(cgroups.IgnoreNotExist) | ||||
| 	metrics, err := cg.Stat(cgroup1.IgnoreNotExist) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} else if metrics.Memory == nil { | ||||
|  | @ -42,12 +45,12 @@ func FromCgroupV1() (uint64, error) { | |||
| 
 | ||||
| // FromCgroupV2 returns the memory limit from the cgroup v2. | ||||
| func FromCgroupV2() (uint64, error) { | ||||
| 	path, err := v2.NestedGroupPath("") | ||||
| 	path, err := cgroup2.NestedGroupPath("") | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	m, err := v2.LoadManager(cgroupMountPoint, path) | ||||
| 	m, err := cgroup2.Load(path, cgroup2.WithMountpoint(cgroupMountPoint)) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  |  | |||
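The rewritten cgroups.go keeps the exported entry points while migrating from the old containerd/cgroups and cgroups/v2 packages to the consolidated cgroups/v3 module. A minimal sketch of calling the unchanged surface from application code, with the import path taken from the vendored package above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/KimMachineGun/automemlimit/memlimit"
)

func main() {
	// FromCgroup picks the cgroup v1 or v2 path and returns the memory
	// limit in bytes, using the containerd/cgroups/v3 code shown above.
	limit, err := memlimit.FromCgroup()
	if err != nil {
		log.Fatalf("reading cgroup memory limit: %v", err)
	}
	fmt.Printf("cgroup memory limit: %d bytes\n", limit)
}
```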
							
								
								
									
vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go (10 lines changed, generated, vendored)
							|  | @ -36,6 +36,15 @@ var ( | |||
| // If AUTOMEMLIMIT is not set, it defaults to 0.9. (10% is the headroom for memory sources the Go runtime is unaware of.) | ||||
| // If GOMEMLIMIT is already set or AUTOMEMLIMIT=off, this function does nothing. | ||||
| func SetGoMemLimitWithEnv() { | ||||
| 	snapshot := debug.SetMemoryLimit(-1) | ||||
| 	defer func() { | ||||
| 		err := recover() | ||||
| 		if err != nil { | ||||
| 			logger.Printf("panic during SetGoMemLimitWithEnv, rolling back to previous value %d: %v\n", snapshot, err) | ||||
| 			debug.SetMemoryLimit(snapshot) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	if os.Getenv(envAUTOMEMLIMIT_DEBUG) == "true" { | ||||
| 		logger = log.Default() | ||||
| 	} | ||||
|  | @ -97,6 +106,7 @@ func cappedFloat2Int(f float64) int64 { | |||
| 	} | ||||
| 	return int64(f) | ||||
| } | ||||
| 
 | ||||
| // Limit is a helper Provider function that returns the given limit. | ||||
| func Limit(limit uint64) func() (uint64, error) { | ||||
| 	return func() (uint64, error) { | ||||
|  |  | |||
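memlimit.go now snapshots the current runtime limit and rolls it back if SetGoMemLimitWithEnv panics. For context, this is the function applications call (directly, or through the package's init-time blank import) to derive GOMEMLIMIT from the cgroup limit; a minimal sketch of the direct call, using only the API visible in the diff:

```go
package main

import "github.com/KimMachineGun/automemlimit/memlimit"

func main() {
	// Applies AUTOMEMLIMIT (default 0.9) to the cgroup memory limit and sets
	// GOMEMLIMIT; it does nothing if GOMEMLIMIT is already set or
	// AUTOMEMLIMIT=off. With the patch above, a panic inside the call rolls
	// the runtime limit back to its previous value.
	memlimit.SetGoMemLimitWithEnv()
}
```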
							
								
								
									
vendor/github.com/cilium/ebpf/.gitignore (1 line changed, generated, vendored)
							|  | @ -5,6 +5,7 @@ | |||
| *.so | ||||
| *.dylib | ||||
| *.o | ||||
| !*_bpf*.o | ||||
| 
 | ||||
| # Test binary, build with `go test -c` | ||||
| *.test | ||||
|  |  | |||
							
								
								
									
vendor/github.com/cilium/ebpf/.golangci.yaml (28 lines changed, generated, vendored, new file)
							|  | @ -0,0 +1,28 @@ | |||
| --- | ||||
| issues: | ||||
|   exclude-rules: | ||||
|     # syscall param structs will have unused fields in Go code. | ||||
|     - path: syscall.*.go | ||||
|       linters: | ||||
|         - structcheck | ||||
| 
 | ||||
| linters: | ||||
|   disable-all: true | ||||
|   enable: | ||||
|     - deadcode | ||||
|     - errcheck | ||||
|     - goimports | ||||
|     - gosimple | ||||
|     - govet | ||||
|     - ineffassign | ||||
|     - misspell | ||||
|     - staticcheck | ||||
|     - structcheck | ||||
|     - typecheck | ||||
|     - unused | ||||
|     - varcheck | ||||
| 
 | ||||
|     # Could be enabled later: | ||||
|     # - gocyclo | ||||
|     # - maligned | ||||
|     # - gosec | ||||
							
								
								
									
vendor/github.com/cilium/ebpf/ARCHITECTURE.md (10 lines changed, generated, vendored)
							|  | @ -57,7 +57,7 @@ Objects | |||
| loading a spec will fail because the kernel is too old, or a feature is not | ||||
| enabled. There are multiple ways the library deals with that: | ||||
| 
 | ||||
| * Fallback: older kernels don't allowing naming programs and maps. The library | ||||
| * Fallback: older kernels don't allow naming programs and maps. The library | ||||
|   automatically detects support for names, and omits them during load if | ||||
|   necessary. This works since name is primarily a debug aid. | ||||
| 
 | ||||
|  | @ -68,7 +68,7 @@ enabled. There are multiple ways the library deals with that: | |||
| Once program and map objects are loaded they expose the kernel's low-level API, | ||||
| e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer | ||||
| wrappers on top of the low-level API, like `MapIterator`. The low-level API is | ||||
| useful as an out when our higher-level API doesn't support a particular use case. | ||||
| useful when our higher-level API doesn't support a particular use case. | ||||
| 
 | ||||
| Links | ||||
| --- | ||||
|  | @ -78,3 +78,9 @@ tend to use bpf_link to do so. Older hooks unfortunately use a combination of | |||
| syscalls, netlink messages, etc. Adding support for a new link type should not | ||||
| pull in large dependencies like netlink, so XDP programs or tracepoints are | ||||
| out of scope. | ||||
| 
 | ||||
| Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds | ||||
| to BPF_LINK_TRACING. In general, these types should be unexported as long as they | ||||
| don't export methods outside of the Link interface. Each Go type may have multiple | ||||
| exported constructors. For example `AttachTracing` and `AttachLSM` create a | ||||
| tracing link, but are distinct functions since they may require different arguments. | ||||
|  |  | |||
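The new ARCHITECTURE.md paragraph describes the one-Go-type-per-bpf_link_type convention and constructors such as AttachTracing and AttachLSM. As a rough, hypothetical fragment of what calling such a constructor looks like (the option struct and its Program field are assumptions based on the link package's documented API, and prog stands for an already-loaded *ebpf.Program):

```go
// Sketch only: names outside the Link interface are assumed, not taken from this diff.
l, err := link.AttachTracing(link.TracingOptions{
	Program: prog, // assumed: a loaded fentry/fexit-style *ebpf.Program
})
if err != nil {
	log.Fatalf("attaching tracing link: %v", err)
}
defer l.Close() // detach when done
```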
							
								
								
									
vendor/github.com/cilium/ebpf/CONTRIBUTING.md (25 lines changed, generated, vendored)
							|  | @ -6,8 +6,8 @@ are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get | |||
| a better understanding for the high-level goals. | ||||
| 
 | ||||
| New features must be accompanied by tests. Before starting work on any large | ||||
| feature, please [join](https://cilium.herokuapp.com/) the | ||||
| [#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to | ||||
| feature, please [join](https://ebpf.io/slack) the | ||||
| [#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to | ||||
| discuss the design first. | ||||
| 
 | ||||
| When submitting pull requests, consider writing details about what problem you | ||||
|  | @ -18,6 +18,23 @@ reason about the proposed changes. | |||
| ## Running the tests | ||||
| 
 | ||||
| Many of the tests require privileges to set resource limits and load eBPF code. | ||||
| The easiest way to obtain these is to run the tests with `sudo`: | ||||
| The easiest way to obtain these is to run the tests with `sudo`. | ||||
| 
 | ||||
| To test the current package with your local kernel you can simply run: | ||||
| ``` | ||||
| go test -exec sudo  ./... | ||||
| ``` | ||||
| 
 | ||||
| To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script. | ||||
| It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed. | ||||
| 
 | ||||
| Examples: | ||||
| 
 | ||||
| ```bash | ||||
| # Run all tests on a 5.4 kernel | ||||
| ./run-tests.sh 5.4 | ||||
| 
 | ||||
| # Run a subset of tests: | ||||
| ./run-tests.sh 5.4 go test ./link | ||||
| ``` | ||||
| 
 | ||||
|     sudo go test ./... | ||||
							
								
								
									
vendor/github.com/cilium/ebpf/MAINTAINERS.md (8 lines changed, generated, vendored, new file)
							|  | @ -0,0 +1,8 @@ | |||
| # Maintainers | ||||
| 
 | ||||
|  * [Lorenz Bauer]  | ||||
|  * [Timo Beckers] (Isovalent) | ||||
| 
 | ||||
| 
 | ||||
| [Lorenz Bauer]: https://github.com/lmb | ||||
| [Timo Beckers]: https://github.com/ti-mo | ||||
							
								
								
									
vendor/github.com/cilium/ebpf/Makefile (93 lines changed, generated, vendored)
							|  | @ -1,67 +1,110 @@ | |||
| # The development version of clang is distributed as the 'clang' binary,
 | ||||
| # while stable/released versions have a version number attached.
 | ||||
| # Pin the default clang to a stable version.
 | ||||
| CLANG ?= clang-11 | ||||
| CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS) | ||||
| CLANG ?= clang-14 | ||||
| STRIP ?= llvm-strip-14 | ||||
| OBJCOPY ?= llvm-objcopy-14 | ||||
| CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) | ||||
| 
 | ||||
| CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ | ||||
| 
 | ||||
| # Obtain an absolute path to the directory of the Makefile.
 | ||||
| # Assume the Makefile is in the root of the repository.
 | ||||
| REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) | ||||
| UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) | ||||
| 
 | ||||
| # Prefer podman if installed, otherwise use docker.
 | ||||
| # Note: Setting the var at runtime will always override.
 | ||||
| CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) | ||||
| CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") | ||||
| 
 | ||||
| IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) | ||||
| VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) | ||||
| 
 | ||||
| 
 | ||||
| # clang <8 doesn't tag relocs properly (STT_NOTYPE)
 | ||||
| # clang 9 is the first version emitting BTF
 | ||||
| TARGETS := \
 | ||||
| 	testdata/loader-clang-7 \
 | ||||
| 	testdata/loader-clang-9 \
 | ||||
| 	testdata/loader-clang-11 \
 | ||||
| 	testdata/loader-$(CLANG) \
 | ||||
| 	testdata/btf_map_init \
 | ||||
| 	testdata/invalid_map \
 | ||||
| 	testdata/raw_tracepoint \
 | ||||
| 	testdata/invalid_map_static \
 | ||||
| 	testdata/initialized_btf_map \
 | ||||
| 	testdata/invalid_btf_map_init \
 | ||||
| 	testdata/strings \
 | ||||
| 	internal/btf/testdata/relocs | ||||
| 	testdata/freplace \
 | ||||
| 	testdata/iproute2_map_compat \
 | ||||
| 	testdata/map_spin_lock \
 | ||||
| 	testdata/subprog_reloc \
 | ||||
| 	testdata/fwd_decl \
 | ||||
| 	btf/testdata/relocs \
 | ||||
| 	btf/testdata/relocs_read \
 | ||||
| 	btf/testdata/relocs_read_tgt | ||||
| 
 | ||||
| .PHONY: all clean docker-all docker-shell | ||||
| .PHONY: all clean container-all container-shell generate | ||||
| 
 | ||||
| .DEFAULT_TARGET = docker-all | ||||
| .DEFAULT_TARGET = container-all | ||||
| 
 | ||||
| # Build all ELF binaries using a Dockerized LLVM toolchain.
 | ||||
| docker-all: | ||||
| 	docker run --rm --user "${UIDGID}" \
 | ||||
| # Build all ELF binaries using a containerized LLVM toolchain.
 | ||||
| container-all: | ||||
| 	${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \
 | ||||
| 		-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
 | ||||
| 		--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
 | ||||
| 		--env HOME="/tmp" \
 | ||||
| 		"${IMAGE}:${VERSION}" \
 | ||||
| 		make all | ||||
| 		$(MAKE) all | ||||
| 
 | ||||
| # (debug) Drop the user into a shell inside the Docker container as root.
 | ||||
| docker-shell: | ||||
| 	docker run --rm -ti \
 | ||||
| # (debug) Drop the user into a shell inside the container as root.
 | ||||
| container-shell: | ||||
| 	${CONTAINER_ENGINE} run --rm -ti \
 | ||||
| 		-v "${REPODIR}":/ebpf -w /ebpf \
 | ||||
| 		"${IMAGE}:${VERSION}" | ||||
| 
 | ||||
| clean: | ||||
| 	-$(RM) testdata/*.elf | ||||
| 	-$(RM) internal/btf/testdata/*.elf | ||||
| 	-$(RM) btf/testdata/*.elf | ||||
| 
 | ||||
| all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) | ||||
| format: | ||||
| 	find . -type f -name "*.c" | xargs clang-format -i | ||||
| 
 | ||||
| all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate | ||||
| 	ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf | ||||
| 	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf | ||||
| 
 | ||||
| # $BPF_CLANG is used in go:generate invocations.
 | ||||
| generate: export BPF_CLANG := $(CLANG) | ||||
| generate: export BPF_CFLAGS := $(CFLAGS) | ||||
| generate: | ||||
| 	go generate ./cmd/bpf2go/test | ||||
| 	go generate ./internal/sys | ||||
| 	cd examples/ && go generate ./... | ||||
| 
 | ||||
| testdata/loader-%-el.elf: testdata/loader.c | ||||
| 	$* $(CFLAGS) -mlittle-endian -c $< -o $@ | ||||
| 	$* $(CFLAGS) -target bpfel -c $< -o $@ | ||||
| 	$(STRIP) -g $@ | ||||
| 
 | ||||
| testdata/loader-%-eb.elf: testdata/loader.c | ||||
| 	$* $(CFLAGS) -mbig-endian -c $< -o $@ | ||||
| 	$* $(CFLAGS) -target bpfeb -c $< -o $@ | ||||
| 	$(STRIP) -g $@ | ||||
| 
 | ||||
| %-el.elf: %.c | ||||
| 	$(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@ | ||||
| 	$(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ | ||||
| 	$(STRIP) -g $@ | ||||
| 
 | ||||
| %-eb.elf : %.c | ||||
| 	$(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@ | ||||
| 	$(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ | ||||
| 	$(STRIP) -g $@ | ||||
| 
 | ||||
| # Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
 | ||||
| .PHONY: vmlinux-btf | ||||
| vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz | ||||
| internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX) | ||||
| 	objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@" | ||||
| .PHONY: generate-btf | ||||
| generate-btf: KERNEL_VERSION?=5.18 | ||||
| generate-btf: | ||||
| 	$(eval TMP := $(shell mktemp -d)) | ||||
| 	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION).bz" -o "$(TMP)/bzImage" | ||||
| 	./testdata/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux" | ||||
| 	$(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz" | ||||
| 	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-selftests-bpf.tgz" -o "$(TMP)/selftests.tgz" | ||||
| 	tar -xf "$(TMP)/selftests.tgz" --to-stdout tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko | \
 | ||||
| 		$(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" - /dev/null | ||||
| 	$(RM) -r "$(TMP)" | ||||
|  |  | |||
							
								
								
									
vendor/github.com/cilium/ebpf/README.md (71 lines changed, generated, vendored)
							|  | @ -2,44 +2,60 @@ | |||
| 
 | ||||
| [](https://pkg.go.dev/github.com/cilium/ebpf) | ||||
| 
 | ||||
|  | ||||
| 
 | ||||
| eBPF is a pure Go library that provides utilities for loading, compiling, and | ||||
| debugging eBPF programs. It has minimal external dependencies and is intended to | ||||
| be used in long running processes. | ||||
| 
 | ||||
| The library is maintained by [Cloudflare](https://www.cloudflare.com) and | ||||
| [Cilium](https://www.cilium.io). | ||||
| 
 | ||||
| See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem. | ||||
| 
 | ||||
| ## Getting Started | ||||
| 
 | ||||
| A small collection of Go and eBPF programs that serve as examples for building | ||||
| your own tools can be found under [examples/](examples/). | ||||
| 
 | ||||
| Contributions are highly encouraged, as they highlight certain use cases of | ||||
| eBPF and the library, and help shape the future of the project. | ||||
| 
 | ||||
| ## Getting Help | ||||
| 
 | ||||
| Please | ||||
| [join](https://ebpf.io/slack) the | ||||
| [#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you | ||||
| have questions regarding the library. | ||||
| 
 | ||||
| ## Packages | ||||
| 
 | ||||
| This library includes the following packages:  | ||||
| 
 | ||||
| * [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic | ||||
|   assembler | ||||
|   assembler, allowing you to write eBPF assembly instructions directly | ||||
|   within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) | ||||
| * [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows | ||||
|   compiling and embedding eBPF programs written in C within Go code. As well as | ||||
|   compiling the C code, it auto-generates Go code for loading and manipulating | ||||
|   the eBPF program and map objects.  | ||||
| * [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF | ||||
|   to various hooks | ||||
| * [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a | ||||
|   `PERF_EVENT_ARRAY` | ||||
| * [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows | ||||
|   embedding eBPF in Go | ||||
| 
 | ||||
| The library is maintained by [Cloudflare](https://www.cloudflare.com) and | ||||
| [Cilium](https://www.cilium.io). Feel free to | ||||
| [join](https://cilium.herokuapp.com/) the | ||||
| [#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack. | ||||
| 
 | ||||
| ## Current status | ||||
| 
 | ||||
| The package is production ready, but **the API is explicitly unstable right | ||||
| now**. Expect to update your code if you want to follow along. | ||||
| * [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a | ||||
|   `BPF_MAP_TYPE_RINGBUF` map | ||||
| * [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent | ||||
|   of `bpftool feature probe` for discovering BPF-related kernel features using native Go. | ||||
| * [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift | ||||
|   the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. | ||||
| 
 | ||||
| ## Requirements | ||||
| 
 | ||||
| * A version of Go that is [supported by | ||||
|   upstream](https://golang.org/doc/devel/release.html#policy) | ||||
| * Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested) | ||||
| 
 | ||||
| ## Useful resources | ||||
| 
 | ||||
| * [eBPF.io](https://ebpf.io) (recommended) | ||||
| * [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide) | ||||
|   (recommended) | ||||
| * [Linux documentation on | ||||
|   BPF](https://www.kernel.org/doc/html/latest/networking/filter.html) | ||||
| * [eBPF features by Linux | ||||
|   version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md) | ||||
| * Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is | ||||
|   not tested against. | ||||
| 
 | ||||
| ## Regenerating Testdata | ||||
| 
 | ||||
|  | @ -47,8 +63,15 @@ Run `make` in the root of this repository to rebuild testdata in all | |||
| subpackages. This requires Docker, as it relies on a standardized build | ||||
| environment to keep the build output stable. | ||||
| 
 | ||||
| It is possible to regenerate data using Podman by overriding the `CONTAINER_*` | ||||
| variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`. | ||||
| 
 | ||||
| The toolchain image build files are kept in [testdata/docker/](testdata/docker/). | ||||
| 
 | ||||
| ## License | ||||
| 
 | ||||
| MIT | ||||
| 
 | ||||
| ### eBPF Gopher | ||||
| 
 | ||||
| The eBPF honeygopher is based on the Go gopher designed by Renee French. | ||||
|  |  | |||
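Among the packages the reworked README lists, rlimit replaces manual RLIMIT_MEMLOCK handling on kernels before 5.11. A minimal sketch of using it before loading any eBPF objects; the RemoveMemlock call is taken from the package's documented API rather than from this diff:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// On kernels before 5.11, eBPF object memory is charged against
	// RLIMIT_MEMLOCK; RemoveMemlock lifts that limit where required.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatalf("removing memlock rlimit: %v", err)
	}
	// ...load maps and programs with the ebpf package afterwards.
}
```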
							
								
								
									
vendor/github.com/cilium/ebpf/asm/func.go (101 lines changed, generated, vendored)
							|  | @ -5,9 +5,13 @@ package asm | |||
| // BuiltinFunc is a built-in eBPF function. | ||||
| type BuiltinFunc int32 | ||||
| 
 | ||||
| func (_ BuiltinFunc) Max() BuiltinFunc { | ||||
| 	return maxBuiltinFunc - 1 | ||||
| } | ||||
| 
 | ||||
| // eBPF built-in functions | ||||
| // | ||||
| // You can renegerate this list using the following gawk script: | ||||
| // You can regenerate this list using the following gawk script: | ||||
| // | ||||
| //    /FN\(.+\),/ { | ||||
| //      match($1, /\((.+)\)/, r) | ||||
|  | @ -132,6 +136,101 @@ const ( | |||
| 	FnSkStorageDelete | ||||
| 	FnSendSignal | ||||
| 	FnTcpGenSyncookie | ||||
| 	FnSkbOutput | ||||
| 	FnProbeReadUser | ||||
| 	FnProbeReadKernel | ||||
| 	FnProbeReadUserStr | ||||
| 	FnProbeReadKernelStr | ||||
| 	FnTcpSendAck | ||||
| 	FnSendSignalThread | ||||
| 	FnJiffies64 | ||||
| 	FnReadBranchRecords | ||||
| 	FnGetNsCurrentPidTgid | ||||
| 	FnXdpOutput | ||||
| 	FnGetNetnsCookie | ||||
| 	FnGetCurrentAncestorCgroupId | ||||
| 	FnSkAssign | ||||
| 	FnKtimeGetBootNs | ||||
| 	FnSeqPrintf | ||||
| 	FnSeqWrite | ||||
| 	FnSkCgroupId | ||||
| 	FnSkAncestorCgroupId | ||||
| 	FnRingbufOutput | ||||
| 	FnRingbufReserve | ||||
| 	FnRingbufSubmit | ||||
| 	FnRingbufDiscard | ||||
| 	FnRingbufQuery | ||||
| 	FnCsumLevel | ||||
| 	FnSkcToTcp6Sock | ||||
| 	FnSkcToTcpSock | ||||
| 	FnSkcToTcpTimewaitSock | ||||
| 	FnSkcToTcpRequestSock | ||||
| 	FnSkcToUdp6Sock | ||||
| 	FnGetTaskStack | ||||
| 	FnLoadHdrOpt | ||||
| 	FnStoreHdrOpt | ||||
| 	FnReserveHdrOpt | ||||
| 	FnInodeStorageGet | ||||
| 	FnInodeStorageDelete | ||||
| 	FnDPath | ||||
| 	FnCopyFromUser | ||||
| 	FnSnprintfBtf | ||||
| 	FnSeqPrintfBtf | ||||
| 	FnSkbCgroupClassid | ||||
| 	FnRedirectNeigh | ||||
| 	FnPerCpuPtr | ||||
| 	FnThisCpuPtr | ||||
| 	FnRedirectPeer | ||||
| 	FnTaskStorageGet | ||||
| 	FnTaskStorageDelete | ||||
| 	FnGetCurrentTaskBtf | ||||
| 	FnBprmOptsSet | ||||
| 	FnKtimeGetCoarseNs | ||||
| 	FnImaInodeHash | ||||
| 	FnSockFromFile | ||||
| 	FnCheckMtu | ||||
| 	FnForEachMapElem | ||||
| 	FnSnprintf | ||||
| 	FnSysBpf | ||||
| 	FnBtfFindByNameKind | ||||
| 	FnSysClose | ||||
| 	FnTimerInit | ||||
| 	FnTimerSetCallback | ||||
| 	FnTimerStart | ||||
| 	FnTimerCancel | ||||
| 	FnGetFuncIp | ||||
| 	FnGetAttachCookie | ||||
| 	FnTaskPtRegs | ||||
| 	FnGetBranchSnapshot | ||||
| 	FnTraceVprintk | ||||
| 	FnSkcToUnixSock | ||||
| 	FnKallsymsLookupName | ||||
| 	FnFindVma | ||||
| 	FnLoop | ||||
| 	FnStrncmp | ||||
| 	FnGetFuncArg | ||||
| 	FnGetFuncRet | ||||
| 	FnGetFuncArgCnt | ||||
| 	FnGetRetval | ||||
| 	FnSetRetval | ||||
| 	FnXdpGetBuffLen | ||||
| 	FnXdpLoadBytes | ||||
| 	FnXdpStoreBytes | ||||
| 	FnCopyFromUserTask | ||||
| 	FnSkbSetTstamp | ||||
| 	FnImaFileHash | ||||
| 	FnKptrXchg | ||||
| 	FnMapLookupPercpuElem | ||||
| 	FnSkcToMptcpSock | ||||
| 	FnDynptrFromMem | ||||
| 	FnRingbufReserveDynptr | ||||
| 	FnRingbufSubmitDynptr | ||||
| 	FnRingbufDiscardDynptr | ||||
| 	FnDynptrRead | ||||
| 	FnDynptrWrite | ||||
| 	FnDynptrData | ||||
| 
 | ||||
| 	maxBuiltinFunc | ||||
| ) | ||||
| 
 | ||||
| // Call emits a function call. | ||||
|  |  | |||
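asm/func.go gains the helper constants up to FnDynptrData plus a Max method that reports the newest helper this library version knows about, and Call (declared just after the hunk) turns any constant into the corresponding call instruction. A short sketch of both, assuming the vendored asm package:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Max returns maxBuiltinFunc - 1, i.e. the newest helper in this version.
	fmt.Println("newest known helper:", asm.FnUnspec.Max())

	// Call emits the instruction that invokes a BPF helper at run time.
	ins := asm.FnRingbufOutput.Call()
	fmt.Println(ins)
}
```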
							
								
								
									
vendor/github.com/cilium/ebpf/asm/func_string.go (98 lines changed, generated, vendored)
							|  | @ -119,11 +119,105 @@ func _() { | |||
| 	_ = x[FnSkStorageDelete-108] | ||||
| 	_ = x[FnSendSignal-109] | ||||
| 	_ = x[FnTcpGenSyncookie-110] | ||||
| 	_ = x[FnSkbOutput-111] | ||||
| 	_ = x[FnProbeReadUser-112] | ||||
| 	_ = x[FnProbeReadKernel-113] | ||||
| 	_ = x[FnProbeReadUserStr-114] | ||||
| 	_ = x[FnProbeReadKernelStr-115] | ||||
| 	_ = x[FnTcpSendAck-116] | ||||
| 	_ = x[FnSendSignalThread-117] | ||||
| 	_ = x[FnJiffies64-118] | ||||
| 	_ = x[FnReadBranchRecords-119] | ||||
| 	_ = x[FnGetNsCurrentPidTgid-120] | ||||
| 	_ = x[FnXdpOutput-121] | ||||
| 	_ = x[FnGetNetnsCookie-122] | ||||
| 	_ = x[FnGetCurrentAncestorCgroupId-123] | ||||
| 	_ = x[FnSkAssign-124] | ||||
| 	_ = x[FnKtimeGetBootNs-125] | ||||
| 	_ = x[FnSeqPrintf-126] | ||||
| 	_ = x[FnSeqWrite-127] | ||||
| 	_ = x[FnSkCgroupId-128] | ||||
| 	_ = x[FnSkAncestorCgroupId-129] | ||||
| 	_ = x[FnRingbufOutput-130] | ||||
| 	_ = x[FnRingbufReserve-131] | ||||
| 	_ = x[FnRingbufSubmit-132] | ||||
| 	_ = x[FnRingbufDiscard-133] | ||||
| 	_ = x[FnRingbufQuery-134] | ||||
| 	_ = x[FnCsumLevel-135] | ||||
| 	_ = x[FnSkcToTcp6Sock-136] | ||||
| 	_ = x[FnSkcToTcpSock-137] | ||||
| 	_ = x[FnSkcToTcpTimewaitSock-138] | ||||
| 	_ = x[FnSkcToTcpRequestSock-139] | ||||
| 	_ = x[FnSkcToUdp6Sock-140] | ||||
| 	_ = x[FnGetTaskStack-141] | ||||
| 	_ = x[FnLoadHdrOpt-142] | ||||
| 	_ = x[FnStoreHdrOpt-143] | ||||
| 	_ = x[FnReserveHdrOpt-144] | ||||
| 	_ = x[FnInodeStorageGet-145] | ||||
| 	_ = x[FnInodeStorageDelete-146] | ||||
| 	_ = x[FnDPath-147] | ||||
| 	_ = x[FnCopyFromUser-148] | ||||
| 	_ = x[FnSnprintfBtf-149] | ||||
| 	_ = x[FnSeqPrintfBtf-150] | ||||
| 	_ = x[FnSkbCgroupClassid-151] | ||||
| 	_ = x[FnRedirectNeigh-152] | ||||
| 	_ = x[FnPerCpuPtr-153] | ||||
| 	_ = x[FnThisCpuPtr-154] | ||||
| 	_ = x[FnRedirectPeer-155] | ||||
| 	_ = x[FnTaskStorageGet-156] | ||||
| 	_ = x[FnTaskStorageDelete-157] | ||||
| 	_ = x[FnGetCurrentTaskBtf-158] | ||||
| 	_ = x[FnBprmOptsSet-159] | ||||
| 	_ = x[FnKtimeGetCoarseNs-160] | ||||
| 	_ = x[FnImaInodeHash-161] | ||||
| 	_ = x[FnSockFromFile-162] | ||||
| 	_ = x[FnCheckMtu-163] | ||||
| 	_ = x[FnForEachMapElem-164] | ||||
| 	_ = x[FnSnprintf-165] | ||||
| 	_ = x[FnSysBpf-166] | ||||
| 	_ = x[FnBtfFindByNameKind-167] | ||||
| 	_ = x[FnSysClose-168] | ||||
| 	_ = x[FnTimerInit-169] | ||||
| 	_ = x[FnTimerSetCallback-170] | ||||
| 	_ = x[FnTimerStart-171] | ||||
| 	_ = x[FnTimerCancel-172] | ||||
| 	_ = x[FnGetFuncIp-173] | ||||
| 	_ = x[FnGetAttachCookie-174] | ||||
| 	_ = x[FnTaskPtRegs-175] | ||||
| 	_ = x[FnGetBranchSnapshot-176] | ||||
| 	_ = x[FnTraceVprintk-177] | ||||
| 	_ = x[FnSkcToUnixSock-178] | ||||
| 	_ = x[FnKallsymsLookupName-179] | ||||
| 	_ = x[FnFindVma-180] | ||||
| 	_ = x[FnLoop-181] | ||||
| 	_ = x[FnStrncmp-182] | ||||
| 	_ = x[FnGetFuncArg-183] | ||||
| 	_ = x[FnGetFuncRet-184] | ||||
| 	_ = x[FnGetFuncArgCnt-185] | ||||
| 	_ = x[FnGetRetval-186] | ||||
| 	_ = x[FnSetRetval-187] | ||||
| 	_ = x[FnXdpGetBuffLen-188] | ||||
| 	_ = x[FnXdpLoadBytes-189] | ||||
| 	_ = x[FnXdpStoreBytes-190] | ||||
| 	_ = x[FnCopyFromUserTask-191] | ||||
| 	_ = x[FnSkbSetTstamp-192] | ||||
| 	_ = x[FnImaFileHash-193] | ||||
| 	_ = x[FnKptrXchg-194] | ||||
| 	_ = x[FnMapLookupPercpuElem-195] | ||||
| 	_ = x[FnSkcToMptcpSock-196] | ||||
| 	_ = x[FnDynptrFromMem-197] | ||||
| 	_ = x[FnRingbufReserveDynptr-198] | ||||
| 	_ = x[FnRingbufSubmitDynptr-199] | ||||
| 	_ = x[FnRingbufDiscardDynptr-200] | ||||
| 	_ = x[FnDynptrRead-201] | ||||
| 	_ = x[FnDynptrWrite-202] | ||||
| 	_ = x[FnDynptrData-203] | ||||
| 	_ = x[maxBuiltinFunc-204] | ||||
| } | ||||
| 
 | ||||
| const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookie" | ||||
| const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDatamaxBuiltinFunc" | ||||
| 
 | ||||
| var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632} | ||||
| var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3011} | ||||
| 
 | ||||
| func (i BuiltinFunc) String() string { | ||||
| 	if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { | ||||
|  |  | |||
							
								
								
									
vendor/github.com/cilium/ebpf/asm/instruction.go (587 lines changed, generated, vendored)
							|  | @ -8,8 +8,10 @@ import ( | |||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
|  | @ -19,6 +21,10 @@ const InstructionSize = 8 | |||
| // RawInstructionOffset is an offset in units of raw BPF instructions. | ||||
| type RawInstructionOffset uint64 | ||||
| 
 | ||||
| var ErrUnreferencedSymbol = errors.New("unreferenced symbol") | ||||
| var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") | ||||
| var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") | ||||
| 
 | ||||
| // Bytes returns the offset of an instruction in bytes. | ||||
| func (rio RawInstructionOffset) Bytes() uint64 { | ||||
| 	return uint64(rio) * InstructionSize | ||||
|  | @ -26,50 +32,57 @@ func (rio RawInstructionOffset) Bytes() uint64 { | |||
| 
 | ||||
| // Instruction is a single eBPF instruction. | ||||
| type Instruction struct { | ||||
| 	OpCode    OpCode | ||||
| 	Dst       Register | ||||
| 	Src       Register | ||||
| 	Offset    int16 | ||||
| 	Constant  int64 | ||||
| 	Reference string | ||||
| 	Symbol    string | ||||
| } | ||||
| 	OpCode   OpCode | ||||
| 	Dst      Register | ||||
| 	Src      Register | ||||
| 	Offset   int16 | ||||
| 	Constant int64 | ||||
| 
 | ||||
| // Sym creates a symbol. | ||||
| func (ins Instruction) Sym(name string) Instruction { | ||||
| 	ins.Symbol = name | ||||
| 	return ins | ||||
| 	// Metadata contains optional metadata about this instruction. | ||||
| 	Metadata Metadata | ||||
| } | ||||
| 
 | ||||
| // Unmarshal decodes a BPF instruction. | ||||
| func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { | ||||
| 	var bi bpfInstruction | ||||
| 	err := binary.Read(r, bo, &bi) | ||||
| 	if err != nil { | ||||
| 	data := make([]byte, InstructionSize) | ||||
| 	if _, err := io.ReadFull(r, data); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	ins.OpCode = bi.OpCode | ||||
| 	ins.Offset = bi.Offset | ||||
| 	ins.Constant = int64(bi.Constant) | ||||
| 	ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("can't unmarshal registers: %s", err) | ||||
| 	ins.OpCode = OpCode(data[0]) | ||||
| 
 | ||||
| 	regs := data[1] | ||||
| 	switch bo { | ||||
| 	case binary.LittleEndian: | ||||
| 		ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) | ||||
| 	case binary.BigEndian: | ||||
| 		ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) | ||||
| 	} | ||||
| 
 | ||||
| 	if !bi.OpCode.isDWordLoad() { | ||||
| 	ins.Offset = int16(bo.Uint16(data[2:4])) | ||||
| 	// Convert to int32 before widening to int64 | ||||
| 	// to ensure the signed bit is carried over. | ||||
| 	ins.Constant = int64(int32(bo.Uint32(data[4:8]))) | ||||
| 
 | ||||
| 	if !ins.OpCode.IsDWordLoad() { | ||||
| 		return InstructionSize, nil | ||||
| 	} | ||||
| 
 | ||||
| 	var bi2 bpfInstruction | ||||
| 	if err := binary.Read(r, bo, &bi2); err != nil { | ||||
| 	// Pull another instruction from the stream to retrieve the second | ||||
| 	// half of the 64-bit immediate value. | ||||
| 	if _, err := io.ReadFull(r, data); err != nil { | ||||
| 		// No Wrap, to avoid io.EOF clash | ||||
| 		return 0, errors.New("64bit immediate is missing second half") | ||||
| 	} | ||||
| 	if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { | ||||
| 
 | ||||
| 	// Require that all fields other than the value are zero. | ||||
| 	if bo.Uint32(data[0:4]) != 0 { | ||||
| 		return 0, errors.New("64bit immediate has non-zero fields") | ||||
| 	} | ||||
| 	ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) | ||||
| 
 | ||||
| 	cons1 := uint32(ins.Constant) | ||||
| 	cons2 := int32(bo.Uint32(data[4:8])) | ||||
| 	ins.Constant = int64(cons2)<<32 | int64(cons1) | ||||
| 
 | ||||
| 	return 2 * InstructionSize, nil | ||||
| } | ||||
|  | @ -80,7 +93,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) | |||
| 		return 0, errors.New("invalid opcode") | ||||
| 	} | ||||
| 
 | ||||
| 	isDWordLoad := ins.OpCode.isDWordLoad() | ||||
| 	isDWordLoad := ins.OpCode.IsDWordLoad() | ||||
| 
 | ||||
| 	cons := int32(ins.Constant) | ||||
| 	if isDWordLoad { | ||||
|  | @ -93,14 +106,12 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) | |||
| 		return 0, fmt.Errorf("can't marshal registers: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	bpfi := bpfInstruction{ | ||||
| 		ins.OpCode, | ||||
| 		regs, | ||||
| 		ins.Offset, | ||||
| 		cons, | ||||
| 	} | ||||
| 
 | ||||
| 	if err := binary.Write(w, bo, &bpfi); err != nil { | ||||
| 	data := make([]byte, InstructionSize) | ||||
| 	data[0] = byte(ins.OpCode) | ||||
| 	data[1] = byte(regs) | ||||
| 	bo.PutUint16(data[2:4], uint16(ins.Offset)) | ||||
| 	bo.PutUint32(data[4:8], uint32(cons)) | ||||
| 	if _, err := w.Write(data); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
|  | @ -108,45 +119,83 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) | |||
| 		return InstructionSize, nil | ||||
| 	} | ||||
| 
 | ||||
| 	bpfi = bpfInstruction{ | ||||
| 		Constant: int32(ins.Constant >> 32), | ||||
| 	} | ||||
| 
 | ||||
| 	if err := binary.Write(w, bo, &bpfi); err != nil { | ||||
| 	// The first half of the second part of a double-wide instruction | ||||
| 	// must be zero. The second half carries the value. | ||||
| 	bo.PutUint32(data[0:4], 0) | ||||
| 	bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) | ||||
| 	if _, err := w.Write(data); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	return 2 * InstructionSize, nil | ||||
| } | ||||
| 
 | ||||
| // RewriteMapPtr changes an instruction to use a new map fd. | ||||
| // AssociateMap associates a Map with this Instruction. | ||||
| // | ||||
| // Returns an error if the instruction doesn't load a map. | ||||
| func (ins *Instruction) RewriteMapPtr(fd int) error { | ||||
| 	if !ins.OpCode.isDWordLoad() { | ||||
| 		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) | ||||
| 	} | ||||
| 
 | ||||
| 	if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue { | ||||
| // Implicitly clears the Instruction's Reference field. | ||||
| // | ||||
| // Returns an error if the Instruction is not a map load. | ||||
| func (ins *Instruction) AssociateMap(m FDer) error { | ||||
| 	if !ins.IsLoadFromMap() { | ||||
| 		return errors.New("not a load from a map") | ||||
| 	} | ||||
| 
 | ||||
| 	ins.Metadata.Set(referenceMeta{}, nil) | ||||
| 	ins.Metadata.Set(mapMeta{}, m) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // RewriteMapPtr changes an instruction to use a new map fd. | ||||
| // | ||||
| // Returns an error if the instruction doesn't load a map. | ||||
| // | ||||
| // Deprecated: use AssociateMap instead. If you cannot provide a Map, | ||||
| // wrap an fd in a type implementing FDer. | ||||
| func (ins *Instruction) RewriteMapPtr(fd int) error { | ||||
| 	if !ins.IsLoadFromMap() { | ||||
| 		return errors.New("not a load from a map") | ||||
| 	} | ||||
| 
 | ||||
| 	ins.encodeMapFD(fd) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (ins *Instruction) encodeMapFD(fd int) { | ||||
| 	// Preserve the offset value for direct map loads. | ||||
| 	offset := uint64(ins.Constant) & (math.MaxUint32 << 32) | ||||
| 	rawFd := uint64(uint32(fd)) | ||||
| 	ins.Constant = int64(offset | rawFd) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (ins *Instruction) mapPtr() uint32 { | ||||
| 	return uint32(uint64(ins.Constant) & math.MaxUint32) | ||||
| // MapPtr returns the map fd for this instruction. | ||||
| // | ||||
| // The result is undefined if the instruction is not a load from a map, | ||||
| // see IsLoadFromMap. | ||||
| // | ||||
| // Deprecated: use Map() instead. | ||||
| func (ins *Instruction) MapPtr() int { | ||||
| 	// If there is a map associated with the instruction, return its FD. | ||||
| 	if fd := ins.Metadata.Get(mapMeta{}); fd != nil { | ||||
| 		return fd.(FDer).FD() | ||||
| 	} | ||||
| 
 | ||||
| 	// Fall back to the fd stored in the Constant field | ||||
| 	return ins.mapFd() | ||||
| } | ||||
| 
 | ||||
| // mapFd returns the map file descriptor stored in the 32 least significant | ||||
| // bits of ins' Constant field. | ||||
| func (ins *Instruction) mapFd() int { | ||||
| 	return int(int32(ins.Constant)) | ||||
| } | ||||
| 
 | ||||
| // RewriteMapOffset changes the offset of a direct load from a map. | ||||
| // | ||||
| // Returns an error if the instruction is not a direct load. | ||||
| func (ins *Instruction) RewriteMapOffset(offset uint32) error { | ||||
| 	if !ins.OpCode.isDWordLoad() { | ||||
| 	if !ins.OpCode.IsDWordLoad() { | ||||
| 		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) | ||||
| 	} | ||||
| 
 | ||||
|  | @ -163,10 +212,10 @@ func (ins *Instruction) mapOffset() uint32 { | |||
| 	return uint32(uint64(ins.Constant) >> 32) | ||||
| } | ||||
| 
 | ||||
| // isLoadFromMap returns true if the instruction loads from a map. | ||||
| // IsLoadFromMap returns true if the instruction loads from a map. | ||||
| // | ||||
| // This covers both loading the map pointer and direct map value loads. | ||||
| func (ins *Instruction) isLoadFromMap() bool { | ||||
| func (ins *Instruction) IsLoadFromMap() bool { | ||||
| 	return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue) | ||||
| } | ||||
| 
 | ||||
|  | @ -177,6 +226,29 @@ func (ins *Instruction) IsFunctionCall() bool { | |||
| 	return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall | ||||
| } | ||||
| 
 | ||||
| // IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. | ||||
| func (ins *Instruction) IsLoadOfFunctionPointer() bool { | ||||
| 	return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc | ||||
| } | ||||
| 
 | ||||
| // IsFunctionReference returns true if the instruction references another BPF | ||||
| // function, either by invoking a Call jump operation or by loading a function | ||||
| // pointer. | ||||
| func (ins *Instruction) IsFunctionReference() bool { | ||||
| 	return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() | ||||
| } | ||||
| 
 | ||||
| // IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. | ||||
| func (ins *Instruction) IsBuiltinCall() bool { | ||||
| 	return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 | ||||
| } | ||||
| 
 | ||||
| // IsConstantLoad returns true if the instruction loads a constant of the | ||||
| // given size. | ||||
| func (ins *Instruction) IsConstantLoad(size Size) bool { | ||||
| 	return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0 | ||||
| } | ||||
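
A short sketch (not part of the diff) of how the new predicates tell helper calls and bpf2bpf calls apart, assuming the BuiltinFunc constants (here FnMapLookupElem) exported by the same asm package.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	helper := asm.FnMapLookupElem.Call() // call into a BPF helper
	fmt.Println(helper.IsBuiltinCall())  // true: Src and Dst are both R0
	fmt.Println(helper.IsFunctionCall()) // false: Src is not PseudoCall

	sub := asm.Call.Label("subprog") // call into another BPF function
	fmt.Println(sub.IsFunctionCall()) // true
}
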
| 
 | ||||
| // Format implements fmt.Formatter. | ||||
| func (ins Instruction) Format(f fmt.State, c rune) { | ||||
| 	if c != 'v' { | ||||
|  | @ -197,22 +269,31 @@ func (ins Instruction) Format(f fmt.State, c rune) { | |||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	if ins.isLoadFromMap() { | ||||
| 		fd := int32(ins.mapPtr()) | ||||
| 	if ins.IsLoadFromMap() { | ||||
| 		fd := ins.mapFd() | ||||
| 		m := ins.Map() | ||||
| 		switch ins.Src { | ||||
| 		case PseudoMapFD: | ||||
| 			fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) | ||||
| 			if m != nil { | ||||
| 				fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) | ||||
| 			} else { | ||||
| 				fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) | ||||
| 			} | ||||
| 
 | ||||
| 		case PseudoMapValue: | ||||
| 			fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) | ||||
| 			if m != nil { | ||||
| 				fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) | ||||
| 			} else { | ||||
| 				fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		goto ref | ||||
| 	} | ||||
| 
 | ||||
| 	fmt.Fprintf(f, "%v ", op) | ||||
| 	switch cls := op.Class(); cls { | ||||
| 	case LdClass, LdXClass, StClass, StXClass: | ||||
| 	switch cls := op.Class(); { | ||||
| 	case cls.isLoadOrStore(): | ||||
| 		switch op.Mode() { | ||||
| 		case ImmMode: | ||||
| 			fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) | ||||
|  | @ -226,7 +307,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { | |||
| 			fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) | ||||
| 		} | ||||
| 
 | ||||
| 	case ALU64Class, ALUClass: | ||||
| 	case cls.IsALU(): | ||||
| 		fmt.Fprintf(f, "dst: %s ", ins.Dst) | ||||
| 		if op.ALUOp() == Swap || op.Source() == ImmSource { | ||||
| 			fmt.Fprintf(f, "imm: %d", ins.Constant) | ||||
|  | @ -234,7 +315,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { | |||
| 			fmt.Fprintf(f, "src: %s", ins.Src) | ||||
| 		} | ||||
| 
 | ||||
| 	case JumpClass: | ||||
| 	case cls.IsJump(): | ||||
| 		switch jop := op.JumpOp(); jop { | ||||
| 		case Call: | ||||
| 			if ins.Src == PseudoCall { | ||||
|  | @ -255,34 +336,171 @@ func (ins Instruction) Format(f fmt.State, c rune) { | |||
| 	} | ||||
| 
 | ||||
| ref: | ||||
| 	if ins.Reference != "" { | ||||
| 		fmt.Fprintf(f, " <%s>", ins.Reference) | ||||
| 	if ins.Reference() != "" { | ||||
| 		fmt.Fprintf(f, " <%s>", ins.Reference()) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (ins Instruction) equal(other Instruction) bool { | ||||
| 	return ins.OpCode == other.OpCode && | ||||
| 		ins.Dst == other.Dst && | ||||
| 		ins.Src == other.Src && | ||||
| 		ins.Offset == other.Offset && | ||||
| 		ins.Constant == other.Constant | ||||
| } | ||||
| 
 | ||||
| // Size returns the amount of bytes ins would occupy in binary form. | ||||
| func (ins Instruction) Size() uint64 { | ||||
| 	return uint64(InstructionSize * ins.OpCode.rawInstructions()) | ||||
| } | ||||
| 
 | ||||
| type symbolMeta struct{} | ||||
| 
 | ||||
| // WithSymbol marks the Instruction as a Symbol, which other Instructions | ||||
| // can point to using corresponding calls to WithReference. | ||||
| func (ins Instruction) WithSymbol(name string) Instruction { | ||||
| 	ins.Metadata.Set(symbolMeta{}, name) | ||||
| 	return ins | ||||
| } | ||||
| 
 | ||||
| // Sym creates a symbol. | ||||
| // | ||||
| // Deprecated: use WithSymbol instead. | ||||
| func (ins Instruction) Sym(name string) Instruction { | ||||
| 	return ins.WithSymbol(name) | ||||
| } | ||||
| 
 | ||||
| // Symbol returns the value ins has been marked with using WithSymbol, | ||||
| // otherwise returns an empty string. A symbol is often an Instruction | ||||
| // at the start of a function body. | ||||
| func (ins Instruction) Symbol() string { | ||||
| 	sym, _ := ins.Metadata.Get(symbolMeta{}).(string) | ||||
| 	return sym | ||||
| } | ||||
| 
 | ||||
| type referenceMeta struct{} | ||||
| 
 | ||||
| // WithReference makes ins reference another Symbol or map by name. | ||||
| func (ins Instruction) WithReference(ref string) Instruction { | ||||
| 	ins.Metadata.Set(referenceMeta{}, ref) | ||||
| 	return ins | ||||
| } | ||||
| 
 | ||||
| // Reference returns the Symbol or map name referenced by ins, if any. | ||||
| func (ins Instruction) Reference() string { | ||||
| 	ref, _ := ins.Metadata.Get(referenceMeta{}).(string) | ||||
| 	return ref | ||||
| } | ||||
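
Symbols and references now live in Metadata instead of struct fields, but the accessors keep the old semantics. A minimal sketch, assuming asm.Return and the Ja jump op from the same package.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	exit := asm.Return().WithSymbol("out") // mark an instruction as a jump target
	fmt.Println(exit.Symbol())             // out

	jump := asm.Ja.Label("out")   // refer to the symbol by name
	fmt.Println(jump.Reference()) // out
}
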
| 
 | ||||
| type mapMeta struct{} | ||||
| 
 | ||||
| // Map returns the Map referenced by ins, if any. | ||||
| // An Instruction will contain a Map if e.g. it references an existing, | ||||
| // pinned map that was opened during ELF loading. | ||||
| func (ins Instruction) Map() FDer { | ||||
| 	fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) | ||||
| 	return fd | ||||
| } | ||||
| 
 | ||||
| type sourceMeta struct{} | ||||
| 
 | ||||
| // WithSource adds source information about the Instruction. | ||||
| func (ins Instruction) WithSource(src fmt.Stringer) Instruction { | ||||
| 	ins.Metadata.Set(sourceMeta{}, src) | ||||
| 	return ins | ||||
| } | ||||
| 
 | ||||
| // Source returns source information about the Instruction. The field is | ||||
| // present when the compiler emits BTF line info about the Instruction and | ||||
| // usually contains the line of source code responsible for it. | ||||
| func (ins Instruction) Source() fmt.Stringer { | ||||
| 	str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) | ||||
| 	return str | ||||
| } | ||||
| 
 | ||||
| // A Comment can be passed to Instruction.WithSource to add a comment | ||||
| // to an instruction. | ||||
| type Comment string | ||||
| 
 | ||||
| func (s Comment) String() string { | ||||
| 	return string(s) | ||||
| } | ||||
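
Comment is just a fmt.Stringer, so it can be attached through WithSource and is printed as a "; ..." line when Instructions are formatted. A small sketch, assuming the Mov ALU helper from the same package.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	ins := asm.Mov.Imm(asm.R0, 0).WithSource(asm.Comment("return 0"))
	fmt.Println(ins.Source()) // return 0
}
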
| 
 | ||||
| // FDer represents a resource tied to an underlying file descriptor. | ||||
| // Used as a stand-in for e.g. ebpf.Map since that type cannot be | ||||
| // imported here and FD() is the only method we rely on. | ||||
| type FDer interface { | ||||
| 	FD() int | ||||
| } | ||||
| 
 | ||||
| // Instructions is an eBPF program. | ||||
| type Instructions []Instruction | ||||
| 
 | ||||
| // Unmarshal unmarshals an Instructions from a binary instruction stream. | ||||
| // All instructions in insns are replaced by instructions decoded from r. | ||||
| func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { | ||||
| 	if len(*insns) > 0 { | ||||
| 		*insns = nil | ||||
| 	} | ||||
| 
 | ||||
| 	var offset uint64 | ||||
| 	for { | ||||
| 		var ins Instruction | ||||
| 		n, err := ins.Unmarshal(r, bo) | ||||
| 		if errors.Is(err, io.EOF) { | ||||
| 			break | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("offset %d: %w", offset, err) | ||||
| 		} | ||||
| 
 | ||||
| 		*insns = append(*insns, ins) | ||||
| 		offset += n | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
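
Unmarshal is the inverse of Marshal, so a round-trip through a buffer is an easy sanity check. A sketch assuming little-endian encoding and the Mov helper.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	prog := asm.Instructions{
		asm.Mov.Imm(asm.R0, 0),
		asm.Return(),
	}

	var buf bytes.Buffer
	if err := prog.Marshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}

	var decoded asm.Instructions
	if err := decoded.Unmarshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}
	fmt.Println(len(decoded)) // 2
}
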
| 
 | ||||
| // Name returns the name of the function insns belongs to, if any. | ||||
| func (insns Instructions) Name() string { | ||||
| 	if len(insns) == 0 { | ||||
| 		return "" | ||||
| 	} | ||||
| 	return insns[0].Symbol() | ||||
| } | ||||
| 
 | ||||
| func (insns Instructions) String() string { | ||||
| 	return fmt.Sprint(insns) | ||||
| } | ||||
| 
 | ||||
| // RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. | ||||
| // Size returns the amount of bytes insns would occupy in binary form. | ||||
| func (insns Instructions) Size() uint64 { | ||||
| 	var sum uint64 | ||||
| 	for _, ins := range insns { | ||||
| 		sum += ins.Size() | ||||
| 	} | ||||
| 	return sum | ||||
| } | ||||
| 
 | ||||
| // AssociateMap updates all Instructions that Reference the given symbol | ||||
| // to point to an existing Map m instead. | ||||
| // | ||||
| // Returns an error if the symbol isn't used, see IsUnreferencedSymbol. | ||||
| func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { | ||||
| // Returns ErrUnreferencedSymbol error if no references to symbol are found | ||||
| // in insns. If symbol is anything other than the symbol name of a map (e.g. | ||||
| // a bpf2bpf subprogram), an error is returned. | ||||
| func (insns Instructions) AssociateMap(symbol string, m FDer) error { | ||||
| 	if symbol == "" { | ||||
| 		return errors.New("empty symbol") | ||||
| 	} | ||||
| 
 | ||||
| 	found := false | ||||
| 	var found bool | ||||
| 	for i := range insns { | ||||
| 		ins := &insns[i] | ||||
| 		if ins.Reference != symbol { | ||||
| 		if ins.Reference() != symbol { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if err := ins.RewriteMapPtr(fd); err != nil { | ||||
| 		if err := ins.AssociateMap(m); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
|  | @ -290,7 +508,40 @@ func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { | |||
| 	} | ||||
| 
 | ||||
| 	if !found { | ||||
| 		return &unreferencedSymbolError{symbol} | ||||
| 		return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. | ||||
| // | ||||
| // Returns ErrUnreferencedSymbol if the symbol isn't used. | ||||
| // | ||||
| // Deprecated: use AssociateMap instead. | ||||
| func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { | ||||
| 	if symbol == "" { | ||||
| 		return errors.New("empty symbol") | ||||
| 	} | ||||
| 
 | ||||
| 	var found bool | ||||
| 	for i := range insns { | ||||
| 		ins := &insns[i] | ||||
| 		if ins.Reference() != symbol { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if !ins.IsLoadFromMap() { | ||||
| 			return errors.New("not a load from a map") | ||||
| 		} | ||||
| 
 | ||||
| 		ins.encodeMapFD(fd) | ||||
| 
 | ||||
| 		found = true | ||||
| 	} | ||||
| 
 | ||||
| 	if !found { | ||||
| 		return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
|  | @ -302,31 +553,61 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) { | |||
| 	offsets := make(map[string]int) | ||||
| 
 | ||||
| 	for i, ins := range insns { | ||||
| 		if ins.Symbol == "" { | ||||
| 		if ins.Symbol() == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if _, ok := offsets[ins.Symbol]; ok { | ||||
| 			return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol) | ||||
| 		if _, ok := offsets[ins.Symbol()]; ok { | ||||
| 			return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) | ||||
| 		} | ||||
| 
 | ||||
| 		offsets[ins.Symbol] = i | ||||
| 		offsets[ins.Symbol()] = i | ||||
| 	} | ||||
| 
 | ||||
| 	return offsets, nil | ||||
| } | ||||
| 
 | ||||
| // FunctionReferences returns a set of symbol names these Instructions make | ||||
| // bpf-to-bpf calls to. | ||||
| func (insns Instructions) FunctionReferences() []string { | ||||
| 	calls := make(map[string]struct{}) | ||||
| 	for _, ins := range insns { | ||||
| 		if ins.Constant != -1 { | ||||
| 			// BPF-to-BPF calls have -1 constants. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if ins.Reference() == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if !ins.IsFunctionReference() { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		calls[ins.Reference()] = struct{}{} | ||||
| 	} | ||||
| 
 | ||||
| 	result := make([]string, 0, len(calls)) | ||||
| 	for call := range calls { | ||||
| 		result = append(result, call) | ||||
| 	} | ||||
| 
 | ||||
| 	sort.Strings(result) | ||||
| 	return result | ||||
| } | ||||
| 
 | ||||
| // ReferenceOffsets returns the set of references and their offset in | ||||
| // the instructions. | ||||
| func (insns Instructions) ReferenceOffsets() map[string][]int { | ||||
| 	offsets := make(map[string][]int) | ||||
| 
 | ||||
| 	for i, ins := range insns { | ||||
| 		if ins.Reference == "" { | ||||
| 		if ins.Reference() == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		offsets[ins.Reference] = append(offsets[ins.Reference], i) | ||||
| 		offsets[ins.Reference()] = append(offsets[ins.Reference()], i) | ||||
| 	} | ||||
| 
 | ||||
| 	return offsets | ||||
|  | @ -337,7 +618,7 @@ func (insns Instructions) ReferenceOffsets() map[string][]int { | |||
| // You can control indentation of symbols by | ||||
| // specifying a width. Setting a precision controls the indentation of | ||||
| // instructions. | ||||
| // The default character is a tab, which can be overriden by specifying | ||||
| // The default character is a tab, which can be overridden by specifying | ||||
| // the ' ' space flag. | ||||
| func (insns Instructions) Format(f fmt.State, c rune) { | ||||
| 	if c != 's' && c != 'v' { | ||||
|  | @ -377,20 +658,36 @@ func (insns Instructions) Format(f fmt.State, c rune) { | |||
| 
 | ||||
| 	iter := insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		if iter.Ins.Symbol != "" { | ||||
| 			fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol) | ||||
| 		if iter.Ins.Symbol() != "" { | ||||
| 			fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) | ||||
| 		} | ||||
| 		if src := iter.Ins.Source(); src != nil { | ||||
| 			line := strings.TrimSpace(src.String()) | ||||
| 			if line != "" { | ||||
| 				fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) | ||||
| 			} | ||||
| 		} | ||||
| 		fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) | ||||
| 	} | ||||
| 
 | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| // Marshal encodes a BPF program into the kernel format. | ||||
| // | ||||
| // insns may be modified if there are unresolved jumps or bpf2bpf calls. | ||||
| // | ||||
| // Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction | ||||
| // without a matching Symbol Instruction within insns. | ||||
| func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { | ||||
| 	if err := insns.encodeFunctionReferences(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := insns.encodeMapPointers(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	for i, ins := range insns { | ||||
| 		_, err := ins.Marshal(w, bo) | ||||
| 		if err != nil { | ||||
| 		if _, err := ins.Marshal(w, bo); err != nil { | ||||
| 			return fmt.Errorf("instruction %d: %w", i, err) | ||||
| 		} | ||||
| 	} | ||||
|  | @ -405,7 +702,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { | |||
| func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { | ||||
| 	h := sha1.New() | ||||
| 	for i, ins := range insns { | ||||
| 		if ins.isLoadFromMap() { | ||||
| 		if ins.IsLoadFromMap() { | ||||
| 			ins.Constant = 0 | ||||
| 		} | ||||
| 		_, err := ins.Marshal(h, bo) | ||||
|  | @ -416,6 +713,95 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { | |||
| 	return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil | ||||
| } | ||||
| 
 | ||||
| // encodeFunctionReferences populates the Offset (or Constant, depending on | ||||
| // the instruction type) field of instructions with a Reference field to point | ||||
| // to the offset of the corresponding instruction with a matching Symbol field. | ||||
| // | ||||
| // Only Reference Instructions that are either jumps or BPF function references | ||||
| // (calls or function pointer loads) are populated. | ||||
| // | ||||
| // Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction | ||||
| // without at least one corresponding Symbol Instruction within insns. | ||||
| func (insns Instructions) encodeFunctionReferences() error { | ||||
| 	// Index the offsets of instructions tagged as a symbol. | ||||
| 	symbolOffsets := make(map[string]RawInstructionOffset) | ||||
| 	iter := insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		ins := iter.Ins | ||||
| 
 | ||||
| 		if ins.Symbol() == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if _, ok := symbolOffsets[ins.Symbol()]; ok { | ||||
| 			return fmt.Errorf("duplicate symbol %s", ins.Symbol()) | ||||
| 		} | ||||
| 
 | ||||
| 		symbolOffsets[ins.Symbol()] = iter.Offset | ||||
| 	} | ||||
| 
 | ||||
| 	// Find all instructions tagged as references to other symbols. | ||||
| 	// Depending on the instruction type, populate their constant or offset | ||||
| 	// fields to point to the symbol they refer to within the insn stream. | ||||
| 	iter = insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		i := iter.Index | ||||
| 		offset := iter.Offset | ||||
| 		ins := iter.Ins | ||||
| 
 | ||||
| 		if ins.Reference() == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		switch { | ||||
| 		case ins.IsFunctionReference() && ins.Constant == -1: | ||||
| 			symOffset, ok := symbolOffsets[ins.Reference()] | ||||
| 			if !ok { | ||||
| 				return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) | ||||
| 			} | ||||
| 
 | ||||
| 			ins.Constant = int64(symOffset - offset - 1) | ||||
| 
 | ||||
| 		case ins.OpCode.Class().IsJump() && ins.Offset == -1: | ||||
| 			symOffset, ok := symbolOffsets[ins.Reference()] | ||||
| 			if !ok { | ||||
| 				return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) | ||||
| 			} | ||||
| 
 | ||||
| 			ins.Offset = int16(symOffset - offset - 1) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
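
In practice this means a bpf2bpf call only needs a label at build time; the relative offset is filled in here when the program is marshalled. A caller-side sketch (not part of the diff), assuming the Mov and Add ALU helpers from the same package.

package main

import (
	"bytes"
	"encoding/binary"

	"github.com/cilium/ebpf/asm"
)

func main() {
	prog := asm.Instructions{
		// main: R1 = 21, call "double", return.
		asm.Mov.Imm(asm.R1, 21).WithSymbol("main"),
		asm.Call.Label("double"),
		asm.Return(),

		// double: R0 = R1 + R1.
		asm.Mov.Reg(asm.R0, asm.R1).WithSymbol("double"),
		asm.Add.Reg(asm.R0, asm.R1),
		asm.Return(),
	}

	var buf bytes.Buffer
	// Marshal resolves the "double" reference; a missing symbol would
	// surface as ErrUnsatisfiedProgramReference instead.
	if err := prog.Marshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}
}
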
| 
 | ||||
| // encodeMapPointers finds all Map Instructions and encodes their FDs | ||||
| // into their Constant fields. | ||||
| func (insns Instructions) encodeMapPointers() error { | ||||
| 	iter := insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		ins := iter.Ins | ||||
| 
 | ||||
| 		if !ins.IsLoadFromMap() { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		m := ins.Map() | ||||
| 		if m == nil { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		fd := m.FD() | ||||
| 		if fd < 0 { | ||||
| 			return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) | ||||
| 		} | ||||
| 
 | ||||
| 		ins.encodeMapFD(m.FD()) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Iterate allows iterating a BPF program while keeping track of | ||||
| // various offsets. | ||||
| // | ||||
|  | @ -451,13 +837,6 @@ func (iter *InstructionIterator) Next() bool { | |||
| 	return true | ||||
| } | ||||
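
The iterator is also useful outside the package for mapping instruction indexes to raw offsets, since double-wide loads occupy two slots. A short sketch.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadImm(asm.R0, 42, asm.DWord), // counts as two raw instructions
		asm.Return(),
	}

	iter := insns.Iterate()
	for iter.Next() {
		fmt.Printf("index %d at raw offset %d: %v\n", iter.Index, iter.Offset, iter.Ins)
	}
	// index 0 at raw offset 0, index 1 at raw offset 2.
}
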
| 
 | ||||
| type bpfInstruction struct { | ||||
| 	OpCode    OpCode | ||||
| 	Registers bpfRegisters | ||||
| 	Offset    int16 | ||||
| 	Constant  int32 | ||||
| } | ||||
| 
 | ||||
| type bpfRegisters uint8 | ||||
| 
 | ||||
| func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { | ||||
|  | @ -471,28 +850,10 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) { | ||||
| 	switch bo { | ||||
| 	case binary.LittleEndian: | ||||
| 		return Register(r & 0xF), Register(r >> 4), nil | ||||
| 	case binary.BigEndian: | ||||
| 		return Register(r >> 4), Register(r & 0xf), nil | ||||
| 	default: | ||||
| 		return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| type unreferencedSymbolError struct { | ||||
| 	symbol string | ||||
| } | ||||
| 
 | ||||
| func (use *unreferencedSymbolError) Error() string { | ||||
| 	return fmt.Sprintf("unreferenced symbol %s", use.symbol) | ||||
| } | ||||
| 
 | ||||
| // IsUnreferencedSymbol returns true if err was caused by | ||||
| // an unreferenced symbol. | ||||
| // | ||||
| // Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). | ||||
| func IsUnreferencedSymbol(err error) bool { | ||||
| 	_, ok := err.(*unreferencedSymbolError) | ||||
| 	return ok | ||||
| 	return errors.Is(err, ErrUnreferencedSymbol) | ||||
| } | ||||
|  |  | |||
							
								
								
									
76 vendor/github.com/cilium/ebpf/asm/jump.go generated vendored
|  | @ -60,50 +60,68 @@ func (op JumpOp) Op(source Source) OpCode { | |||
| 	return OpCode(JumpClass).SetJumpOp(op).SetSource(source) | ||||
| } | ||||
| 
 | ||||
| // Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled. | ||||
| // Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. | ||||
| func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { | ||||
| 	if op == Exit || op == Call || op == Ja { | ||||
| 		return Instruction{OpCode: InvalidOpCode} | ||||
| 	} | ||||
| 
 | ||||
| 	return Instruction{ | ||||
| 		OpCode:    OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), | ||||
| 		Dst:       dst, | ||||
| 		Offset:    -1, | ||||
| 		Constant:  int64(value), | ||||
| 		Reference: label, | ||||
| 	} | ||||
| 		OpCode:   op.opCode(JumpClass, ImmSource), | ||||
| 		Dst:      dst, | ||||
| 		Offset:   -1, | ||||
| 		Constant: int64(value), | ||||
| 	}.WithReference(label) | ||||
| } | ||||
| 
 | ||||
| // Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. | ||||
| // Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. | ||||
| // Requires kernel 5.1. | ||||
| func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { | ||||
| 	return Instruction{ | ||||
| 		OpCode:   op.opCode(Jump32Class, ImmSource), | ||||
| 		Dst:      dst, | ||||
| 		Offset:   -1, | ||||
| 		Constant: int64(value), | ||||
| 	}.WithReference(label) | ||||
| } | ||||
| 
 | ||||
| // Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. | ||||
| func (op JumpOp) Reg(dst, src Register, label string) Instruction { | ||||
| 	return Instruction{ | ||||
| 		OpCode: op.opCode(JumpClass, RegSource), | ||||
| 		Dst:    dst, | ||||
| 		Src:    src, | ||||
| 		Offset: -1, | ||||
| 	}.WithReference(label) | ||||
| } | ||||
| 
 | ||||
| // Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. | ||||
| // Requires kernel 5.1. | ||||
| func (op JumpOp) Reg32(dst, src Register, label string) Instruction { | ||||
| 	return Instruction{ | ||||
| 		OpCode: op.opCode(Jump32Class, RegSource), | ||||
| 		Dst:    dst, | ||||
| 		Src:    src, | ||||
| 		Offset: -1, | ||||
| 	}.WithReference(label) | ||||
| } | ||||
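
The 32-bit jump variants only differ in class. A sketch (not part of the diff) of constructing one and checking the class, assuming the JEq jump op.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Jump to "hit" if the lower 32 bits of R1 equal 7 (kernel 5.1+).
	ins := asm.JEq.Imm32(asm.R1, 7, "hit")
	fmt.Println(ins.OpCode.Class() == asm.Jump32Class) // true
	fmt.Println(ins.Reference())                       // hit
}
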
| 
 | ||||
| func (op JumpOp) opCode(class Class, source Source) OpCode { | ||||
| 	if op == Exit || op == Call || op == Ja { | ||||
| 		return Instruction{OpCode: InvalidOpCode} | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 
 | ||||
| 	return Instruction{ | ||||
| 		OpCode:    OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), | ||||
| 		Dst:       dst, | ||||
| 		Src:       src, | ||||
| 		Offset:    -1, | ||||
| 		Reference: label, | ||||
| 	} | ||||
| 	return OpCode(class).SetJumpOp(op).SetSource(source) | ||||
| } | ||||
| 
 | ||||
| // Label adjusts PC to the address of the label. | ||||
| func (op JumpOp) Label(label string) Instruction { | ||||
| 	if op == Call { | ||||
| 		return Instruction{ | ||||
| 			OpCode:    OpCode(JumpClass).SetJumpOp(Call), | ||||
| 			Src:       PseudoCall, | ||||
| 			Constant:  -1, | ||||
| 			Reference: label, | ||||
| 		} | ||||
| 			OpCode:   OpCode(JumpClass).SetJumpOp(Call), | ||||
| 			Src:      PseudoCall, | ||||
| 			Constant: -1, | ||||
| 		}.WithReference(label) | ||||
| 	} | ||||
| 
 | ||||
| 	return Instruction{ | ||||
| 		OpCode:    OpCode(JumpClass).SetJumpOp(op), | ||||
| 		Offset:    -1, | ||||
| 		Reference: label, | ||||
| 	} | ||||
| 		OpCode: OpCode(JumpClass).SetJumpOp(op), | ||||
| 		Offset: -1, | ||||
| 	}.WithReference(label) | ||||
| } | ||||
|  |  | |||
							
								
								
									
2 vendor/github.com/cilium/ebpf/asm/load_store.go generated vendored
|  | @ -111,7 +111,7 @@ func LoadMapPtr(dst Register, fd int) Instruction { | |||
| 		OpCode:   LoadImmOp(DWord), | ||||
| 		Dst:      dst, | ||||
| 		Src:      PseudoMapFD, | ||||
| 		Constant: int64(fd), | ||||
| 		Constant: int64(uint32(fd)), | ||||
| 	} | ||||
| } | ||||
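
The change from int64(fd) to int64(uint32(fd)) matters because the upper 32 bits of Constant are reserved for the value offset of direct map loads; sign-extending a negative placeholder fd would clobber them. A tiny illustration in plain Go.

package main

import "fmt"

func main() {
	fd := -1 // e.g. a sentinel or not-yet-resolved fd

	fmt.Printf("%#x\n", uint64(int64(fd)))         // 0xffffffffffffffff: sign extension fills the upper half
	fmt.Printf("%#x\n", uint64(int64(uint32(fd)))) // 0xffffffff: upper 32 bits stay free for the offset
}
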
| 
 | ||||
|  |  | |||
							
								
								
									
80 vendor/github.com/cilium/ebpf/asm/metadata.go generated vendored Normal file
|  | @ -0,0 +1,80 @@ | |||
| package asm | ||||
| 
 | ||||
| // Metadata contains metadata about an instruction. | ||||
| type Metadata struct { | ||||
| 	head *metaElement | ||||
| } | ||||
| 
 | ||||
| type metaElement struct { | ||||
| 	next       *metaElement | ||||
| 	key, value interface{} | ||||
| } | ||||
| 
 | ||||
| // Find the element containing key. | ||||
| // | ||||
| // Returns nil if there is no such element. | ||||
| func (m *Metadata) find(key interface{}) *metaElement { | ||||
| 	for e := m.head; e != nil; e = e.next { | ||||
| 		if e.key == key { | ||||
| 			return e | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Remove an element from the linked list. | ||||
| // | ||||
| // Copies as many elements of the list as necessary to remove r, but doesn't | ||||
| // perform a full copy. | ||||
| func (m *Metadata) remove(r *metaElement) { | ||||
| 	current := &m.head | ||||
| 	for e := m.head; e != nil; e = e.next { | ||||
| 		if e == r { | ||||
| 			// We've found the element we want to remove. | ||||
| 			*current = e.next | ||||
| 
 | ||||
| 			// No need to copy the tail. | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		// There is another element in front of the one we want to remove. | ||||
| 		// We have to copy it to be able to change metaElement.next. | ||||
| 		cpy := &metaElement{key: e.key, value: e.value} | ||||
| 		*current = cpy | ||||
| 		current = &cpy.next | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Set a key to a value. | ||||
| // | ||||
| // If value is nil, the key is removed. Avoids modifying old metadata by | ||||
| // copying if necessary. | ||||
| func (m *Metadata) Set(key, value interface{}) { | ||||
| 	if e := m.find(key); e != nil { | ||||
| 		if e.value == value { | ||||
| 			// Key is present and the value is the same. Nothing to do. | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		// Key is present with a different value. Create a copy of the list | ||||
| 		// which doesn't have the element in it. | ||||
| 		m.remove(e) | ||||
| 	} | ||||
| 
 | ||||
| 	// m.head is now a linked list that doesn't contain key. | ||||
| 	if value == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	m.head = &metaElement{key: key, value: value, next: m.head} | ||||
| } | ||||
| 
 | ||||
| // Get the value of a key. | ||||
| // | ||||
| // Returns nil if no value with the given key is present. | ||||
| func (m *Metadata) Get(key interface{}) interface{} { | ||||
| 	if e := m.find(key); e != nil { | ||||
| 		return e.value | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
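
Metadata is a copy-on-write list keyed by arbitrary comparable values, which is what WithSymbol, WithReference and AssociateMap build on. A sketch with a hypothetical key type, assuming the Mov helper.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

// noteKey is a hypothetical key; any comparable value can serve as one.
type noteKey struct{}

func main() {
	ins := asm.Mov.Imm(asm.R0, 0)
	ins.Metadata.Set(noteKey{}, "entry point")
	fmt.Println(ins.Metadata.Get(noteKey{})) // entry point

	ins.Metadata.Set(noteKey{}, nil)         // setting nil removes the key again
	fmt.Println(ins.Metadata.Get(noteKey{})) // <nil>
}
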
							
								
								
									
120 vendor/github.com/cilium/ebpf/asm/opcode.go generated vendored
|  | @ -7,14 +7,6 @@ import ( | |||
| 
 | ||||
| //go:generate stringer -output opcode_string.go -type=Class | ||||
| 
 | ||||
| type encoding int | ||||
| 
 | ||||
| const ( | ||||
| 	unknownEncoding encoding = iota | ||||
| 	loadOrStore | ||||
| 	jumpOrALU | ||||
| ) | ||||
| 
 | ||||
| // Class of operations | ||||
| // | ||||
| //    msb      lsb | ||||
|  | @ -26,31 +18,52 @@ type Class uint8 | |||
| const classMask OpCode = 0x07 | ||||
| 
 | ||||
| const ( | ||||
| 	// LdClass load memory | ||||
| 	// LdClass loads immediate values into registers. | ||||
| 	// Also used for non-standard load operations from cBPF. | ||||
| 	LdClass Class = 0x00 | ||||
| 	// LdXClass load memory from constant | ||||
| 	// LdXClass loads memory into registers. | ||||
| 	LdXClass Class = 0x01 | ||||
| 	// StClass load register from memory | ||||
| 	// StClass stores immediate values to memory. | ||||
| 	StClass Class = 0x02 | ||||
| 	// StXClass load register from constant | ||||
| 	// StXClass stores registers to memory. | ||||
| 	StXClass Class = 0x03 | ||||
| 	// ALUClass arithmetic operators | ||||
| 	// ALUClass describes arithmetic operators. | ||||
| 	ALUClass Class = 0x04 | ||||
| 	// JumpClass jump operators | ||||
| 	// JumpClass describes jump operators. | ||||
| 	JumpClass Class = 0x05 | ||||
| 	// ALU64Class arithmetic in 64 bit mode | ||||
| 	// Jump32Class describes jump operators with 32-bit comparisons. | ||||
| 	// Requires kernel 5.1. | ||||
| 	Jump32Class Class = 0x06 | ||||
| 	// ALU64Class describes arithmetic operators in 64-bit mode. | ||||
| 	ALU64Class Class = 0x07 | ||||
| ) | ||||
| 
 | ||||
| func (cls Class) encoding() encoding { | ||||
| 	switch cls { | ||||
| 	case LdClass, LdXClass, StClass, StXClass: | ||||
| 		return loadOrStore | ||||
| 	case ALU64Class, ALUClass, JumpClass: | ||||
| 		return jumpOrALU | ||||
| 	default: | ||||
| 		return unknownEncoding | ||||
| 	} | ||||
| // IsLoad checks if this is either LdClass or LdXClass. | ||||
| func (cls Class) IsLoad() bool { | ||||
| 	return cls == LdClass || cls == LdXClass | ||||
| } | ||||
| 
 | ||||
| // IsStore checks if this is either StClass or StXClass. | ||||
| func (cls Class) IsStore() bool { | ||||
| 	return cls == StClass || cls == StXClass | ||||
| } | ||||
| 
 | ||||
| func (cls Class) isLoadOrStore() bool { | ||||
| 	return cls.IsLoad() || cls.IsStore() | ||||
| } | ||||
| 
 | ||||
| // IsALU checks if this is either ALUClass or ALU64Class. | ||||
| func (cls Class) IsALU() bool { | ||||
| 	return cls == ALUClass || cls == ALU64Class | ||||
| } | ||||
| 
 | ||||
| // IsJump checks if this is either JumpClass or Jump32Class. | ||||
| func (cls Class) IsJump() bool { | ||||
| 	return cls == JumpClass || cls == Jump32Class | ||||
| } | ||||
| 
 | ||||
| func (cls Class) isJumpOrALU() bool { | ||||
| 	return cls.IsJump() || cls.IsALU() | ||||
| } | ||||
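
The encoding-based switch was replaced by these predicates, most of which are exported for callers too. A short sketch, assuming the Mov helper.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	op := asm.Mov.Reg(asm.R0, asm.R1).OpCode
	fmt.Println(op.Class().IsALU())  // true
	fmt.Println(op.Class().IsJump()) // false

	fmt.Println(asm.Jump32Class.IsJump()) // true
}
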
| 
 | ||||
| // OpCode is a packed eBPF opcode. | ||||
|  | @ -69,13 +82,13 @@ const InvalidOpCode OpCode = 0xff | |||
| // rawInstructions returns the number of BPF instructions required | ||||
| // to encode this opcode. | ||||
| func (op OpCode) rawInstructions() int { | ||||
| 	if op.isDWordLoad() { | ||||
| 	if op.IsDWordLoad() { | ||||
| 		return 2 | ||||
| 	} | ||||
| 	return 1 | ||||
| } | ||||
| 
 | ||||
| func (op OpCode) isDWordLoad() bool { | ||||
| func (op OpCode) IsDWordLoad() bool { | ||||
| 	return op == LoadImmOp(DWord) | ||||
| } | ||||
| 
 | ||||
|  | @ -86,7 +99,7 @@ func (op OpCode) Class() Class { | |||
| 
 | ||||
| // Mode returns the mode for load and store operations. | ||||
| func (op OpCode) Mode() Mode { | ||||
| 	if op.Class().encoding() != loadOrStore { | ||||
| 	if !op.Class().isLoadOrStore() { | ||||
| 		return InvalidMode | ||||
| 	} | ||||
| 	return Mode(op & modeMask) | ||||
|  | @ -94,7 +107,7 @@ func (op OpCode) Mode() Mode { | |||
| 
 | ||||
| // Size returns the size for load and store operations. | ||||
| func (op OpCode) Size() Size { | ||||
| 	if op.Class().encoding() != loadOrStore { | ||||
| 	if !op.Class().isLoadOrStore() { | ||||
| 		return InvalidSize | ||||
| 	} | ||||
| 	return Size(op & sizeMask) | ||||
|  | @ -102,7 +115,7 @@ func (op OpCode) Size() Size { | |||
| 
 | ||||
| // Source returns the source for branch and ALU operations. | ||||
| func (op OpCode) Source() Source { | ||||
| 	if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { | ||||
| 	if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { | ||||
| 		return InvalidSource | ||||
| 	} | ||||
| 	return Source(op & sourceMask) | ||||
|  | @ -110,7 +123,7 @@ func (op OpCode) Source() Source { | |||
| 
 | ||||
| // ALUOp returns the ALUOp. | ||||
| func (op OpCode) ALUOp() ALUOp { | ||||
| 	if op.Class().encoding() != jumpOrALU { | ||||
| 	if !op.Class().IsALU() { | ||||
| 		return InvalidALUOp | ||||
| 	} | ||||
| 	return ALUOp(op & aluMask) | ||||
|  | @ -125,18 +138,27 @@ func (op OpCode) Endianness() Endianness { | |||
| } | ||||
| 
 | ||||
| // JumpOp returns the JumpOp. | ||||
| // Returns InvalidJumpOp if it doesn't encode a jump. | ||||
| func (op OpCode) JumpOp() JumpOp { | ||||
| 	if op.Class().encoding() != jumpOrALU { | ||||
| 	if !op.Class().IsJump() { | ||||
| 		return InvalidJumpOp | ||||
| 	} | ||||
| 	return JumpOp(op & jumpMask) | ||||
| 
 | ||||
| 	jumpOp := JumpOp(op & jumpMask) | ||||
| 
 | ||||
| 	// Some JumpOps are only supported by JumpClass, not Jump32Class. | ||||
| 	if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) { | ||||
| 		return InvalidJumpOp | ||||
| 	} | ||||
| 
 | ||||
| 	return jumpOp | ||||
| } | ||||
| 
 | ||||
| // SetMode sets the mode on load and store operations. | ||||
| // | ||||
| // Returns InvalidOpCode if op is of the wrong class. | ||||
| func (op OpCode) SetMode(mode Mode) OpCode { | ||||
| 	if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { | ||||
| 	if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 	return (op & ^modeMask) | OpCode(mode) | ||||
|  | @ -146,7 +168,7 @@ func (op OpCode) SetMode(mode Mode) OpCode { | |||
| // | ||||
| // Returns InvalidOpCode if op is of the wrong class. | ||||
| func (op OpCode) SetSize(size Size) OpCode { | ||||
| 	if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { | ||||
| 	if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 	return (op & ^sizeMask) | OpCode(size) | ||||
|  | @ -156,7 +178,7 @@ func (op OpCode) SetSize(size Size) OpCode { | |||
| // | ||||
| // Returns InvalidOpCode if op is of the wrong class. | ||||
| func (op OpCode) SetSource(source Source) OpCode { | ||||
| 	if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { | ||||
| 	if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 	return (op & ^sourceMask) | OpCode(source) | ||||
|  | @ -166,8 +188,7 @@ func (op OpCode) SetSource(source Source) OpCode { | |||
| // | ||||
| // Returns InvalidOpCode if op is of the wrong class. | ||||
| func (op OpCode) SetALUOp(alu ALUOp) OpCode { | ||||
| 	class := op.Class() | ||||
| 	if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { | ||||
| 	if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 	return (op & ^aluMask) | OpCode(alu) | ||||
|  | @ -177,17 +198,25 @@ func (op OpCode) SetALUOp(alu ALUOp) OpCode { | |||
| // | ||||
| // Returns InvalidOpCode if op is of the wrong class. | ||||
| func (op OpCode) SetJumpOp(jump JumpOp) OpCode { | ||||
| 	if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { | ||||
| 	if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 	return (op & ^jumpMask) | OpCode(jump) | ||||
| 
 | ||||
| 	newOp := (op & ^jumpMask) | OpCode(jump) | ||||
| 
 | ||||
| 	// Check newOp is legal. | ||||
| 	if newOp.JumpOp() == InvalidJumpOp { | ||||
| 		return InvalidOpCode | ||||
| 	} | ||||
| 
 | ||||
| 	return newOp | ||||
| } | ||||
| 
 | ||||
| func (op OpCode) String() string { | ||||
| 	var f strings.Builder | ||||
| 
 | ||||
| 	switch class := op.Class(); class { | ||||
| 	case LdClass, LdXClass, StClass, StXClass: | ||||
| 	switch class := op.Class(); { | ||||
| 	case class.isLoadOrStore(): | ||||
| 		f.WriteString(strings.TrimSuffix(class.String(), "Class")) | ||||
| 
 | ||||
| 		mode := op.Mode() | ||||
|  | @ -204,7 +233,7 @@ func (op OpCode) String() string { | |||
| 			f.WriteString("B") | ||||
| 		} | ||||
| 
 | ||||
| 	case ALU64Class, ALUClass: | ||||
| 	case class.IsALU(): | ||||
| 		f.WriteString(op.ALUOp().String()) | ||||
| 
 | ||||
| 		if op.ALUOp() == Swap { | ||||
|  | @ -218,8 +247,13 @@ func (op OpCode) String() string { | |||
| 			f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) | ||||
| 		} | ||||
| 
 | ||||
| 	case JumpClass: | ||||
| 	case class.IsJump(): | ||||
| 		f.WriteString(op.JumpOp().String()) | ||||
| 
 | ||||
| 		if class == Jump32Class { | ||||
| 			f.WriteString("32") | ||||
| 		} | ||||
| 
 | ||||
| 		if jop := op.JumpOp(); jop != Exit && jop != Call { | ||||
| 			f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) | ||||
| 		} | ||||
|  |  | |||
							
								
								
									
18 vendor/github.com/cilium/ebpf/asm/opcode_string.go generated vendored
|  | @ -14,25 +14,17 @@ func _() { | |||
| 	_ = x[StXClass-3] | ||||
| 	_ = x[ALUClass-4] | ||||
| 	_ = x[JumpClass-5] | ||||
| 	_ = x[Jump32Class-6] | ||||
| 	_ = x[ALU64Class-7] | ||||
| } | ||||
| 
 | ||||
| const ( | ||||
| 	_Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" | ||||
| 	_Class_name_1 = "ALU64Class" | ||||
| ) | ||||
| const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" | ||||
| 
 | ||||
| var ( | ||||
| 	_Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} | ||||
| ) | ||||
| var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} | ||||
| 
 | ||||
| func (i Class) String() string { | ||||
| 	switch { | ||||
| 	case 0 <= i && i <= 5: | ||||
| 		return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] | ||||
| 	case i == 7: | ||||
| 		return _Class_name_1 | ||||
| 	default: | ||||
| 	if i >= Class(len(_Class_index)-1) { | ||||
| 		return "Class(" + strconv.FormatInt(int64(i), 10) + ")" | ||||
| 	} | ||||
| 	return _Class_name[_Class_index[i]:_Class_index[i+1]] | ||||
| } | ||||
|  |  | |||
							
								
								
									
1 vendor/github.com/cilium/ebpf/asm/register.go generated vendored
|  | @ -38,6 +38,7 @@ const ( | |||
| 	PseudoMapFD    = R1 // BPF_PSEUDO_MAP_FD | ||||
| 	PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE | ||||
| 	PseudoCall     = R1 // BPF_PSEUDO_CALL | ||||
| 	PseudoFunc     = R4 // BPF_PSEUDO_FUNC | ||||
| ) | ||||
| 
 | ||||
| func (r Register) String() string { | ||||
|  |  | |||
							
								
								
									
65 vendor/github.com/cilium/ebpf/attachtype_string.go generated vendored Normal file
|  | @ -0,0 +1,65 @@ | |||
| // Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. | ||||
| 
 | ||||
| package ebpf | ||||
| 
 | ||||
| import "strconv" | ||||
| 
 | ||||
| func _() { | ||||
| 	// An "invalid array index" compiler error signifies that the constant values have changed. | ||||
| 	// Re-run the stringer command to generate them again. | ||||
| 	var x [1]struct{} | ||||
| 	_ = x[AttachNone-0] | ||||
| 	_ = x[AttachCGroupInetIngress-0] | ||||
| 	_ = x[AttachCGroupInetEgress-1] | ||||
| 	_ = x[AttachCGroupInetSockCreate-2] | ||||
| 	_ = x[AttachCGroupSockOps-3] | ||||
| 	_ = x[AttachSkSKBStreamParser-4] | ||||
| 	_ = x[AttachSkSKBStreamVerdict-5] | ||||
| 	_ = x[AttachCGroupDevice-6] | ||||
| 	_ = x[AttachSkMsgVerdict-7] | ||||
| 	_ = x[AttachCGroupInet4Bind-8] | ||||
| 	_ = x[AttachCGroupInet6Bind-9] | ||||
| 	_ = x[AttachCGroupInet4Connect-10] | ||||
| 	_ = x[AttachCGroupInet6Connect-11] | ||||
| 	_ = x[AttachCGroupInet4PostBind-12] | ||||
| 	_ = x[AttachCGroupInet6PostBind-13] | ||||
| 	_ = x[AttachCGroupUDP4Sendmsg-14] | ||||
| 	_ = x[AttachCGroupUDP6Sendmsg-15] | ||||
| 	_ = x[AttachLircMode2-16] | ||||
| 	_ = x[AttachFlowDissector-17] | ||||
| 	_ = x[AttachCGroupSysctl-18] | ||||
| 	_ = x[AttachCGroupUDP4Recvmsg-19] | ||||
| 	_ = x[AttachCGroupUDP6Recvmsg-20] | ||||
| 	_ = x[AttachCGroupGetsockopt-21] | ||||
| 	_ = x[AttachCGroupSetsockopt-22] | ||||
| 	_ = x[AttachTraceRawTp-23] | ||||
| 	_ = x[AttachTraceFEntry-24] | ||||
| 	_ = x[AttachTraceFExit-25] | ||||
| 	_ = x[AttachModifyReturn-26] | ||||
| 	_ = x[AttachLSMMac-27] | ||||
| 	_ = x[AttachTraceIter-28] | ||||
| 	_ = x[AttachCgroupInet4GetPeername-29] | ||||
| 	_ = x[AttachCgroupInet6GetPeername-30] | ||||
| 	_ = x[AttachCgroupInet4GetSockname-31] | ||||
| 	_ = x[AttachCgroupInet6GetSockname-32] | ||||
| 	_ = x[AttachXDPDevMap-33] | ||||
| 	_ = x[AttachCgroupInetSockRelease-34] | ||||
| 	_ = x[AttachXDPCPUMap-35] | ||||
| 	_ = x[AttachSkLookup-36] | ||||
| 	_ = x[AttachXDP-37] | ||||
| 	_ = x[AttachSkSKBVerdict-38] | ||||
| 	_ = x[AttachSkReuseportSelect-39] | ||||
| 	_ = x[AttachSkReuseportSelectOrMigrate-40] | ||||
| 	_ = x[AttachPerfEvent-41] | ||||
| } | ||||
| 
 | ||||
| const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent" | ||||
| 
 | ||||
| var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610} | ||||
| 
 | ||||
| func (i AttachType) String() string { | ||||
| 	if i >= AttachType(len(_AttachType_index)-1) { | ||||
| 		return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" | ||||
| 	} | ||||
| 	return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] | ||||
| } | ||||
							
								
								
									
897 vendor/github.com/cilium/ebpf/btf/btf.go generated vendored Normal file
|  | @ -0,0 +1,897 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"debug/elf" | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| const btfMagic = 0xeB9F | ||||
| 
 | ||||
| // Errors returned by BTF functions. | ||||
| var ( | ||||
| 	ErrNotSupported   = internal.ErrNotSupported | ||||
| 	ErrNotFound       = errors.New("not found") | ||||
| 	ErrNoExtendedInfo = errors.New("no extended info") | ||||
| ) | ||||
| 
 | ||||
| // ID represents the unique ID of a BTF object. | ||||
| type ID = sys.BTFID | ||||
| 
 | ||||
| // Spec represents decoded BTF. | ||||
| type Spec struct { | ||||
| 	// Data from .BTF. | ||||
| 	rawTypes []rawType | ||||
| 	strings  *stringTable | ||||
| 
 | ||||
| 	// All types contained by the spec. For the base type, the position of | ||||
| 	// a type in the slice is its ID. | ||||
| 	types types | ||||
| 
 | ||||
| 	// Type IDs indexed by type. | ||||
| 	typeIDs map[Type]TypeID | ||||
| 
 | ||||
| 	// Types indexed by essential name. | ||||
| 	// Includes all struct flavors and types with the same name. | ||||
| 	namedTypes map[essentialName][]Type | ||||
| 
 | ||||
| 	byteOrder binary.ByteOrder | ||||
| } | ||||
| 
 | ||||
| type btfHeader struct { | ||||
| 	Magic   uint16 | ||||
| 	Version uint8 | ||||
| 	Flags   uint8 | ||||
| 	HdrLen  uint32 | ||||
| 
 | ||||
| 	TypeOff   uint32 | ||||
| 	TypeLen   uint32 | ||||
| 	StringOff uint32 | ||||
| 	StringLen uint32 | ||||
| } | ||||
| 
 | ||||
| // typeStart returns the offset from the beginning of the .BTF section | ||||
| // to the start of its type entries. | ||||
| func (h *btfHeader) typeStart() int64 { | ||||
| 	return int64(h.HdrLen + h.TypeOff) | ||||
| } | ||||
| 
 | ||||
| // stringStart returns the offset from the beginning of the .BTF section | ||||
| // to the start of its string table. | ||||
| func (h *btfHeader) stringStart() int64 { | ||||
| 	return int64(h.HdrLen + h.StringOff) | ||||
| } | ||||
| 
 | ||||
| // LoadSpec opens file and calls LoadSpecFromReader on it. | ||||
| func LoadSpec(file string) (*Spec, error) { | ||||
| 	fh, err := os.Open(file) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer fh.Close() | ||||
| 
 | ||||
| 	return LoadSpecFromReader(fh) | ||||
| } | ||||
| 
 | ||||
| // LoadSpecFromReader reads from an ELF or a raw BTF blob. | ||||
| // | ||||
| // Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos | ||||
| // may be nil. | ||||
| func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { | ||||
| 	file, err := internal.NewSafeELFFile(rd) | ||||
| 	if err != nil { | ||||
| 		if bo := guessRawBTFByteOrder(rd); bo != nil { | ||||
| 			// Try to parse a naked BTF blob. This will return an error if | ||||
| 			// we encounter a Datasec, since we can't fix it up. | ||||
| 			spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil) | ||||
| 			return spec, err | ||||
| 		} | ||||
| 
 | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return loadSpecFromELF(file) | ||||
| } | ||||
| 
 | ||||
| // LoadSpecAndExtInfosFromReader reads from an ELF. | ||||
| // | ||||
| // ExtInfos may be nil if the ELF doesn't contain section metadata. | ||||
| // Returns ErrNotFound if the ELF contains no BTF. | ||||
| func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { | ||||
| 	file, err := internal.NewSafeELFFile(rd) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	spec, err := loadSpecFromELF(file) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings) | ||||
| 	if err != nil && !errors.Is(err, ErrNotFound) { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return spec, extInfos, nil | ||||
| } | ||||
| 
 | ||||
| // variableOffsets extracts all symbols offsets from an ELF and indexes them by | ||||
| // section and variable name. | ||||
| // | ||||
| // References to variables in BTF data sections carry unsigned 32-bit offsets. | ||||
| // Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well | ||||
| // beyond this range. Since these symbols cannot be described by BTF info, | ||||
| // ignore them here. | ||||
| func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) { | ||||
| 	symbols, err := file.Symbols() | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't read symbols: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	variableOffsets := make(map[variable]uint32) | ||||
| 	for _, symbol := range symbols { | ||||
| 		if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { | ||||
| 			// Ignore things like SHN_ABS | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if symbol.Value > math.MaxUint32 { | ||||
| 			// VarSecinfo offset is u32, cannot reference symbols in higher regions. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if int(symbol.Section) >= len(file.Sections) { | ||||
| 			return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) | ||||
| 		} | ||||
| 
 | ||||
| 		secName := file.Sections[symbol.Section].Name | ||||
| 		variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) | ||||
| 	} | ||||
| 
 | ||||
| 	return variableOffsets, nil | ||||
| } | ||||
| 
 | ||||
| func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { | ||||
| 	var ( | ||||
| 		btfSection   *elf.Section | ||||
| 		sectionSizes = make(map[string]uint32) | ||||
| 	) | ||||
| 
 | ||||
| 	for _, sec := range file.Sections { | ||||
| 		switch sec.Name { | ||||
| 		case ".BTF": | ||||
| 			btfSection = sec | ||||
| 		default: | ||||
| 			if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { | ||||
| 				break | ||||
| 			} | ||||
| 
 | ||||
| 			if sec.Size > math.MaxUint32 { | ||||
| 				return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) | ||||
| 			} | ||||
| 
 | ||||
| 			sectionSizes[sec.Name] = uint32(sec.Size) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if btfSection == nil { | ||||
| 		return nil, fmt.Errorf("btf: %w", ErrNotFound) | ||||
| 	} | ||||
| 
 | ||||
| 	vars, err := variableOffsets(file) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if btfSection.ReaderAt == nil { | ||||
| 		return nil, fmt.Errorf("compressed BTF is not supported") | ||||
| 	} | ||||
| 
 | ||||
| 	rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil) | ||||
| } | ||||
| 
 | ||||
| func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, | ||||
| 	baseTypes types, baseStrings *stringTable) (*Spec, error) { | ||||
| 
 | ||||
| 	rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return inflateSpec(rawTypes, rawStrings, bo, baseTypes) | ||||
| } | ||||
| 
 | ||||
| func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder, | ||||
| 	baseTypes types) (*Spec, error) { | ||||
| 
 | ||||
| 	types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes))) | ||||
| 
 | ||||
| 	return &Spec{ | ||||
| 		rawTypes:   rawTypes, | ||||
| 		namedTypes: typesByName, | ||||
| 		typeIDs:    typeIDs, | ||||
| 		types:      types, | ||||
| 		strings:    rawStrings, | ||||
| 		byteOrder:  bo, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) { | ||||
| 	namedTypes := 0 | ||||
| 	for _, typ := range types { | ||||
| 		if typ.TypeName() != "" { | ||||
| 			// Do a pre-pass to figure out how big typesByName has to be. | ||||
| 			// Most types have unique names, so it's OK to ignore essentialName | ||||
| 			// here. | ||||
| 			namedTypes++ | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	typeIDs := make(map[Type]TypeID, len(types)) | ||||
| 	typesByName := make(map[essentialName][]Type, namedTypes) | ||||
| 
 | ||||
| 	for i, typ := range types { | ||||
| 		if name := newEssentialName(typ.TypeName()); name != "" { | ||||
| 			typesByName[name] = append(typesByName[name], typ) | ||||
| 		} | ||||
| 		typeIDs[typ] = TypeID(i) + typeIDOffset | ||||
| 	} | ||||
| 
 | ||||
| 	return typeIDs, typesByName | ||||
| } | ||||
| 
 | ||||
| // LoadKernelSpec returns the current kernel's BTF information. | ||||
| // | ||||
| // Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system | ||||
| // for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. | ||||
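| // | ||||
| // A minimal usage sketch (the kernel struct name below is an illustrative | ||||
| // assumption, not something this package guarantees to exist): | ||||
| // | ||||
| //	spec, err := LoadKernelSpec() | ||||
| //	if err != nil { | ||||
| //		return err // BTF is missing or disabled; err wraps ErrNotSupported | ||||
| //	} | ||||
| //	t, err := spec.AnyTypeByName("task_struct") | ||||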
| func LoadKernelSpec() (*Spec, error) { | ||||
| 	fh, err := os.Open("/sys/kernel/btf/vmlinux") | ||||
| 	if err == nil { | ||||
| 		defer fh.Close() | ||||
| 
 | ||||
| 		return loadRawSpec(fh, internal.NativeEndian, nil, nil) | ||||
| 	} | ||||
| 
 | ||||
| 	file, err := findVMLinux() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer file.Close() | ||||
| 
 | ||||
| 	return loadSpecFromELF(file) | ||||
| } | ||||
| 
 | ||||
| // findVMLinux scans multiple well-known paths for vmlinux kernel images. | ||||
| func findVMLinux() (*internal.SafeELFFile, error) { | ||||
| 	release, err := internal.KernelRelease() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Use the same list of locations as libbpf: | ||||
| 	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 | ||||
| 	locations := []string{ | ||||
| 		"/boot/vmlinux-%s", | ||||
| 		"/lib/modules/%s/vmlinux-%[1]s", | ||||
| 		"/lib/modules/%s/build/vmlinux", | ||||
| 		"/usr/lib/modules/%s/kernel/vmlinux", | ||||
| 		"/usr/lib/debug/boot/vmlinux-%s", | ||||
| 		"/usr/lib/debug/boot/vmlinux-%s.debug", | ||||
| 		"/usr/lib/debug/lib/modules/%s/vmlinux", | ||||
| 	} | ||||
| 
 | ||||
| 	for _, loc := range locations { | ||||
| 		file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release)) | ||||
| 		if errors.Is(err, os.ErrNotExist) { | ||||
| 			continue | ||||
| 		} | ||||
| 		return file, err | ||||
| 	} | ||||
| 
 | ||||
| 	return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| // parseBTFHeader parses the header of the .BTF section. | ||||
| func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { | ||||
| 	var header btfHeader | ||||
| 	if err := binary.Read(r, bo, &header); err != nil { | ||||
| 		return nil, fmt.Errorf("can't read header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Magic != btfMagic { | ||||
| 		return nil, fmt.Errorf("incorrect magic value %v", header.Magic) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Version != 1 { | ||||
| 		return nil, fmt.Errorf("unexpected version %v", header.Version) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Flags != 0 { | ||||
| 		return nil, fmt.Errorf("unsupported flags %v", header.Flags) | ||||
| 	} | ||||
| 
 | ||||
| 	remainder := int64(header.HdrLen) - int64(binary.Size(&header)) | ||||
| 	if remainder < 0 { | ||||
| 		return nil, errors.New("header length shorter than btfHeader size") | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { | ||||
| 		return nil, fmt.Errorf("header padding: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &header, nil | ||||
| } | ||||
| 
 | ||||
| func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { | ||||
| 	buf := new(bufio.Reader) | ||||
| 	for _, bo := range []binary.ByteOrder{ | ||||
| 		binary.LittleEndian, | ||||
| 		binary.BigEndian, | ||||
| 	} { | ||||
| 		buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64)) | ||||
| 		if _, err := parseBTFHeader(buf, bo); err == nil { | ||||
| 			return bo | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // parseBTF reads a .BTF section into memory and parses it into a list of | ||||
| // raw types and a string table. | ||||
| func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) { | ||||
| 	buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) | ||||
| 	header, err := parseBTFHeader(buf, bo) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)), | ||||
| 		baseStrings) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't read type names: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) | ||||
| 	rawTypes, err := readTypes(buf, bo, header.TypeLen) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't read types: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return rawTypes, rawStrings, nil | ||||
| } | ||||
| 
 | ||||
| type variable struct { | ||||
| 	section string | ||||
| 	name    string | ||||
| } | ||||
| 
 | ||||
| func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { | ||||
| 	for i, rawType := range rawTypes { | ||||
| 		if rawType.Kind() != kindDatasec { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		name, err := rawStrings.Lookup(rawType.NameOff) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		if name == ".kconfig" || name == ".ksyms" { | ||||
| 			return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) | ||||
| 		} | ||||
| 
 | ||||
| 		if rawTypes[i].SizeType != 0 { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		size, ok := sectionSizes[name] | ||||
| 		if !ok { | ||||
| 			return fmt.Errorf("data section %s: missing size", name) | ||||
| 		} | ||||
| 
 | ||||
| 		rawTypes[i].SizeType = size | ||||
| 
 | ||||
| 		secinfos := rawType.data.([]btfVarSecinfo) | ||||
| 		for j, secInfo := range secinfos { | ||||
| 			id := int(secInfo.Type - 1) | ||||
| 			if id >= len(rawTypes) { | ||||
| 				return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) | ||||
| 			} | ||||
| 
 | ||||
| 			varName, err := rawStrings.Lookup(rawTypes[id].NameOff) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) | ||||
| 			} | ||||
| 
 | ||||
| 			offset, ok := variableOffsets[variable{name, varName}] | ||||
| 			if !ok { | ||||
| 				return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) | ||||
| 			} | ||||
| 
 | ||||
| 			secinfos[j].Offset = offset | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Copy creates a copy of Spec. | ||||
| func (s *Spec) Copy() *Spec { | ||||
| 	types := copyTypes(s.types, nil) | ||||
| 
 | ||||
| 	typeIDOffset := TypeID(0) | ||||
| 	if len(s.types) != 0 { | ||||
| 		typeIDOffset = s.typeIDs[s.types[0]] | ||||
| 	} | ||||
| 	typeIDs, typesByName := indexTypes(types, typeIDOffset) | ||||
| 
 | ||||
| 	// NB: Other parts of spec are not copied since they are immutable. | ||||
| 	return &Spec{ | ||||
| 		s.rawTypes, | ||||
| 		s.strings, | ||||
| 		types, | ||||
| 		typeIDs, | ||||
| 		typesByName, | ||||
| 		s.byteOrder, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| type marshalOpts struct { | ||||
| 	ByteOrder        binary.ByteOrder | ||||
| 	StripFuncLinkage bool | ||||
| } | ||||
| 
 | ||||
| func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { | ||||
| 	var ( | ||||
| 		buf       bytes.Buffer | ||||
| 		header    = new(btfHeader) | ||||
| 		headerLen = binary.Size(header) | ||||
| 	) | ||||
| 
 | ||||
| 	// Reserve space for the header. We have to write it last since | ||||
| 	// we don't know the size of the type section yet. | ||||
| 	_, _ = buf.Write(make([]byte, headerLen)) | ||||
| 
 | ||||
| 	// Write type section, just after the header. | ||||
| 	for _, raw := range s.rawTypes { | ||||
| 		switch { | ||||
| 		case opts.StripFuncLinkage && raw.Kind() == kindFunc: | ||||
| 			raw.SetLinkage(StaticFunc) | ||||
| 		} | ||||
| 
 | ||||
| 		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { | ||||
| 			return nil, fmt.Errorf("can't marshal BTF: %w", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	typeLen := uint32(buf.Len() - headerLen) | ||||
| 
 | ||||
| 	// Write string section after type section. | ||||
| 	stringsLen := s.strings.Length() | ||||
| 	buf.Grow(stringsLen) | ||||
| 	if err := s.strings.Marshal(&buf); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Fill out the header, and write it out. | ||||
| 	header = &btfHeader{ | ||||
| 		Magic:     btfMagic, | ||||
| 		Version:   1, | ||||
| 		Flags:     0, | ||||
| 		HdrLen:    uint32(headerLen), | ||||
| 		TypeOff:   0, | ||||
| 		TypeLen:   typeLen, | ||||
| 		StringOff: typeLen, | ||||
| 		StringLen: uint32(stringsLen), | ||||
| 	} | ||||
| 
 | ||||
| 	raw := buf.Bytes() | ||||
| 	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't write header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return raw, nil | ||||
| } | ||||
| 
 | ||||
| type sliceWriter []byte | ||||
| 
 | ||||
| func (sw sliceWriter) Write(p []byte) (int, error) { | ||||
| 	if len(p) != len(sw) { | ||||
| 		return 0, errors.New("size doesn't match") | ||||
| 	} | ||||
| 
 | ||||
| 	return copy(sw, p), nil | ||||
| } | ||||
| 
 | ||||
| // TypeByID returns the BTF Type with the given type ID. | ||||
| // | ||||
| // Returns an error wrapping ErrNotFound if a Type with the given ID | ||||
| // does not exist in the Spec. | ||||
| func (s *Spec) TypeByID(id TypeID) (Type, error) { | ||||
| 	return s.types.ByID(id) | ||||
| } | ||||
| 
 | ||||
| // TypeID returns the ID for a given Type. | ||||
| // | ||||
| // Returns an error wrapping ErrNotFound if the type isn't part of the Spec. | ||||
| func (s *Spec) TypeID(typ Type) (TypeID, error) { | ||||
| 	if _, ok := typ.(*Void); ok { | ||||
| 		// Equality is weird for void, since it is a zero sized type. | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 
 | ||||
| 	id, ok := s.typeIDs[typ] | ||||
| 	if !ok { | ||||
| 		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) | ||||
| 	} | ||||
| 
 | ||||
| 	return id, nil | ||||
| } | ||||
| 
 | ||||
| // AnyTypesByName returns a list of BTF Types with the given name. | ||||
| // | ||||
| // If the BTF blob describes multiple compilation units like vmlinux, multiple | ||||
| // Types with the same name and kind can exist, but might not describe the same | ||||
| // data structure. | ||||
| // | ||||
| // Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. | ||||
| func (s *Spec) AnyTypesByName(name string) ([]Type, error) { | ||||
| 	types := s.namedTypes[newEssentialName(name)] | ||||
| 	if len(types) == 0 { | ||||
| 		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) | ||||
| 	} | ||||
| 
 | ||||
| 	// Return a copy to prevent changes to namedTypes. | ||||
| 	result := make([]Type, 0, len(types)) | ||||
| 	for _, t := range types { | ||||
| 		// Match against the full name, not just the essential one | ||||
| 		// in case the type being looked up is a struct flavor. | ||||
| 		if t.TypeName() == name { | ||||
| 			result = append(result, t) | ||||
| 		} | ||||
| 	} | ||||
| 	return result, nil | ||||
| } | ||||
| 
 | ||||
| // AnyTypeByName returns a Type with the given name. | ||||
| // | ||||
| // Returns an error if multiple types of that name exist. | ||||
| func (s *Spec) AnyTypeByName(name string) (Type, error) { | ||||
| 	types, err := s.AnyTypesByName(name) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if len(types) > 1 { | ||||
| 		return nil, fmt.Errorf("found multiple types: %v", types) | ||||
| 	} | ||||
| 
 | ||||
| 	return types[0], nil | ||||
| } | ||||
| 
 | ||||
| // TypeByName searches for a Type with a specific name. Since multiple | ||||
| // Types with the same name can exist, the parameter typ is taken to | ||||
| // narrow down the search in case of a clash. | ||||
| // | ||||
| // typ must be a non-nil pointer to an implementation of a Type. | ||||
| // On success, the address of the found Type will be copied to typ. | ||||
| // | ||||
| // Returns an error wrapping ErrNotFound if no matching | ||||
| // Type exists in the Spec. If multiple candidates are found, | ||||
| // an error is returned. | ||||
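| // | ||||
| // A usage sketch (spec is a *Spec obtained elsewhere; the struct name is an | ||||
| // illustrative assumption): | ||||
| // | ||||
| //	var task *Struct | ||||
| //	if err := spec.TypeByName("task_struct", &task); err != nil { | ||||
| //		// no unambiguous *Struct of that name in the Spec | ||||
| //	} | ||||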
| func (s *Spec) TypeByName(name string, typ interface{}) error { | ||||
| 	typValue := reflect.ValueOf(typ) | ||||
| 	if typValue.Kind() != reflect.Ptr { | ||||
| 		return fmt.Errorf("%T is not a pointer", typ) | ||||
| 	} | ||||
| 
 | ||||
| 	typPtr := typValue.Elem() | ||||
| 	if !typPtr.CanSet() { | ||||
| 		return fmt.Errorf("%T cannot be set", typ) | ||||
| 	} | ||||
| 
 | ||||
| 	wanted := typPtr.Type() | ||||
| 	if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) { | ||||
| 		return fmt.Errorf("%T does not satisfy Type interface", typ) | ||||
| 	} | ||||
| 
 | ||||
| 	types, err := s.AnyTypesByName(name) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var candidate Type | ||||
| 	for _, typ := range types { | ||||
| 		if reflect.TypeOf(typ) != wanted { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if candidate != nil { | ||||
| 			return fmt.Errorf("type %s: multiple candidates for %T", name, typ) | ||||
| 		} | ||||
| 
 | ||||
| 		candidate = typ | ||||
| 	} | ||||
| 
 | ||||
| 	if candidate == nil { | ||||
| 		return fmt.Errorf("type %s: %w", name, ErrNotFound) | ||||
| 	} | ||||
| 
 | ||||
| 	typPtr.Set(reflect.ValueOf(candidate)) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // LoadSplitSpecFromReader loads split BTF from a reader. | ||||
| // | ||||
| // Types from base are used to resolve references in the split BTF. | ||||
| // The returned Spec only contains types from the split BTF, not from the base. | ||||
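| // | ||||
| // A usage sketch for kernel module BTF (the module name is an illustrative | ||||
| // assumption): | ||||
| // | ||||
| //	base, err := LoadKernelSpec() | ||||
| //	if err != nil { | ||||
| //		return err | ||||
| //	} | ||||
| //	f, err := os.Open("/sys/kernel/btf/nf_conntrack") | ||||
| //	if err != nil { | ||||
| //		return err | ||||
| //	} | ||||
| //	defer f.Close() | ||||
| //	modSpec, err := LoadSplitSpecFromReader(f, base) | ||||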
| func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { | ||||
| 	return loadRawSpec(r, internal.NativeEndian, base.types, base.strings) | ||||
| } | ||||
| 
 | ||||
| // TypesIterator iterates over types of a given spec. | ||||
| type TypesIterator struct { | ||||
| 	spec  *Spec | ||||
| 	index int | ||||
| 	// The last visited type in the spec. | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| // Iterate returns the types iterator. | ||||
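| // | ||||
| // A usage sketch, printing every named type in the Spec: | ||||
| // | ||||
| //	iter := spec.Iterate() | ||||
| //	for iter.Next() { | ||||
| //		if name := iter.Type.TypeName(); name != "" { | ||||
| //			fmt.Println(name) | ||||
| //		} | ||||
| //	} | ||||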
| func (s *Spec) Iterate() *TypesIterator { | ||||
| 	return &TypesIterator{spec: s, index: 0} | ||||
| } | ||||
| 
 | ||||
| // Next returns true as long as there are any remaining types. | ||||
| func (iter *TypesIterator) Next() bool { | ||||
| 	if len(iter.spec.types) <= iter.index { | ||||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	iter.Type = iter.spec.types[iter.index] | ||||
| 	iter.index++ | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Handle is a reference to BTF loaded into the kernel. | ||||
| type Handle struct { | ||||
| 	fd *sys.FD | ||||
| 
 | ||||
| 	// Size of the raw BTF in bytes. | ||||
| 	size uint32 | ||||
| } | ||||
| 
 | ||||
| // NewHandle loads BTF into the kernel. | ||||
| // | ||||
| // Returns ErrNotSupported if BTF is not supported. | ||||
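| // | ||||
| // A usage sketch, assuming spec is a *Spec obtained elsewhere in this | ||||
| // package: | ||||
| // | ||||
| //	handle, err := NewHandle(spec) | ||||
| //	if err != nil { | ||||
| //		return err // e.g. the kernel lacks BTF support | ||||
| //	} | ||||
| //	defer handle.Close() | ||||
| //	fd := handle.FD() // raw file descriptor for use in other bpf(2) attributes | ||||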
| func NewHandle(spec *Spec) (*Handle, error) { | ||||
| 	if err := haveBTF(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if spec.byteOrder != internal.NativeEndian { | ||||
| 		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) | ||||
| 	} | ||||
| 
 | ||||
| 	btf, err := spec.marshal(marshalOpts{ | ||||
| 		ByteOrder:        internal.NativeEndian, | ||||
| 		StripFuncLinkage: haveFuncLinkage() != nil, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't marshal BTF: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if uint64(len(btf)) > math.MaxUint32 { | ||||
| 		return nil, errors.New("BTF exceeds the maximum size") | ||||
| 	} | ||||
| 
 | ||||
| 	attr := &sys.BtfLoadAttr{ | ||||
| 		Btf:     sys.NewSlicePointer(btf), | ||||
| 		BtfSize: uint32(len(btf)), | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := sys.BtfLoad(attr) | ||||
| 	if err != nil { | ||||
| 		logBuf := make([]byte, 64*1024) | ||||
| 		attr.BtfLogBuf = sys.NewSlicePointer(logBuf) | ||||
| 		attr.BtfLogSize = uint32(len(logBuf)) | ||||
| 		attr.BtfLogLevel = 1 | ||||
| 		// NB: The syscall will never return ENOSPC as of 5.18-rc4. | ||||
| 		_, _ = sys.BtfLoad(attr) | ||||
| 		return nil, internal.ErrorWithLog(err, logBuf) | ||||
| 	} | ||||
| 
 | ||||
| 	return &Handle{fd, attr.BtfSize}, nil | ||||
| } | ||||
| 
 | ||||
| // NewHandleFromID returns the BTF handle for a given id. | ||||
| // | ||||
| // Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible. | ||||
| // | ||||
| // Returns ErrNotExist, if there is no BTF with the given id. | ||||
| // | ||||
| // Requires CAP_SYS_ADMIN. | ||||
| func NewHandleFromID(id ID) (*Handle, error) { | ||||
| 	fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ | ||||
| 		Id: uint32(id), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("get FD for ID %d: %w", id, err) | ||||
| 	} | ||||
| 
 | ||||
| 	info, err := newHandleInfoFromFD(fd) | ||||
| 	if err != nil { | ||||
| 		_ = fd.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &Handle{fd, info.size}, nil | ||||
| } | ||||
| 
 | ||||
| // Spec parses the kernel BTF into Go types. | ||||
| // | ||||
| // base is used to decode split BTF and may be nil. | ||||
| func (h *Handle) Spec(base *Spec) (*Spec, error) { | ||||
| 	var btfInfo sys.BtfInfo | ||||
| 	btfBuffer := make([]byte, h.size) | ||||
| 	btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) | ||||
| 
 | ||||
| 	if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	var baseTypes types | ||||
| 	var baseStrings *stringTable | ||||
| 	if base != nil { | ||||
| 		baseTypes = base.types | ||||
| 		baseStrings = base.strings | ||||
| 	} | ||||
| 
 | ||||
| 	return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings) | ||||
| } | ||||
| 
 | ||||
| // Close destroys the handle. | ||||
| // | ||||
| // Subsequent calls to FD will return an invalid value. | ||||
| func (h *Handle) Close() error { | ||||
| 	if h == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	return h.fd.Close() | ||||
| } | ||||
| 
 | ||||
| // FD returns the file descriptor for the handle. | ||||
| func (h *Handle) FD() int { | ||||
| 	return h.fd.Int() | ||||
| } | ||||
| 
 | ||||
| // Info returns metadata about the handle. | ||||
| func (h *Handle) Info() (*HandleInfo, error) { | ||||
| 	return newHandleInfoFromFD(h.fd) | ||||
| } | ||||
| 
 | ||||
| func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { | ||||
| 	const minHeaderLength = 24 | ||||
| 
 | ||||
| 	typesLen := uint32(binary.Size(types)) | ||||
| 	header := btfHeader{ | ||||
| 		Magic:     btfMagic, | ||||
| 		Version:   1, | ||||
| 		HdrLen:    minHeaderLength, | ||||
| 		TypeOff:   0, | ||||
| 		TypeLen:   typesLen, | ||||
| 		StringOff: typesLen, | ||||
| 		StringLen: uint32(len(strings)), | ||||
| 	} | ||||
| 
 | ||||
| 	buf := new(bytes.Buffer) | ||||
| 	_ = binary.Write(buf, bo, &header) | ||||
| 	_ = binary.Write(buf, bo, types) | ||||
| 	buf.Write(strings) | ||||
| 
 | ||||
| 	return buf.Bytes() | ||||
| } | ||||
| 
 | ||||
| var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { | ||||
| 	var ( | ||||
| 		types struct { | ||||
| 			Integer btfType | ||||
| 			Var     btfType | ||||
| 			btfVar  struct{ Linkage uint32 } | ||||
| 		} | ||||
| 		strings = []byte{0, 'a', 0} | ||||
| 	) | ||||
| 
 | ||||
| 	// We use a BTF_KIND_VAR here, to make sure that | ||||
| 	// the kernel understands BTF at least as well as we | ||||
| 	// do. BTF_KIND_VAR was introduced ~5.1. | ||||
| 	types.Integer.SetKind(kindPointer) | ||||
| 	types.Var.NameOff = 1 | ||||
| 	types.Var.SetKind(kindVar) | ||||
| 	types.Var.SizeType = 1 | ||||
| 
 | ||||
| 	btf := marshalBTF(&types, strings, internal.NativeEndian) | ||||
| 
 | ||||
| 	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ | ||||
| 		Btf:     sys.NewSlicePointer(btf), | ||||
| 		BtfSize: uint32(len(btf)), | ||||
| 	}) | ||||
| 	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { | ||||
| 		// Treat both EINVAL and EPERM as not supported: loading the program | ||||
| 		// might still succeed without BTF. | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	fd.Close() | ||||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { | ||||
| 	if err := haveBTF(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var ( | ||||
| 		types struct { | ||||
| 			FuncProto btfType | ||||
| 			Func      btfType | ||||
| 		} | ||||
| 		strings = []byte{0, 'a', 0} | ||||
| 	) | ||||
| 
 | ||||
| 	types.FuncProto.SetKind(kindFuncProto) | ||||
| 	types.Func.SetKind(kindFunc) | ||||
| 	types.Func.SizeType = 1 // aka FuncProto | ||||
| 	types.Func.NameOff = 1 | ||||
| 	types.Func.SetLinkage(GlobalFunc) | ||||
| 
 | ||||
| 	btf := marshalBTF(&types, strings, internal.NativeEndian) | ||||
| 
 | ||||
| 	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ | ||||
| 		Btf:     sys.NewSlicePointer(btf), | ||||
| 		BtfSize: uint32(len(btf)), | ||||
| 	}) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	fd.Close() | ||||
| 	return nil | ||||
| }) | ||||
|  | @ -6,6 +6,8 @@ import ( | |||
| 	"io" | ||||
| ) | ||||
| 
 | ||||
| //go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage | ||||
| 
 | ||||
| // btfKind describes a Type. | ||||
| type btfKind uint8 | ||||
| 
 | ||||
|  | @ -29,19 +31,32 @@ const ( | |||
| 	// Added ~5.1 | ||||
| 	kindVar | ||||
| 	kindDatasec | ||||
| 	// Added ~5.13 | ||||
| 	kindFloat | ||||
| ) | ||||
| 
 | ||||
| type btfFuncLinkage uint8 | ||||
| // FuncLinkage describes BTF function linkage metadata. | ||||
| type FuncLinkage int | ||||
| 
 | ||||
| // Equivalent of enum btf_func_linkage. | ||||
| const ( | ||||
| 	StaticFunc FuncLinkage = iota // static | ||||
| 	GlobalFunc                    // global | ||||
| 	ExternFunc                    // extern | ||||
| ) | ||||
| 
 | ||||
| // VarLinkage describes BTF variable linkage metadata. | ||||
| type VarLinkage int | ||||
| 
 | ||||
| const ( | ||||
| 	linkageStatic btfFuncLinkage = iota | ||||
| 	linkageGlobal | ||||
| 	linkageExtern | ||||
| 	StaticVar VarLinkage = iota // static | ||||
| 	GlobalVar                   // global | ||||
| 	ExternVar                   // extern | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	btfTypeKindShift     = 24 | ||||
| 	btfTypeKindLen       = 4 | ||||
| 	btfTypeKindLen       = 5 | ||||
| 	btfTypeVlenShift     = 0 | ||||
| 	btfTypeVlenMask      = 16 | ||||
| 	btfTypeKindFlagShift = 31 | ||||
|  | @ -54,8 +69,8 @@ type btfType struct { | |||
| 	/* "info" bits arrangement | ||||
| 	 * bits  0-15: vlen (e.g. # of struct's members), linkage | ||||
| 	 * bits 16-23: unused | ||||
| 	 * bits 24-27: kind (e.g. int, ptr, array...etc) | ||||
| 	 * bits 28-30: unused | ||||
| 	 * bits 24-28: kind (e.g. int, ptr, array...etc) | ||||
| 	 * bits 29-30: unused | ||||
| 	 * bit     31: kind_flag, currently used by | ||||
| 	 *             struct, union and fwd | ||||
| 	 */ | ||||
|  | @ -104,6 +119,8 @@ func (k btfKind) String() string { | |||
| 		return "Variable" | ||||
| 	case kindDatasec: | ||||
| 		return "Section" | ||||
| 	case kindFloat: | ||||
| 		return "Float" | ||||
| 	default: | ||||
| 		return fmt.Sprintf("Unknown (%d)", k) | ||||
| 	} | ||||
|  | @ -113,13 +130,22 @@ func mask(len uint32) uint32 { | |||
| 	return (1 << len) - 1 | ||||
| } | ||||
| 
 | ||||
| func readBits(value, len, shift uint32) uint32 { | ||||
| 	return (value >> shift) & mask(len) | ||||
| } | ||||
| 
 | ||||
| func writeBits(value, len, shift, new uint32) uint32 { | ||||
| 	value &^= mask(len) << shift | ||||
| 	value |= (new & mask(len)) << shift | ||||
| 	return value | ||||
| } | ||||
| 
 | ||||
| func (bt *btfType) info(len, shift uint32) uint32 { | ||||
| 	return (bt.Info >> shift) & mask(len) | ||||
| 	return readBits(bt.Info, len, shift) | ||||
| } | ||||
| 
 | ||||
| func (bt *btfType) setInfo(value, len, shift uint32) { | ||||
| 	bt.Info &^= mask(len) << shift | ||||
| 	bt.Info |= (value & mask(len)) << shift | ||||
| 	bt.Info = writeBits(bt.Info, len, shift, value) | ||||
| } | ||||
| 
 | ||||
| func (bt *btfType) Kind() btfKind { | ||||
|  | @ -142,11 +168,11 @@ func (bt *btfType) KindFlag() bool { | |||
| 	return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 | ||||
| } | ||||
| 
 | ||||
| func (bt *btfType) Linkage() btfFuncLinkage { | ||||
| 	return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) | ||||
| func (bt *btfType) Linkage() FuncLinkage { | ||||
| 	return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) | ||||
| } | ||||
| 
 | ||||
| func (bt *btfType) SetLinkage(linkage btfFuncLinkage) { | ||||
| func (bt *btfType) SetLinkage(linkage FuncLinkage) { | ||||
| 	bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) | ||||
| } | ||||
| 
 | ||||
|  | @ -160,6 +186,10 @@ func (bt *btfType) Size() uint32 { | |||
| 	return bt.SizeType | ||||
| } | ||||
| 
 | ||||
| func (bt *btfType) SetSize(size uint32) { | ||||
| 	bt.SizeType = size | ||||
| } | ||||
| 
 | ||||
| type rawType struct { | ||||
| 	btfType | ||||
| 	data interface{} | ||||
|  | @ -177,6 +207,50 @@ func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { | |||
| 	return binary.Write(w, bo, rt.data) | ||||
| } | ||||
| 
 | ||||
| // btfInt encodes additional data for integers. | ||||
| // | ||||
| //    ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b | ||||
| //    ? = undefined | ||||
| //    e = encoding | ||||
| //    o = offset (bitfields?) | ||||
| //    b = bits (bitfields) | ||||
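| // | ||||
| // For illustration, the accessors below pack and unpack these fields: | ||||
| // | ||||
| //	var bi btfInt | ||||
| //	bi.SetEncoding(Signed) | ||||
| //	bi.SetBits(32) | ||||
| //	// bi.Encoding() == Signed, bi.Bits() == 32 | ||||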
| type btfInt struct { | ||||
| 	Raw uint32 | ||||
| } | ||||
| 
 | ||||
| const ( | ||||
| 	btfIntEncodingLen   = 4 | ||||
| 	btfIntEncodingShift = 24 | ||||
| 	btfIntOffsetLen     = 8 | ||||
| 	btfIntOffsetShift   = 16 | ||||
| 	btfIntBitsLen       = 8 | ||||
| 	btfIntBitsShift     = 0 | ||||
| ) | ||||
| 
 | ||||
| func (bi btfInt) Encoding() IntEncoding { | ||||
| 	return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) | ||||
| } | ||||
| 
 | ||||
| func (bi *btfInt) SetEncoding(e IntEncoding) { | ||||
| 	bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) | ||||
| } | ||||
| 
 | ||||
| func (bi btfInt) Offset() Bits { | ||||
| 	return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) | ||||
| } | ||||
| 
 | ||||
| func (bi *btfInt) SetOffset(offset uint32) { | ||||
| 	bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) | ||||
| } | ||||
| 
 | ||||
| func (bi btfInt) Bits() Bits { | ||||
| 	return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) | ||||
| } | ||||
| 
 | ||||
| func (bi *btfInt) SetBits(bits byte) { | ||||
| 	bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) | ||||
| } | ||||
| 
 | ||||
| type btfArray struct { | ||||
| 	Type      TypeID | ||||
| 	IndexType TypeID | ||||
|  | @ -209,11 +283,14 @@ type btfParam struct { | |||
| 	Type    TypeID | ||||
| } | ||||
| 
 | ||||
| func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { | ||||
| 	var ( | ||||
| 		header btfType | ||||
| 		types  []rawType | ||||
| 	) | ||||
| func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) { | ||||
| 	var header btfType | ||||
| 	// Because of the interleaving between type headers and their per-kind | ||||
| 	// data it is difficult to precompute the exact number of raw types this | ||||
| 	// will parse, so this guess is only a rough first estimate. | ||||
| 	sizeOfbtfType := uintptr(binary.Size(btfType{})) | ||||
| 	tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 | ||||
| 	types := make([]rawType, 0, tyMaxCount) | ||||
| 
 | ||||
| 	for id := TypeID(1); ; id++ { | ||||
| 		if err := binary.Read(r, bo, &header); err == io.EOF { | ||||
|  | @ -225,7 +302,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { | |||
| 		var data interface{} | ||||
| 		switch header.Kind() { | ||||
| 		case kindInt: | ||||
| 			data = new(uint32) | ||||
| 			data = new(btfInt) | ||||
| 		case kindPointer: | ||||
| 		case kindArray: | ||||
| 			data = new(btfArray) | ||||
|  | @ -247,6 +324,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { | |||
| 			data = new(btfVariable) | ||||
| 		case kindDatasec: | ||||
| 			data = make([]btfVarSecinfo, header.Vlen()) | ||||
| 		case kindFloat: | ||||
| 		default: | ||||
| 			return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) | ||||
| 		} | ||||
|  | @ -263,7 +341,3 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { | |||
| 		types = append(types, rawType{header, data}) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func intEncoding(raw uint32) (IntEncoding, uint32, byte) { | ||||
| 	return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff) | ||||
| } | ||||
							
								
								
									
44 vendor/github.com/cilium/ebpf/btf/btf_types_string.go generated vendored Normal file
							|  | @ -0,0 +1,44 @@ | |||
| // Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT. | ||||
| 
 | ||||
| package btf | ||||
| 
 | ||||
| import "strconv" | ||||
| 
 | ||||
| func _() { | ||||
| 	// An "invalid array index" compiler error signifies that the constant values have changed. | ||||
| 	// Re-run the stringer command to generate them again. | ||||
| 	var x [1]struct{} | ||||
| 	_ = x[StaticFunc-0] | ||||
| 	_ = x[GlobalFunc-1] | ||||
| 	_ = x[ExternFunc-2] | ||||
| } | ||||
| 
 | ||||
| const _FuncLinkage_name = "staticglobalextern" | ||||
| 
 | ||||
| var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} | ||||
| 
 | ||||
| func (i FuncLinkage) String() string { | ||||
| 	if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { | ||||
| 		return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" | ||||
| 	} | ||||
| 	return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] | ||||
| } | ||||
| func _() { | ||||
| 	// An "invalid array index" compiler error signifies that the constant values have changed. | ||||
| 	// Re-run the stringer command to generate them again. | ||||
| 	var x [1]struct{} | ||||
| 	_ = x[StaticVar-0] | ||||
| 	_ = x[GlobalVar-1] | ||||
| 	_ = x[ExternVar-2] | ||||
| } | ||||
| 
 | ||||
| const _VarLinkage_name = "staticglobalextern" | ||||
| 
 | ||||
| var _VarLinkage_index = [...]uint8{0, 6, 12, 18} | ||||
| 
 | ||||
| func (i VarLinkage) String() string { | ||||
| 	if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { | ||||
| 		return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" | ||||
| 	} | ||||
| 	return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] | ||||
| } | ||||
							
								
								
									
972 vendor/github.com/cilium/ebpf/btf/core.go generated vendored Normal file
							|  | @ -0,0 +1,972 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"reflect" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| ) | ||||
| 
 | ||||
| // Code in this file is derived from libbpf, which is available under a BSD | ||||
| // 2-Clause license. | ||||
| 
 | ||||
| // COREFixup is the result of computing a CO-RE relocation for a target. | ||||
| type COREFixup struct { | ||||
| 	kind   coreKind | ||||
| 	local  uint32 | ||||
| 	target uint32 | ||||
| 	// True if there is no valid fixup. The instruction is replaced with an | ||||
| 	// invalid dummy. | ||||
| 	poison bool | ||||
| 	// True if the validation of the local value should be skipped. Used by | ||||
| 	// some kinds of bitfield relocations. | ||||
| 	skipLocalValidation bool | ||||
| } | ||||
| 
 | ||||
| func (f *COREFixup) equal(other COREFixup) bool { | ||||
| 	return f.local == other.local && f.target == other.target | ||||
| } | ||||
| 
 | ||||
| func (f *COREFixup) String() string { | ||||
| 	if f.poison { | ||||
| 		return fmt.Sprintf("%s=poison", f.kind) | ||||
| 	} | ||||
| 	return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) | ||||
| } | ||||
| 
 | ||||
| func (f *COREFixup) Apply(ins *asm.Instruction) error { | ||||
| 	if f.poison { | ||||
| 		const badRelo = 0xbad2310 | ||||
| 
 | ||||
| 		*ins = asm.BuiltinFunc(badRelo).Call() | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	switch class := ins.OpCode.Class(); class { | ||||
| 	case asm.LdXClass, asm.StClass, asm.StXClass: | ||||
| 		if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { | ||||
| 			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) | ||||
| 		} | ||||
| 
 | ||||
| 		if f.target > math.MaxInt16 { | ||||
| 			return fmt.Errorf("offset %d exceeds MaxInt16", f.target) | ||||
| 		} | ||||
| 
 | ||||
| 		ins.Offset = int16(f.target) | ||||
| 
 | ||||
| 	case asm.LdClass: | ||||
| 		if !ins.IsConstantLoad(asm.DWord) { | ||||
| 			return fmt.Errorf("not a dword-sized immediate load") | ||||
| 		} | ||||
| 
 | ||||
| 		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { | ||||
| 			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) | ||||
| 		} | ||||
| 
 | ||||
| 		ins.Constant = int64(f.target) | ||||
| 
 | ||||
| 	case asm.ALUClass: | ||||
| 		if ins.OpCode.ALUOp() == asm.Swap { | ||||
| 			return fmt.Errorf("relocation against swap") | ||||
| 		} | ||||
| 
 | ||||
| 		fallthrough | ||||
| 
 | ||||
| 	case asm.ALU64Class: | ||||
| 		if src := ins.OpCode.Source(); src != asm.ImmSource { | ||||
| 			return fmt.Errorf("invalid source %s", src) | ||||
| 		} | ||||
| 
 | ||||
| 		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { | ||||
| 			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) | ||||
| 		} | ||||
| 
 | ||||
| 		if f.target > math.MaxInt32 { | ||||
| 			return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) | ||||
| 		} | ||||
| 
 | ||||
| 		ins.Constant = int64(f.target) | ||||
| 
 | ||||
| 	default: | ||||
| 		return fmt.Errorf("invalid class %s", class) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (f COREFixup) isNonExistant() bool { | ||||
| 	return f.kind.checksForExistence() && f.target == 0 | ||||
| } | ||||
| 
 | ||||
| // coreKind is the type of CO-RE relocation as specified in BPF source code. | ||||
| type coreKind uint32 | ||||
| 
 | ||||
| const ( | ||||
| 	reloFieldByteOffset coreKind = iota /* field byte offset */ | ||||
| 	reloFieldByteSize                   /* field size in bytes */ | ||||
| 	reloFieldExists                     /* field existence in target kernel */ | ||||
| 	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */ | ||||
| 	reloFieldLShiftU64                  /* bitfield-specific left bitshift */ | ||||
| 	reloFieldRShiftU64                  /* bitfield-specific right bitshift */ | ||||
| 	reloTypeIDLocal                     /* type ID in local BPF object */ | ||||
| 	reloTypeIDTarget                    /* type ID in target kernel */ | ||||
| 	reloTypeExists                      /* type existence in target kernel */ | ||||
| 	reloTypeSize                        /* type size in bytes */ | ||||
| 	reloEnumvalExists                   /* enum value existence in target kernel */ | ||||
| 	reloEnumvalValue                    /* enum value integer value */ | ||||
| ) | ||||
| 
 | ||||
| func (k coreKind) checksForExistence() bool { | ||||
| 	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists | ||||
| } | ||||
| 
 | ||||
| func (k coreKind) String() string { | ||||
| 	switch k { | ||||
| 	case reloFieldByteOffset: | ||||
| 		return "byte_off" | ||||
| 	case reloFieldByteSize: | ||||
| 		return "byte_sz" | ||||
| 	case reloFieldExists: | ||||
| 		return "field_exists" | ||||
| 	case reloFieldSigned: | ||||
| 		return "signed" | ||||
| 	case reloFieldLShiftU64: | ||||
| 		return "lshift_u64" | ||||
| 	case reloFieldRShiftU64: | ||||
| 		return "rshift_u64" | ||||
| 	case reloTypeIDLocal: | ||||
| 		return "local_type_id" | ||||
| 	case reloTypeIDTarget: | ||||
| 		return "target_type_id" | ||||
| 	case reloTypeExists: | ||||
| 		return "type_exists" | ||||
| 	case reloTypeSize: | ||||
| 		return "type_size" | ||||
| 	case reloEnumvalExists: | ||||
| 		return "enumval_exists" | ||||
| 	case reloEnumvalValue: | ||||
| 		return "enumval_value" | ||||
| 	default: | ||||
| 		return "unknown" | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // CORERelocate calculates the difference in types between local and target. | ||||
| // | ||||
| // Returns a list of fixups which can be applied to instructions to make them | ||||
| // match the target type(s). | ||||
| // | ||||
| // Fixups are returned in the order of relos, e.g. fixup[i] is the solution | ||||
| // for relos[i]. | ||||
| func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) { | ||||
| 	if local.byteOrder != target.byteOrder { | ||||
| 		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder) | ||||
| 	} | ||||
| 
 | ||||
| 	type reloGroup struct { | ||||
| 		relos []*CORERelocation | ||||
| 		// Position of each relocation in relos. | ||||
| 		indices []int | ||||
| 	} | ||||
| 
 | ||||
| 	// Split relocations into per Type lists. | ||||
| 	relosByType := make(map[Type]*reloGroup) | ||||
| 	result := make([]COREFixup, len(relos)) | ||||
| 	for i, relo := range relos { | ||||
| 		if relo.kind == reloTypeIDLocal { | ||||
| 			// Filtering out reloTypeIDLocal here makes our lives a lot easier | ||||
| 			// down the line, since it doesn't have a target at all. | ||||
| 			if len(relo.accessor) > 1 || relo.accessor[0] != 0 { | ||||
| 				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) | ||||
| 			} | ||||
| 
 | ||||
| 			id, err := local.TypeID(relo.typ) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("%s: %w", relo.kind, err) | ||||
| 			} | ||||
| 
 | ||||
| 			result[i] = COREFixup{ | ||||
| 				kind:   relo.kind, | ||||
| 				local:  uint32(id), | ||||
| 				target: uint32(id), | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		group, ok := relosByType[relo.typ] | ||||
| 		if !ok { | ||||
| 			group = &reloGroup{} | ||||
| 			relosByType[relo.typ] = group | ||||
| 		} | ||||
| 		group.relos = append(group.relos, relo) | ||||
| 		group.indices = append(group.indices, i) | ||||
| 	} | ||||
| 
 | ||||
| 	for localType, group := range relosByType { | ||||
| 		localTypeName := localType.TypeName() | ||||
| 		if localTypeName == "" { | ||||
| 			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) | ||||
| 		} | ||||
| 
 | ||||
| 		targets := target.namedTypes[newEssentialName(localTypeName)] | ||||
| 		fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("relocate %s: %w", localType, err) | ||||
| 		} | ||||
| 
 | ||||
| 		for j, index := range group.indices { | ||||
| 			result[index] = fixups[j] | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return result, nil | ||||
| } | ||||
| 
 | ||||
| var errAmbiguousRelocation = errors.New("ambiguous relocation") | ||||
| var errImpossibleRelocation = errors.New("impossible relocation") | ||||
| 
 | ||||
| // coreCalculateFixups calculates the fixups for the given relocations using | ||||
| // the "best" target. | ||||
| // | ||||
| // The best target is determined by scoring: the less poisoning we have to do | ||||
| // the better the target is. | ||||
| func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) { | ||||
| 	localID, err := localSpec.TypeID(local) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("local type ID: %w", err) | ||||
| 	} | ||||
| 	local = Copy(local, UnderlyingType) | ||||
| 
 | ||||
| 	bestScore := len(relos) | ||||
| 	var bestFixups []COREFixup | ||||
| 	for i := range targets { | ||||
| 		targetID, err := targetSpec.TypeID(targets[i]) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("target type ID: %w", err) | ||||
| 		} | ||||
| 		target := Copy(targets[i], UnderlyingType) | ||||
| 
 | ||||
| 		score := 0 // lower is better | ||||
| 		fixups := make([]COREFixup, 0, len(relos)) | ||||
| 		for _, relo := range relos { | ||||
| 			fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("target %s: %w", target, err) | ||||
| 			} | ||||
| 			if fixup.poison || fixup.isNonExistant() { | ||||
| 				score++ | ||||
| 			} | ||||
| 			fixups = append(fixups, fixup) | ||||
| 		} | ||||
| 
 | ||||
| 		if score > bestScore { | ||||
| 			// We have a better target already, ignore this one. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if score < bestScore { | ||||
| 			// This is the best target yet, use it. | ||||
| 			bestScore = score | ||||
| 			bestFixups = fixups | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// Some other target has the same score as the current one. Make sure | ||||
| 		// the fixups agree with each other. | ||||
| 		for i, fixup := range bestFixups { | ||||
| 			if !fixup.equal(fixups[i]) { | ||||
| 				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if bestFixups == nil { | ||||
| 		// Nothing at all matched, probably because there are no suitable | ||||
| 		// targets at all. | ||||
| 		// | ||||
| 		// Poison everything except checksForExistence. | ||||
| 		bestFixups = make([]COREFixup, len(relos)) | ||||
| 		for i, relo := range relos { | ||||
| 			if relo.kind.checksForExistence() { | ||||
| 				bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} | ||||
| 			} else { | ||||
| 				bestFixups[i] = COREFixup{kind: relo.kind, poison: true} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return bestFixups, nil | ||||
| } | ||||
| 
 | ||||
| // coreCalculateFixup calculates the fixup for a single local type, target type | ||||
| // and relocation. | ||||
| func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) { | ||||
| 	fixup := func(local, target uint32) (COREFixup, error) { | ||||
| 		return COREFixup{kind: relo.kind, local: local, target: target}, nil | ||||
| 	} | ||||
| 	fixupWithoutValidation := func(local, target uint32) (COREFixup, error) { | ||||
| 		return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil | ||||
| 	} | ||||
| 	poison := func() (COREFixup, error) { | ||||
| 		if relo.kind.checksForExistence() { | ||||
| 			return fixup(1, 0) | ||||
| 		} | ||||
| 		return COREFixup{kind: relo.kind, poison: true}, nil | ||||
| 	} | ||||
| 	zero := COREFixup{} | ||||
| 
 | ||||
| 	switch relo.kind { | ||||
| 	case reloTypeIDTarget, reloTypeSize, reloTypeExists: | ||||
| 		if len(relo.accessor) > 1 || relo.accessor[0] != 0 { | ||||
| 			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) | ||||
| 		} | ||||
| 
 | ||||
| 		err := coreAreTypesCompatible(local, target) | ||||
| 		if errors.Is(err, errImpossibleRelocation) { | ||||
| 			return poison() | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) | ||||
| 		} | ||||
| 
 | ||||
| 		switch relo.kind { | ||||
| 		case reloTypeExists: | ||||
| 			return fixup(1, 1) | ||||
| 
 | ||||
| 		case reloTypeIDTarget: | ||||
| 			return fixup(uint32(localID), uint32(targetID)) | ||||
| 
 | ||||
| 		case reloTypeSize: | ||||
| 			localSize, err := Sizeof(local) | ||||
| 			if err != nil { | ||||
| 				return zero, err | ||||
| 			} | ||||
| 
 | ||||
| 			targetSize, err := Sizeof(target) | ||||
| 			if err != nil { | ||||
| 				return zero, err | ||||
| 			} | ||||
| 
 | ||||
| 			return fixup(uint32(localSize), uint32(targetSize)) | ||||
| 		} | ||||
| 
 | ||||
| 	case reloEnumvalValue, reloEnumvalExists: | ||||
| 		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) | ||||
| 		if errors.Is(err, errImpossibleRelocation) { | ||||
| 			return poison() | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) | ||||
| 		} | ||||
| 
 | ||||
| 		switch relo.kind { | ||||
| 		case reloEnumvalExists: | ||||
| 			return fixup(1, 1) | ||||
| 
 | ||||
| 		case reloEnumvalValue: | ||||
| 			return fixup(uint32(localValue.Value), uint32(targetValue.Value)) | ||||
| 		} | ||||
| 
 | ||||
| 	case reloFieldSigned: | ||||
| 		switch local.(type) { | ||||
| 		case *Enum: | ||||
| 			return fixup(1, 1) | ||||
| 		case *Int: | ||||
| 			return fixup( | ||||
| 				uint32(local.(*Int).Encoding&Signed), | ||||
| 				uint32(target.(*Int).Encoding&Signed), | ||||
| 			) | ||||
| 		default: | ||||
| 			return fixupWithoutValidation(0, 0) | ||||
| 		} | ||||
| 
 | ||||
| 	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64: | ||||
| 		if _, ok := target.(*Fwd); ok { | ||||
| 			// We can't relocate fields using a forward declaration, so | ||||
| 			// skip it. If a non-forward declaration is present in the BTF | ||||
| 			// we'll find it in one of the other iterations. | ||||
| 			return poison() | ||||
| 		} | ||||
| 
 | ||||
| 		localField, targetField, err := coreFindField(local, relo.accessor, target) | ||||
| 		if errors.Is(err, errImpossibleRelocation) { | ||||
| 			return poison() | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return zero, fmt.Errorf("target %s: %w", target, err) | ||||
| 		} | ||||
| 
 | ||||
| 		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) { | ||||
| 			f.skipLocalValidation = localField.bitfieldSize > 0 | ||||
| 			return f, err | ||||
| 		} | ||||
| 
 | ||||
| 		switch relo.kind { | ||||
| 		case reloFieldExists: | ||||
| 			return fixup(1, 1) | ||||
| 
 | ||||
| 		case reloFieldByteOffset: | ||||
| 			return maybeSkipValidation(fixup(localField.offset, targetField.offset)) | ||||
| 
 | ||||
| 		case reloFieldByteSize: | ||||
| 			localSize, err := Sizeof(localField.Type) | ||||
| 			if err != nil { | ||||
| 				return zero, err | ||||
| 			} | ||||
| 
 | ||||
| 			targetSize, err := Sizeof(targetField.Type) | ||||
| 			if err != nil { | ||||
| 				return zero, err | ||||
| 			} | ||||
| 			return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize))) | ||||
| 
 | ||||
| 		case reloFieldLShiftU64: | ||||
| 			var target uint32 | ||||
| 			if byteOrder == binary.LittleEndian { | ||||
| 				targetSize, err := targetField.sizeBits() | ||||
| 				if err != nil { | ||||
| 					return zero, err | ||||
| 				} | ||||
| 
 | ||||
| 				target = uint32(64 - targetField.bitfieldOffset - targetSize) | ||||
| 			} else { | ||||
| 				loadWidth, err := Sizeof(targetField.Type) | ||||
| 				if err != nil { | ||||
| 					return zero, err | ||||
| 				} | ||||
| 
 | ||||
| 				target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) | ||||
| 			} | ||||
| 			return fixupWithoutValidation(0, target) | ||||
| 
 | ||||
| 		case reloFieldRShiftU64: | ||||
| 			targetSize, err := targetField.sizeBits() | ||||
| 			if err != nil { | ||||
| 				return zero, err | ||||
| 			} | ||||
| 
 | ||||
| 			return fixupWithoutValidation(0, uint32(64-targetSize)) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| /* coreAccessor contains a path through a struct. It contains at least one index. | ||||
|  * | ||||
|  * The interpretation depends on the kind of the relocation. The following is | ||||
|  * taken from struct bpf_core_relo in libbpf_internal.h: | ||||
|  * | ||||
|  * - for field-based relocations, string encodes an accessed field using | ||||
|  *   a sequence of field and array indices, separated by colon (:). It's | ||||
|  *   conceptually very close to LLVM's getelementptr ([0]) instruction's | ||||
|  *   arguments for identifying offset to a field. | ||||
|  * - for type-based relocations, the string is expected to be just "0"; | ||||
|  * - for enum value-based relocations, string contains an index of enum | ||||
|  *   value within its enum type; | ||||
|  * | ||||
|  * Example to provide a better feel. | ||||
|  * | ||||
|  *   struct sample { | ||||
|  *       int a; | ||||
|  *       struct { | ||||
|  *           int b[10]; | ||||
|  *       }; | ||||
|  *   }; | ||||
|  * | ||||
|  *   struct sample s = ...; | ||||
|  *   int x = &s->a;     // encoded as "0:0" (a is field #0) | ||||
|  *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1, | ||||
|  *                      // b is field #0 inside anon struct, accessing elem #5) | ||||
|  *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) | ||||
|  */ | ||||
| type coreAccessor []int | ||||
| 
 | ||||
| func parseCOREAccessor(accessor string) (coreAccessor, error) { | ||||
| 	if accessor == "" { | ||||
| 		return nil, fmt.Errorf("empty accessor") | ||||
| 	} | ||||
| 
 | ||||
| 	parts := strings.Split(accessor, ":") | ||||
| 	result := make(coreAccessor, 0, len(parts)) | ||||
| 	for _, part := range parts { | ||||
| 		// 31 bits to avoid overflowing int on 32 bit platforms. | ||||
| 		index, err := strconv.ParseUint(part, 10, 31) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("accessor index %q: %s", part, err) | ||||
| 		} | ||||
| 
 | ||||
| 		result = append(result, int(index)) | ||||
| 	} | ||||
| 
 | ||||
| 	return result, nil | ||||
| } | ||||
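| // For illustration (the input mirrors the coreAccessor comment above; it is | ||||
| // not a value produced by this package): | ||||
| // | ||||
| //	acc, err := parseCOREAccessor("0:1:0:5") | ||||
| //	// acc == coreAccessor{0, 1, 0, 5}, err == nil | ||||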
| 
 | ||||
| func (ca coreAccessor) String() string { | ||||
| 	strs := make([]string, 0, len(ca)) | ||||
| 	for _, i := range ca { | ||||
| 		strs = append(strs, strconv.Itoa(i)) | ||||
| 	} | ||||
| 	return strings.Join(strs, ":") | ||||
| } | ||||
| 
 | ||||
| func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { | ||||
| 	e, ok := t.(*Enum) | ||||
| 	if !ok { | ||||
| 		return nil, fmt.Errorf("not an enum: %s", t) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(ca) > 1 { | ||||
| 		return nil, fmt.Errorf("invalid accessor %s for enum", ca) | ||||
| 	} | ||||
| 
 | ||||
| 	i := ca[0] | ||||
| 	if i >= len(e.Values) { | ||||
| 		return nil, fmt.Errorf("invalid index %d for %s", i, e) | ||||
| 	} | ||||
| 
 | ||||
| 	return &e.Values[i], nil | ||||
| } | ||||
| 
 | ||||
| // coreField represents the position of a "child" of a composite type from the | ||||
| // start of that type. | ||||
| // | ||||
| //     /- start of composite | ||||
| //     | offset * 8 | bitfieldOffset | bitfieldSize | ... | | ||||
| //                  \- start of field       end of field -/ | ||||
| type coreField struct { | ||||
| 	Type Type | ||||
| 
 | ||||
| 	// The position of the field from the start of the composite type in bytes. | ||||
| 	offset uint32 | ||||
| 
 | ||||
| 	// The offset of the bitfield in bits from the start of the field. | ||||
| 	bitfieldOffset Bits | ||||
| 
 | ||||
| 	// The size of the bitfield in bits. | ||||
| 	// | ||||
| 	// Zero if the field is not a bitfield. | ||||
| 	bitfieldSize Bits | ||||
| } | ||||
| 
 | ||||
| func (cf *coreField) adjustOffsetToNthElement(n int) error { | ||||
| 	size, err := Sizeof(cf.Type) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	cf.offset += uint32(n) * uint32(size) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (cf *coreField) adjustOffsetBits(offset Bits) error { | ||||
| 	align, err := alignof(cf.Type) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	// We can compute the load offset by: | ||||
| 	// 1) converting the bit offset to bytes with a flooring division. | ||||
| 	// 2) dividing and multiplying that offset by the alignment, yielding the | ||||
| 	//    load size aligned offset. | ||||
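| 	// | ||||
| 	// For example (illustrative numbers, not taken from real BTF): with | ||||
| 	// offset = 68 bits and align = 4 bytes, offsetBytes = (68/8)/4*4 = 8, | ||||
| 	// so cf.offset below grows by 8 bytes and cf.bitfieldOffset becomes | ||||
| 	// 68 - 8*8 = 4 bits. | ||||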
| 	offsetBytes := uint32(offset/8) / uint32(align) * uint32(align) | ||||
| 
 | ||||
| 	// The number of bits remaining is the bit offset less the number of bits | ||||
| 	// we can "skip" with the aligned offset. | ||||
| 	cf.bitfieldOffset = offset - Bits(offsetBytes*8) | ||||
| 
 | ||||
| 	// We know that cf.offset is aligned to at least align since we get it | ||||
| 	// from the compiler via BTF. Adding an aligned offsetBytes preserves the | ||||
| 	// alignment. | ||||
| 	cf.offset += offsetBytes | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (cf *coreField) sizeBits() (Bits, error) { | ||||
| 	if cf.bitfieldSize > 0 { | ||||
| 		return cf.bitfieldSize, nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Someone is trying to access a non-bitfield via a bit shift relocation. | ||||
| 	// This happens when a field changes from a bitfield to a regular field | ||||
| 	// between kernel versions. Synthesise the size to make the shifts work. | ||||
| 	size, err := Sizeof(cf.Type) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	return Bits(size * 8), nil | ||||
| } | ||||
| 
 | ||||
| // coreFindField descends into the local type using the accessor and tries to | ||||
| // find an equivalent field in target at each step. | ||||
| // | ||||
| // Returns the field and the offset of the field from the start of | ||||
| // target in bits. | ||||
| func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { | ||||
| 	local := coreField{Type: localT} | ||||
| 	target := coreField{Type: targetT} | ||||
| 
 | ||||
| 	// The first index is used to offset a pointer of the base type like | ||||
| 	// when accessing an array. | ||||
| 	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { | ||||
| 		return coreField{}, coreField{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { | ||||
| 		return coreField{}, coreField{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { | ||||
| 		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	var localMaybeFlex, targetMaybeFlex bool | ||||
| 	for i, acc := range localAcc[1:] { | ||||
| 		switch localType := local.Type.(type) { | ||||
| 		case composite: | ||||
| 			// For composite types acc is used to find the field in the local type, | ||||
| 			// and then we try to find a field in target with the same name. | ||||
| 			localMembers := localType.members() | ||||
| 			if acc >= len(localMembers) { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) | ||||
| 			} | ||||
| 
 | ||||
| 			localMember := localMembers[acc] | ||||
| 			if localMember.Name == "" { | ||||
| 				_, ok := localMember.Type.(composite) | ||||
| 				if !ok { | ||||
| 					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) | ||||
| 				} | ||||
| 
 | ||||
| 				// This is an anonymous struct or union, ignore it. | ||||
| 				local = coreField{ | ||||
| 					Type:   localMember.Type, | ||||
| 					offset: local.offset + localMember.Offset.Bytes(), | ||||
| 				} | ||||
| 				localMaybeFlex = false | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			targetType, ok := target.Type.(composite) | ||||
| 			if !ok { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) | ||||
| 			} | ||||
| 
 | ||||
| 			targetMember, last, err := coreFindMember(targetType, localMember.Name) | ||||
| 			if err != nil { | ||||
| 				return coreField{}, coreField{}, err | ||||
| 			} | ||||
| 
 | ||||
| 			local = coreField{ | ||||
| 				Type:         localMember.Type, | ||||
| 				offset:       local.offset, | ||||
| 				bitfieldSize: localMember.BitfieldSize, | ||||
| 			} | ||||
| 			localMaybeFlex = acc == len(localMembers)-1 | ||||
| 
 | ||||
| 			target = coreField{ | ||||
| 				Type:         targetMember.Type, | ||||
| 				offset:       target.offset, | ||||
| 				bitfieldSize: targetMember.BitfieldSize, | ||||
| 			} | ||||
| 			targetMaybeFlex = last | ||||
| 
 | ||||
| 			if local.bitfieldSize == 0 && target.bitfieldSize == 0 { | ||||
| 				local.offset += localMember.Offset.Bytes() | ||||
| 				target.offset += targetMember.Offset.Bytes() | ||||
| 				break | ||||
| 			} | ||||
| 
 | ||||
| 			// Either of the members is a bitfield. Make sure we're at the | ||||
| 			// end of the accessor. | ||||
| 			if next := i + 1; next < len(localAcc[1:]) { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") | ||||
| 			} | ||||
| 
 | ||||
| 			if err := local.adjustOffsetBits(localMember.Offset); err != nil { | ||||
| 				return coreField{}, coreField{}, err | ||||
| 			} | ||||
| 
 | ||||
| 			if err := target.adjustOffsetBits(targetMember.Offset); err != nil { | ||||
| 				return coreField{}, coreField{}, err | ||||
| 			} | ||||
| 
 | ||||
| 		case *Array: | ||||
| 			// For arrays, acc is the index in the target. | ||||
| 			targetType, ok := target.Type.(*Array) | ||||
| 			if !ok { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) | ||||
| 			} | ||||
| 
 | ||||
| 			if localType.Nelems == 0 && !localMaybeFlex { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") | ||||
| 			} | ||||
| 			if targetType.Nelems == 0 && !targetMaybeFlex { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") | ||||
| 			} | ||||
| 
 | ||||
| 			if localType.Nelems > 0 && acc >= int(localType.Nelems) { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) | ||||
| 			} | ||||
| 			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { | ||||
| 				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) | ||||
| 			} | ||||
| 
 | ||||
| 			local = coreField{ | ||||
| 				Type:   localType.Type, | ||||
| 				offset: local.offset, | ||||
| 			} | ||||
| 			localMaybeFlex = false | ||||
| 
 | ||||
| 			if err := local.adjustOffsetToNthElement(acc); err != nil { | ||||
| 				return coreField{}, coreField{}, err | ||||
| 			} | ||||
| 
 | ||||
| 			target = coreField{ | ||||
| 				Type:   targetType.Type, | ||||
| 				offset: target.offset, | ||||
| 			} | ||||
| 			targetMaybeFlex = false | ||||
| 
 | ||||
| 			if err := target.adjustOffsetToNthElement(acc); err != nil { | ||||
| 				return coreField{}, coreField{}, err | ||||
| 			} | ||||
| 
 | ||||
| 		default: | ||||
| 			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) | ||||
| 		} | ||||
| 
 | ||||
| 		if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { | ||||
| 			return coreField{}, coreField{}, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return local, target, nil | ||||
| } | ||||
| 
 | ||||
| // coreFindMember finds a member in a composite type while handling anonymous | ||||
| // structs and unions. | ||||
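| // | ||||
| // For example (hypothetical C type): given | ||||
| // | ||||
| //	struct s { struct { int x; }; int y; }; | ||||
| // | ||||
| // looking up "x" descends into the unnamed inner struct and returns the | ||||
| // member with its Offset adjusted by the anonymous member's offset. | ||||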
| func coreFindMember(typ composite, name string) (Member, bool, error) { | ||||
| 	if name == "" { | ||||
| 		return Member{}, false, errors.New("can't search for anonymous member") | ||||
| 	} | ||||
| 
 | ||||
| 	type offsetTarget struct { | ||||
| 		composite | ||||
| 		offset Bits | ||||
| 	} | ||||
| 
 | ||||
| 	targets := []offsetTarget{{typ, 0}} | ||||
| 	visited := make(map[composite]bool) | ||||
| 
 | ||||
| 	for i := 0; i < len(targets); i++ { | ||||
| 		target := targets[i] | ||||
| 
 | ||||
| 		// Only visit targets once to prevent infinite recursion. | ||||
| 		if visited[target] { | ||||
| 			continue | ||||
| 		} | ||||
| 		if len(visited) >= maxTypeDepth { | ||||
| 			// This check is different than libbpf, which restricts the entire | ||||
| 			// path to BPF_CORE_SPEC_MAX_LEN items. | ||||
| 			return Member{}, false, fmt.Errorf("type is nested too deep") | ||||
| 		} | ||||
| 		visited[target] = true | ||||
| 
 | ||||
| 		members := target.members() | ||||
| 		for j, member := range members { | ||||
| 			if member.Name == name { | ||||
| 				// NB: This is safe because member is a copy. | ||||
| 				member.Offset += target.offset | ||||
| 				return member, j == len(members)-1, nil | ||||
| 			} | ||||
| 
 | ||||
| 			// The names don't match, but this member could be an anonymous struct | ||||
| 			// or union. | ||||
| 			if member.Name != "" { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			comp, ok := member.Type.(composite) | ||||
| 			if !ok { | ||||
| 				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) | ||||
| 			} | ||||
| 
 | ||||
| 			targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) | ||||
| } | ||||
| 
 | ||||
| // coreFindEnumValue follows localAcc to find the equivalent enum value in target. | ||||
| func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { | ||||
| 	localValue, err := localAcc.enumValue(local) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	targetEnum, ok := target.(*Enum) | ||||
| 	if !ok { | ||||
| 		return nil, nil, errImpossibleRelocation | ||||
| 	} | ||||
| 
 | ||||
| 	localName := newEssentialName(localValue.Name) | ||||
| 	for i, targetValue := range targetEnum.Values { | ||||
| 		if newEssentialName(targetValue.Name) != localName { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		return localValue, &targetEnum.Values[i], nil | ||||
| 	} | ||||
| 
 | ||||
| 	return nil, nil, errImpossibleRelocation | ||||
| } | ||||
| 
 | ||||
| /* The comment below is from bpf_core_types_are_compat in libbpf.c: | ||||
|  * | ||||
|  * Check local and target types for compatibility. This check is used for | ||||
|  * type-based CO-RE relocations and follow slightly different rules than | ||||
|  * field-based relocations. This function assumes that root types were already | ||||
|  * checked for name match. Beyond that initial root-level name check, names | ||||
|  * are completely ignored. Compatibility rules are as follows: | ||||
|  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but | ||||
|  *     kind should match for local and target types (i.e., STRUCT is not | ||||
|  *     compatible with UNION); | ||||
|  *   - for ENUMs, the size is ignored; | ||||
|  *   - for INT, size and signedness are ignored; | ||||
|  *   - for ARRAY, dimensionality is ignored, element types are checked for | ||||
|  *     compatibility recursively; | ||||
|  *   - CONST/VOLATILE/RESTRICT modifiers are ignored; | ||||
|  *   - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; | ||||
|  *   - FUNC_PROTOs are compatible if they have compatible signature: same | ||||
|  *     number of input args and compatible return and argument types. | ||||
|  * These rules are not set in stone and probably will be adjusted as we get | ||||
|  * more experience with using BPF CO-RE relocations. | ||||
|  * | ||||
|  * Returns errImpossibleRelocation if types are not compatible. | ||||
|  */ | ||||
| func coreAreTypesCompatible(localType Type, targetType Type) error { | ||||
| 	var ( | ||||
| 		localTs, targetTs typeDeque | ||||
| 		l, t              = &localType, &targetType | ||||
| 		depth             = 0 | ||||
| 	) | ||||
| 
 | ||||
| 	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() { | ||||
| 		if depth >= maxTypeDepth { | ||||
| 			return errors.New("types are nested too deep") | ||||
| 		} | ||||
| 
 | ||||
| 		localType = *l | ||||
| 		targetType = *t | ||||
| 
 | ||||
| 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { | ||||
| 			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) | ||||
| 		} | ||||
| 
 | ||||
| 		switch lv := (localType).(type) { | ||||
| 		case *Void, *Struct, *Union, *Enum, *Fwd, *Int: | ||||
| 			// Nothing to do here | ||||
| 
 | ||||
| 		case *Pointer, *Array: | ||||
| 			depth++ | ||||
| 			localType.walk(&localTs) | ||||
| 			targetType.walk(&targetTs) | ||||
| 
 | ||||
| 		case *FuncProto: | ||||
| 			tv := targetType.(*FuncProto) | ||||
| 			if len(lv.Params) != len(tv.Params) { | ||||
| 				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation) | ||||
| 			} | ||||
| 
 | ||||
| 			depth++ | ||||
| 			localType.walk(&localTs) | ||||
| 			targetType.walk(&targetTs) | ||||
| 
 | ||||
| 		default: | ||||
| 			return fmt.Errorf("unsupported type %T", localType) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if l != nil { | ||||
| 		return fmt.Errorf("dangling local type %T", *l) | ||||
| 	} | ||||
| 
 | ||||
| 	if t != nil { | ||||
| 		return fmt.Errorf("dangling target type %T", *t) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| /* coreAreMembersCompatible checks two types for field-based relocation compatibility. | ||||
|  * | ||||
|  * The comment below is from bpf_core_fields_are_compat in libbpf.c: | ||||
|  * | ||||
|  * Check two types for compatibility for the purpose of field access | ||||
|  * relocation. const/volatile/restrict and typedefs are skipped to ensure we | ||||
|  * are relocating semantically compatible entities: | ||||
|  *   - any two STRUCTs/UNIONs are compatible and can be mixed; | ||||
|  *   - any two FWDs are compatible, if their names match (modulo flavor suffix); | ||||
|  *   - any two PTRs are always compatible; | ||||
|  *   - for ENUMs, names should be the same (ignoring flavor suffix) or at | ||||
|  *     least one of enums should be anonymous; | ||||
|  *   - for ENUMs, check sizes, names are ignored; | ||||
|  *   - for INT, size and signedness are ignored; | ||||
|  *   - any two FLOATs are always compatible; | ||||
|  *   - for ARRAY, dimensionality is ignored, element types are checked for | ||||
|  *     compatibility recursively; | ||||
|  *     [ NB: coreAreMembersCompatible doesn't recurse, this check is done | ||||
|  *       by coreFindField. ] | ||||
|  *   - everything else shouldn't be ever a target of relocation. | ||||
|  * These rules are not set in stone and probably will be adjusted as we get | ||||
|  * more experience with using BPF CO-RE relocations. | ||||
|  * | ||||
|  * Returns errImpossibleRelocation if the members are not compatible. | ||||
|  */ | ||||
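| // As an illustration of the name rule above: a local *Enum named | ||||
| // "hrtimer_mode" is considered to match a target *Enum named | ||||
| // "hrtimer_mode___2", since the "___" flavour suffix is not part of the | ||||
| // essential name. | ||||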
| func coreAreMembersCompatible(localType Type, targetType Type) error { | ||||
| 	doNamesMatch := func(a, b string) error { | ||||
| 		if a == "" || b == "" { | ||||
| 			// allow anonymous and named type to match | ||||
| 			return nil | ||||
| 		} | ||||
| 
 | ||||
| 		if newEssentialName(a) == newEssentialName(b) { | ||||
| 			return nil | ||||
| 		} | ||||
| 
 | ||||
| 		return fmt.Errorf("names don't match: %w", errImpossibleRelocation) | ||||
| 	} | ||||
| 
 | ||||
| 	_, lok := localType.(composite) | ||||
| 	_, tok := targetType.(composite) | ||||
| 	if lok && tok { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { | ||||
| 		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) | ||||
| 	} | ||||
| 
 | ||||
| 	switch lv := localType.(type) { | ||||
| 	case *Array, *Pointer, *Float, *Int: | ||||
| 		return nil | ||||
| 
 | ||||
| 	case *Enum: | ||||
| 		tv := targetType.(*Enum) | ||||
| 		return doNamesMatch(lv.Name, tv.Name) | ||||
| 
 | ||||
| 	case *Fwd: | ||||
| 		tv := targetType.(*Fwd) | ||||
| 		return doNamesMatch(lv.Name, tv.Name) | ||||
| 
 | ||||
| 	default: | ||||
| 		return fmt.Errorf("type %s: %w", localType, ErrNotSupported) | ||||
| 	} | ||||
| } | ||||
|  | @ -2,7 +2,4 @@ | |||
| // | ||||
| // The canonical documentation lives in the Linux kernel repository and is | ||||
| // available at https://www.kernel.org/doc/html/latest/bpf/btf.html | ||||
| // | ||||
| // The API is very much unstable. You should only use this via the main | ||||
| // ebpf library. | ||||
| package btf | ||||
							
								
								
									
721 vendor/github.com/cilium/ebpf/btf/ext_info.go generated vendored Normal file
|  | @ -0,0 +1,721 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"sort" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| ) | ||||
| 
 | ||||
| // ExtInfos contains ELF section metadata. | ||||
| type ExtInfos struct { | ||||
| 	// The slices are sorted by offset in ascending order. | ||||
| 	funcInfos       map[string][]funcInfo | ||||
| 	lineInfos       map[string][]lineInfo | ||||
| 	relocationInfos map[string][]coreRelocationInfo | ||||
| } | ||||
| 
 | ||||
| // loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. | ||||
| // | ||||
| // Returns an error wrapping ErrNotFound if no ext infos are present. | ||||
| func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) { | ||||
| 	section := file.Section(".BTF.ext") | ||||
| 	if section == nil { | ||||
| 		return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) | ||||
| 	} | ||||
| 
 | ||||
| 	if section.ReaderAt == nil { | ||||
| 		return nil, fmt.Errorf("compressed ext_info is not supported") | ||||
| 	} | ||||
| 
 | ||||
| 	return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings) | ||||
| } | ||||
| 
 | ||||
| // loadExtInfos parses bare ext infos. | ||||
| func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) { | ||||
| 	// Open unbuffered section reader. binary.Read() calls io.ReadFull on | ||||
| 	// the header structs, resulting in one syscall per header. | ||||
| 	headerRd := io.NewSectionReader(r, 0, math.MaxInt64) | ||||
| 	extHeader, err := parseBTFExtHeader(headerRd, bo) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("parsing BTF extension header: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) | ||||
| 	btfFuncInfos, err := parseFuncInfos(buf, bo, strings) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("parsing BTF function info: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	funcInfos := make(map[string][]funcInfo, len(btfFuncInfos)) | ||||
| 	for section, bfis := range btfFuncInfos { | ||||
| 		funcInfos[section], err = newFuncInfos(bfis, ts) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("section %s: func infos: %w", section, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) | ||||
| 	btfLineInfos, err := parseLineInfos(buf, bo, strings) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("parsing BTF line info: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	lineInfos := make(map[string][]lineInfo, len(btfLineInfos)) | ||||
| 	for section, blis := range btfLineInfos { | ||||
| 		lineInfos[section], err = newLineInfos(blis, strings) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("section %s: line infos: %w", section, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if coreHeader == nil || coreHeader.COREReloLen == 0 { | ||||
| 		return &ExtInfos{funcInfos, lineInfos, nil}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	var btfCORERelos map[string][]bpfCORERelo | ||||
| 	buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) | ||||
| 	btfCORERelos, err = parseCORERelos(buf, bo, strings) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos)) | ||||
| 	for section, brs := range btfCORERelos { | ||||
| 		coreRelos[section], err = newRelocationInfos(brs, ts, strings) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil | ||||
| } | ||||
| 
 | ||||
| type funcInfoMeta struct{} | ||||
| type coreRelocationMeta struct{} | ||||
| 
 | ||||
| // Assign per-section metadata from BTF to a section's instructions. | ||||
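| // | ||||
| // The section name selects which func, line and CO-RE relocation infos are | ||||
| // consulted; infos are attached to the instruction whose raw offset matches. | ||||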
| func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { | ||||
| 	funcInfos := ei.funcInfos[section] | ||||
| 	lineInfos := ei.lineInfos[section] | ||||
| 	reloInfos := ei.relocationInfos[section] | ||||
| 
 | ||||
| 	iter := insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset { | ||||
| 			iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn) | ||||
| 			funcInfos = funcInfos[1:] | ||||
| 		} | ||||
| 
 | ||||
| 		if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset { | ||||
| 			*iter.Ins = iter.Ins.WithSource(lineInfos[0].line) | ||||
| 			lineInfos = lineInfos[1:] | ||||
| 		} | ||||
| 
 | ||||
| 		if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset { | ||||
| 			iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo) | ||||
| 			reloInfos = reloInfos[1:] | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // MarshalExtInfos encodes function and line info embedded in insns into kernel | ||||
| // wire format. | ||||
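| // | ||||
| // typeID resolves each *Func attached to an instruction to the type ID | ||||
| // written into the emitted func info records. | ||||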
| func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) { | ||||
| 	iter := insns.Iterate() | ||||
| 	var fiBuf, liBuf bytes.Buffer | ||||
| 	for iter.Next() { | ||||
| 		if fn := FuncMetadata(iter.Ins); fn != nil { | ||||
| 			fi := &funcInfo{ | ||||
| 				fn:     fn, | ||||
| 				offset: iter.Offset, | ||||
| 			} | ||||
| 			if err := fi.marshal(&fiBuf, typeID); err != nil { | ||||
| 				return nil, nil, fmt.Errorf("write func info: %w", err) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if line, ok := iter.Ins.Source().(*Line); ok { | ||||
| 			li := &lineInfo{ | ||||
| 				line:   line, | ||||
| 				offset: iter.Offset, | ||||
| 			} | ||||
| 			if err := li.marshal(&liBuf); err != nil { | ||||
| 				return nil, nil, fmt.Errorf("write line info: %w", err) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return fiBuf.Bytes(), liBuf.Bytes(), nil | ||||
| } | ||||
| 
 | ||||
| // btfExtHeader is found at the start of the .BTF.ext section. | ||||
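| // | ||||
| // The layout assumed by the parsers below is the header itself (HdrLen bytes, | ||||
| // optionally including a btfExtCOREHeader), followed by the func_info, | ||||
| // line_info and core_relo sub-sections. The *Off fields are relative to the | ||||
| // end of the header, i.e. HdrLen bytes into the section. | ||||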
| type btfExtHeader struct { | ||||
| 	Magic   uint16 | ||||
| 	Version uint8 | ||||
| 	Flags   uint8 | ||||
| 
 | ||||
| 	// HdrLen is larger than the size of struct btfExtHeader when it is | ||||
| 	// immediately followed by a btfExtCOREHeader. | ||||
| 	HdrLen uint32 | ||||
| 
 | ||||
| 	FuncInfoOff uint32 | ||||
| 	FuncInfoLen uint32 | ||||
| 	LineInfoOff uint32 | ||||
| 	LineInfoLen uint32 | ||||
| } | ||||
| 
 | ||||
| // parseBTFExtHeader parses the header of the .BTF.ext section. | ||||
| func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { | ||||
| 	var header btfExtHeader | ||||
| 	if err := binary.Read(r, bo, &header); err != nil { | ||||
| 		return nil, fmt.Errorf("can't read header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Magic != btfMagic { | ||||
| 		return nil, fmt.Errorf("incorrect magic value %v", header.Magic) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Version != 1 { | ||||
| 		return nil, fmt.Errorf("unexpected version %v", header.Version) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Flags != 0 { | ||||
| 		return nil, fmt.Errorf("unsupported flags %v", header.Flags) | ||||
| 	} | ||||
| 
 | ||||
| 	if int64(header.HdrLen) < int64(binary.Size(&header)) { | ||||
| 		return nil, fmt.Errorf("header length shorter than btfExtHeader size") | ||||
| 	} | ||||
| 
 | ||||
| 	return &header, nil | ||||
| } | ||||
| 
 | ||||
| // funcInfoStart returns the offset from the beginning of the .BTF.ext section | ||||
| // to the start of its func_info entries. | ||||
| func (h *btfExtHeader) funcInfoStart() int64 { | ||||
| 	return int64(h.HdrLen + h.FuncInfoOff) | ||||
| } | ||||
| 
 | ||||
| // lineInfoStart returns the offset from the beginning of the .BTF.ext section | ||||
| // to the start of its line_info entries. | ||||
| func (h *btfExtHeader) lineInfoStart() int64 { | ||||
| 	return int64(h.HdrLen + h.LineInfoOff) | ||||
| } | ||||
| 
 | ||||
| // coreReloStart returns the offset from the beginning of the .BTF.ext section | ||||
| // to the start of its CO-RE relocation entries. | ||||
| func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 { | ||||
| 	return int64(h.HdrLen + ch.COREReloOff) | ||||
| } | ||||
| 
 | ||||
| // btfExtCOREHeader is found right after the btfExtHeader when its HdrLen | ||||
| // field is larger than its size. | ||||
| type btfExtCOREHeader struct { | ||||
| 	COREReloOff uint32 | ||||
| 	COREReloLen uint32 | ||||
| } | ||||
| 
 | ||||
| // parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional | ||||
| // header bytes are present, extHeader.HdrLen will be larger than the struct, | ||||
| // indicating the presence of a CO-RE extension header. | ||||
| func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) { | ||||
| 	extHdrSize := int64(binary.Size(extHeader)) | ||||
| 	remainder := int64(extHeader.HdrLen) - extHdrSize | ||||
| 
 | ||||
| 	if remainder == 0 { | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 
 | ||||
| 	var coreHeader btfExtCOREHeader | ||||
| 	if err := binary.Read(r, bo, &coreHeader); err != nil { | ||||
| 		return nil, fmt.Errorf("can't read header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &coreHeader, nil | ||||
| } | ||||
| 
 | ||||
| type btfExtInfoSec struct { | ||||
| 	SecNameOff uint32 | ||||
| 	NumInfo    uint32 | ||||
| } | ||||
| 
 | ||||
| // parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, | ||||
| // appearing within func_info and line_info sub-sections. | ||||
| // These headers appear once for each program section in the ELF and are | ||||
| // followed by one or more func/line_info records for the section. | ||||
| func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) { | ||||
| 	var infoHeader btfExtInfoSec | ||||
| 	if err := binary.Read(r, bo, &infoHeader); err != nil { | ||||
| 		return "", nil, fmt.Errorf("read ext info header: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	secName, err := strings.Lookup(infoHeader.SecNameOff) | ||||
| 	if err != nil { | ||||
| 		return "", nil, fmt.Errorf("get section name: %w", err) | ||||
| 	} | ||||
| 	if secName == "" { | ||||
| 		return "", nil, fmt.Errorf("extinfo header refers to empty section name") | ||||
| 	} | ||||
| 
 | ||||
| 	if infoHeader.NumInfo == 0 { | ||||
| 		return "", nil, fmt.Errorf("section %s has zero records", secName) | ||||
| 	} | ||||
| 
 | ||||
| 	return secName, &infoHeader, nil | ||||
| } | ||||
| 
 | ||||
| // parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos | ||||
| // or line_infos segment that describes the size of each record in that | ||||
| // segment. | ||||
| func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { | ||||
| 	const maxRecordSize = 256 | ||||
| 
 | ||||
| 	var recordSize uint32 | ||||
| 	if err := binary.Read(r, bo, &recordSize); err != nil { | ||||
| 		return 0, fmt.Errorf("can't read record size: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if recordSize < 4 { | ||||
| 		// Need at least InsnOff worth of bytes per record. | ||||
| 		return 0, errors.New("record size too short") | ||||
| 	} | ||||
| 	if recordSize > maxRecordSize { | ||||
| 		return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) | ||||
| 	} | ||||
| 
 | ||||
| 	return recordSize, nil | ||||
| } | ||||
| 
 | ||||
| // The size of a FuncInfo in BTF wire format. | ||||
| var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) | ||||
| 
 | ||||
| type funcInfo struct { | ||||
| 	fn     *Func | ||||
| 	offset asm.RawInstructionOffset | ||||
| } | ||||
| 
 | ||||
| type bpfFuncInfo struct { | ||||
| 	// Instruction offset of the function within an ELF section. | ||||
| 	InsnOff uint32 | ||||
| 	TypeID  TypeID | ||||
| } | ||||
| 
 | ||||
| func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) { | ||||
| 	typ, err := ts.ByID(fi.TypeID) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	fn, ok := typ.(*Func) | ||||
| 	if !ok { | ||||
| 		return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) | ||||
| 	} | ||||
| 
 | ||||
| 	// C doesn't have anonymous functions, but check just in case. | ||||
| 	if fn.Name == "" { | ||||
| 		return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID) | ||||
| 	} | ||||
| 
 | ||||
| 	return &funcInfo{ | ||||
| 		fn, | ||||
| 		asm.RawInstructionOffset(fi.InsnOff), | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) { | ||||
| 	fis := make([]funcInfo, 0, len(bfis)) | ||||
| 	for _, bfi := range bfis { | ||||
| 		fi, err := newFuncInfo(bfi, ts) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) | ||||
| 		} | ||||
| 		fis = append(fis, *fi) | ||||
| 	} | ||||
| 	sort.Slice(fis, func(i, j int) bool { | ||||
| 		return fis[i].offset < fis[j].offset | ||||
| 	}) | ||||
| 	return fis, nil | ||||
| } | ||||
| 
 | ||||
| // marshal into the BTF wire format. | ||||
| func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error { | ||||
| 	id, err := typeID(fi.fn) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	bfi := bpfFuncInfo{ | ||||
| 		InsnOff: uint32(fi.offset), | ||||
| 		TypeID:  id, | ||||
| 	} | ||||
| 	return binary.Write(w, internal.NativeEndian, &bfi) | ||||
| } | ||||
| 
 | ||||
| // parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of | ||||
| // func infos indexed by section name. | ||||
| func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) { | ||||
| 	recordSize, err := parseExtInfoRecordSize(r, bo) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	result := make(map[string][]bpfFuncInfo) | ||||
| 	for { | ||||
| 		secName, infoHeader, err := parseExtInfoSec(r, bo, strings) | ||||
| 		if errors.Is(err, io.EOF) { | ||||
| 			return result, nil | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("section %v: %w", secName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		result[secName] = records | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // parseFuncInfoRecords parses a stream of func_info records into a []bpfFuncInfo. | ||||
| // These records appear after a btf_ext_info_sec header in the func_info | ||||
| // sub-section of .BTF.ext. | ||||
| func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) { | ||||
| 	var out []bpfFuncInfo | ||||
| 	var fi bpfFuncInfo | ||||
| 
 | ||||
| 	if exp, got := FuncInfoSize, recordSize; exp != got { | ||||
| 		// BTF blob's record size is longer than we know how to parse. | ||||
| 		return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) | ||||
| 	} | ||||
| 
 | ||||
| 	for i := uint32(0); i < recordNum; i++ { | ||||
| 		if err := binary.Read(r, bo, &fi); err != nil { | ||||
| 			return nil, fmt.Errorf("can't read function info: %v", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if fi.InsnOff%asm.InstructionSize != 0 { | ||||
| 			return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) | ||||
| 		} | ||||
| 
 | ||||
| 		// ELF tracks offset in bytes, the kernel expects raw BPF instructions. | ||||
| 		// Convert as early as possible. | ||||
| 		fi.InsnOff /= asm.InstructionSize | ||||
| 
 | ||||
| 		out = append(out, fi) | ||||
| 	} | ||||
| 
 | ||||
| 	return out, nil | ||||
| } | ||||
| 
 | ||||
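| // The size of a LineInfo in BTF wire format. | ||||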
| var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) | ||||
| 
 | ||||
| // Line represents the location and contents of a single line of source | ||||
| // code a BPF ELF was compiled from. | ||||
| type Line struct { | ||||
| 	fileName   string | ||||
| 	line       string | ||||
| 	lineNumber uint32 | ||||
| 	lineColumn uint32 | ||||
| 
 | ||||
| 	// TODO: We should get rid of the fields below, but for that we need to be | ||||
| 	// able to write BTF. | ||||
| 
 | ||||
| 	fileNameOff uint32 | ||||
| 	lineOff     uint32 | ||||
| } | ||||
| 
 | ||||
| func (li *Line) FileName() string { | ||||
| 	return li.fileName | ||||
| } | ||||
| 
 | ||||
| func (li *Line) Line() string { | ||||
| 	return li.line | ||||
| } | ||||
| 
 | ||||
| func (li *Line) LineNumber() uint32 { | ||||
| 	return li.lineNumber | ||||
| } | ||||
| 
 | ||||
| func (li *Line) LineColumn() uint32 { | ||||
| 	return li.lineColumn | ||||
| } | ||||
| 
 | ||||
| func (li *Line) String() string { | ||||
| 	return li.line | ||||
| } | ||||
| 
 | ||||
| type lineInfo struct { | ||||
| 	line   *Line | ||||
| 	offset asm.RawInstructionOffset | ||||
| } | ||||
| 
 | ||||
| // Constants for the format of bpfLineInfo.LineCol. | ||||
| const ( | ||||
| 	bpfLineShift = 10 | ||||
| 	bpfLineMax   = (1 << (32 - bpfLineShift)) - 1 | ||||
| 	bpfColumnMax = (1 << bpfLineShift) - 1 | ||||
| ) | ||||
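| // For example, line 7, column 3 is encoded in LineCol as 7<<bpfLineShift | 3: | ||||
| // the upper 22 bits hold the line number and the lower 10 bits the column. | ||||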
| 
 | ||||
| type bpfLineInfo struct { | ||||
| 	// Instruction offset of the line within the whole instruction stream, in instructions. | ||||
| 	InsnOff     uint32 | ||||
| 	FileNameOff uint32 | ||||
| 	LineOff     uint32 | ||||
| 	LineCol     uint32 | ||||
| } | ||||
| 
 | ||||
| func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) { | ||||
| 	line, err := strings.Lookup(li.LineOff) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("lookup of line: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	fileName, err := strings.Lookup(li.FileNameOff) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("lookup of filename: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	lineNumber := li.LineCol >> bpfLineShift | ||||
| 	lineColumn := li.LineCol & bpfColumnMax | ||||
| 
 | ||||
| 	return &lineInfo{ | ||||
| 		&Line{ | ||||
| 			fileName, | ||||
| 			line, | ||||
| 			lineNumber, | ||||
| 			lineColumn, | ||||
| 			li.FileNameOff, | ||||
| 			li.LineOff, | ||||
| 		}, | ||||
| 		asm.RawInstructionOffset(li.InsnOff), | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) { | ||||
| 	lis := make([]lineInfo, 0, len(blis)) | ||||
| 	for _, bli := range blis { | ||||
| 		li, err := newLineInfo(bli, strings) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err) | ||||
| 		} | ||||
| 		lis = append(lis, *li) | ||||
| 	} | ||||
| 	sort.Slice(lis, func(i, j int) bool { | ||||
| 		return lis[i].offset < lis[j].offset | ||||
| 	}) | ||||
| 	return lis, nil | ||||
| } | ||||
| 
 | ||||
| // marshal writes the binary representation of the LineInfo to w. | ||||
| func (li *lineInfo) marshal(w io.Writer) error { | ||||
| 	line := li.line | ||||
| 	if line.lineNumber > bpfLineMax { | ||||
| 		return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) | ||||
| 	} | ||||
| 
 | ||||
| 	if line.lineColumn > bpfColumnMax { | ||||
| 		return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax) | ||||
| 	} | ||||
| 
 | ||||
| 	bli := bpfLineInfo{ | ||||
| 		uint32(li.offset), | ||||
| 		line.fileNameOff, | ||||
| 		line.lineOff, | ||||
| 		(line.lineNumber << bpfLineShift) | line.lineColumn, | ||||
| 	} | ||||
| 	return binary.Write(w, internal.NativeEndian, &bli) | ||||
| } | ||||
| 
 | ||||
| // parseLineInfos parses a line_info sub-section within .BTF.ext into a map of | ||||
| // line infos indexed by section name. | ||||
| func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) { | ||||
| 	recordSize, err := parseExtInfoRecordSize(r, bo) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	result := make(map[string][]bpfLineInfo) | ||||
| 	for { | ||||
| 		secName, infoHeader, err := parseExtInfoSec(r, bo, strings) | ||||
| 		if errors.Is(err, io.EOF) { | ||||
| 			return result, nil | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("section %v: %w", secName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		result[secName] = records | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // parseLineInfoRecords parses a stream of line_info records into a []bpfLineInfo. | ||||
| // These records appear after a btf_ext_info_sec header in the line_info | ||||
| // sub-section of .BTF.ext. | ||||
| func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) { | ||||
| 	var out []bpfLineInfo | ||||
| 	var li bpfLineInfo | ||||
| 
 | ||||
| 	if exp, got := uint32(binary.Size(li)), recordSize; exp != got { | ||||
| 		// BTF blob's record size is longer than we know how to parse. | ||||
| 		return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) | ||||
| 	} | ||||
| 
 | ||||
| 	for i := uint32(0); i < recordNum; i++ { | ||||
| 		if err := binary.Read(r, bo, &li); err != nil { | ||||
| 			return nil, fmt.Errorf("can't read line info: %v", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if li.InsnOff%asm.InstructionSize != 0 { | ||||
| 			return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) | ||||
| 		} | ||||
| 
 | ||||
| 		// ELF tracks offset in bytes, the kernel expects raw BPF instructions. | ||||
| 		// Convert as early as possible. | ||||
| 		li.InsnOff /= asm.InstructionSize | ||||
| 
 | ||||
| 		out = append(out, li) | ||||
| 	} | ||||
| 
 | ||||
| 	return out, nil | ||||
| } | ||||
| 
 | ||||
| // bpfCORERelo matches the kernel's struct bpf_core_relo. | ||||
| type bpfCORERelo struct { | ||||
| 	InsnOff      uint32 | ||||
| 	TypeID       TypeID | ||||
| 	AccessStrOff uint32 | ||||
| 	Kind         coreKind | ||||
| } | ||||
| 
 | ||||
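| // CORERelocation is a CO-RE relocation entry decoded from a .BTF.ext section, | ||||
| // with its type and accessor string resolved against the accompanying BTF. | ||||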
| type CORERelocation struct { | ||||
| 	typ      Type | ||||
| 	accessor coreAccessor | ||||
| 	kind     coreKind | ||||
| } | ||||
| 
 | ||||
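| // CORERelocationMetadata returns the CO-RE relocation attached to an | ||||
| // instruction's metadata by Assign, or nil if there is none. | ||||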
| func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { | ||||
| 	relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) | ||||
| 	return relo | ||||
| } | ||||
| 
 | ||||
| type coreRelocationInfo struct { | ||||
| 	relo   *CORERelocation | ||||
| 	offset asm.RawInstructionOffset | ||||
| } | ||||
| 
 | ||||
| func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) { | ||||
| 	typ, err := ts.ByID(relo.TypeID) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	accessorStr, err := strings.Lookup(relo.AccessStrOff) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	accessor, err := parseCOREAccessor(accessorStr) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &coreRelocationInfo{ | ||||
| 		&CORERelocation{ | ||||
| 			typ, | ||||
| 			accessor, | ||||
| 			relo.Kind, | ||||
| 		}, | ||||
| 		asm.RawInstructionOffset(relo.InsnOff), | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) { | ||||
| 	rs := make([]coreRelocationInfo, 0, len(brs)) | ||||
| 	for _, br := range brs { | ||||
| 		relo, err := newRelocationInfo(br, ts, strings) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err) | ||||
| 		} | ||||
| 		rs = append(rs, *relo) | ||||
| 	} | ||||
| 	sort.Slice(rs, func(i, j int) bool { | ||||
| 		return rs[i].offset < rs[j].offset | ||||
| 	}) | ||||
| 	return rs, nil | ||||
| } | ||||
| 
 | ||||
| var extInfoReloSize = binary.Size(bpfCORERelo{}) | ||||
| 
 | ||||
| // parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of | ||||
| // CO-RE relocations indexed by section name. | ||||
| func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) { | ||||
| 	recordSize, err := parseExtInfoRecordSize(r, bo) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if recordSize != uint32(extInfoReloSize) { | ||||
| 		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) | ||||
| 	} | ||||
| 
 | ||||
| 	result := make(map[string][]bpfCORERelo) | ||||
| 	for { | ||||
| 		secName, infoHeader, err := parseExtInfoSec(r, bo, strings) | ||||
| 		if errors.Is(err, io.EOF) { | ||||
| 			return result, nil | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("section %v: %w", secName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		result[secName] = records | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // parseCOREReloRecords parses a stream of CO-RE relocation entries into a | ||||
| // []bpfCORERelo. These records appear after a btf_ext_info_sec header in the | ||||
| // core_relos sub-section of .BTF.ext. | ||||
| func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) { | ||||
| 	var out []bpfCORERelo | ||||
| 
 | ||||
| 	var relo bpfCORERelo | ||||
| 	for i := uint32(0); i < recordNum; i++ { | ||||
| 		if err := binary.Read(r, bo, &relo); err != nil { | ||||
| 			return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if relo.InsnOff%asm.InstructionSize != 0 { | ||||
| 			return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) | ||||
| 		} | ||||
| 
 | ||||
| 		// ELF tracks offset in bytes, the kernel expects raw BPF instructions. | ||||
| 		// Convert as early as possible. | ||||
| 		relo.InsnOff /= asm.InstructionSize | ||||
| 
 | ||||
| 		out = append(out, relo) | ||||
| 	} | ||||
| 
 | ||||
| 	return out, nil | ||||
| } | ||||
							
								
								
									
319 vendor/github.com/cilium/ebpf/btf/format.go generated vendored Normal file
|  | @ -0,0 +1,319 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| var errNestedTooDeep = errors.New("nested too deep") | ||||
| 
 | ||||
| // GoFormatter converts a Type to Go syntax. | ||||
| // | ||||
| // A zero GoFormatter is valid to use. | ||||
| type GoFormatter struct { | ||||
| 	w strings.Builder | ||||
| 
 | ||||
| 	// Types present in this map are referred to using the given name if they | ||||
| 	// are encountered when outputting another type. | ||||
| 	Names map[Type]string | ||||
| 
 | ||||
| 	// Identifier is called for each field of struct-like types. By default the | ||||
| 	// field name is used as is. | ||||
| 	Identifier func(string) string | ||||
| 
 | ||||
| 	// EnumIdentifier is called for each element of an enum. By default the | ||||
| 	// name of the enum type is concatenated with Identifier(element). | ||||
| 	EnumIdentifier func(name, element string) string | ||||
| } | ||||
| 
 | ||||
| // TypeDeclaration generates a Go type declaration for a BTF type. | ||||
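| // | ||||
| // A minimal usage sketch (variable names are illustrative): | ||||
| // | ||||
| //	var gf GoFormatter | ||||
| //	decl, err := gf.TypeDeclaration("foo", someStructType) | ||||
| //	// decl might be `type foo struct { bar uint32; }` | ||||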
| func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { | ||||
| 	gf.w.Reset() | ||||
| 	if err := gf.writeTypeDecl(name, typ); err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return gf.w.String(), nil | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) identifier(s string) string { | ||||
| 	if gf.Identifier != nil { | ||||
| 		return gf.Identifier(s) | ||||
| 	} | ||||
| 
 | ||||
| 	return s | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) enumIdentifier(name, element string) string { | ||||
| 	if gf.EnumIdentifier != nil { | ||||
| 		return gf.EnumIdentifier(name, element) | ||||
| 	} | ||||
| 
 | ||||
| 	return name + gf.identifier(element) | ||||
| } | ||||
| 
 | ||||
| // writeTypeDecl outputs a declaration of the given type. | ||||
| // | ||||
| // It encodes https://golang.org/ref/spec#Type_declarations: | ||||
| // | ||||
| //     type foo struct { bar uint32; } | ||||
| //     type bar int32 | ||||
| func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { | ||||
| 	if name == "" { | ||||
| 		return fmt.Errorf("need a name for type %s", typ) | ||||
| 	} | ||||
| 
 | ||||
| 	switch v := skipQualifiers(typ).(type) { | ||||
| 	case *Enum: | ||||
| 		fmt.Fprintf(&gf.w, "type %s ", name) | ||||
| 		switch v.Size { | ||||
| 		case 1: | ||||
| 			gf.w.WriteString("int8") | ||||
| 		case 2: | ||||
| 			gf.w.WriteString("int16") | ||||
| 		case 4: | ||||
| 			gf.w.WriteString("int32") | ||||
| 		case 8: | ||||
| 			gf.w.WriteString("int64") | ||||
| 		default: | ||||
| 			return fmt.Errorf("%s: invalid enum size %d", typ, v.Size) | ||||
| 		} | ||||
| 
 | ||||
| 		if len(v.Values) == 0 { | ||||
| 			return nil | ||||
| 		} | ||||
| 
 | ||||
| 		gf.w.WriteString("; const ( ") | ||||
| 		for _, ev := range v.Values { | ||||
| 			id := gf.enumIdentifier(name, ev.Name) | ||||
| 			fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value) | ||||
| 		} | ||||
| 		gf.w.WriteString(")") | ||||
| 
 | ||||
| 		return nil | ||||
| 
 | ||||
| 	default: | ||||
| 		fmt.Fprintf(&gf.w, "type %s ", name) | ||||
| 		return gf.writeTypeLit(v, 0) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // writeType outputs the name of a named type or a literal describing the type. | ||||
| // | ||||
| // It encodes https://golang.org/ref/spec#Types. | ||||
| // | ||||
| //     foo                  (if foo is a named type) | ||||
| //     uint32 | ||||
| func (gf *GoFormatter) writeType(typ Type, depth int) error { | ||||
| 	typ = skipQualifiers(typ) | ||||
| 
 | ||||
| 	name := gf.Names[typ] | ||||
| 	if name != "" { | ||||
| 		gf.w.WriteString(name) | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	return gf.writeTypeLit(typ, depth) | ||||
| } | ||||
| 
 | ||||
| // writeTypeLit outputs a literal describing the type. | ||||
| // | ||||
| // The function ignores named types. | ||||
| // | ||||
| // It encodes https://golang.org/ref/spec#TypeLit. | ||||
| // | ||||
| //     struct { bar uint32; } | ||||
| //     uint32 | ||||
| func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { | ||||
| 	depth++ | ||||
| 	if depth > maxTypeDepth { | ||||
| 		return errNestedTooDeep | ||||
| 	} | ||||
| 
 | ||||
| 	var err error | ||||
| 	switch v := skipQualifiers(typ).(type) { | ||||
| 	case *Int: | ||||
| 		gf.writeIntLit(v) | ||||
| 
 | ||||
| 	case *Enum: | ||||
| 		gf.w.WriteString("int32") | ||||
| 
 | ||||
| 	case *Typedef: | ||||
| 		err = gf.writeType(v.Type, depth) | ||||
| 
 | ||||
| 	case *Array: | ||||
| 		fmt.Fprintf(&gf.w, "[%d]", v.Nelems) | ||||
| 		err = gf.writeType(v.Type, depth) | ||||
| 
 | ||||
| 	case *Struct: | ||||
| 		err = gf.writeStructLit(v.Size, v.Members, depth) | ||||
| 
 | ||||
| 	case *Union: | ||||
| 		// Always choose the first member to represent the union in Go. | ||||
| 		err = gf.writeStructLit(v.Size, v.Members[:1], depth) | ||||
| 
 | ||||
| 	case *Datasec: | ||||
| 		err = gf.writeDatasecLit(v, depth) | ||||
| 
 | ||||
| 	default: | ||||
| 		return fmt.Errorf("type %T: %w", v, ErrNotSupported) | ||||
| 	} | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("%s: %w", typ, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) writeIntLit(i *Int) { | ||||
| 	// NB: Encoding.IsChar is ignored. | ||||
| 	if i.Encoding.IsBool() && i.Size == 1 { | ||||
| 		gf.w.WriteString("bool") | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	bits := i.Size * 8 | ||||
| 	if i.Encoding.IsSigned() { | ||||
| 		fmt.Fprintf(&gf.w, "int%d", bits) | ||||
| 	} else { | ||||
| 		fmt.Fprintf(&gf.w, "uint%d", bits) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { | ||||
| 	gf.w.WriteString("struct { ") | ||||
| 
 | ||||
| 	prevOffset := uint32(0) | ||||
| 	skippedBitfield := false | ||||
| 	for i, m := range members { | ||||
| 		if m.BitfieldSize > 0 { | ||||
| 			skippedBitfield = true | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		offset := m.Offset.Bytes() | ||||
| 		if n := offset - prevOffset; skippedBitfield && n > 0 { | ||||
| 			fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) | ||||
| 		} else { | ||||
| 			gf.writePadding(n) | ||||
| 		} | ||||
| 
 | ||||
| 		size, err := Sizeof(m.Type) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("field %d: %w", i, err) | ||||
| 		} | ||||
| 		prevOffset = offset + uint32(size) | ||||
| 
 | ||||
| 		if err := gf.writeStructField(m, depth); err != nil { | ||||
| 			return fmt.Errorf("field %d: %w", i, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	gf.writePadding(size - prevOffset) | ||||
| 	gf.w.WriteString("}") | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) writeStructField(m Member, depth int) error { | ||||
| 	if m.BitfieldSize > 0 { | ||||
| 		return fmt.Errorf("bitfields are not supported") | ||||
| 	} | ||||
| 	if m.Offset%8 != 0 { | ||||
| 		return fmt.Errorf("unsupported offset %d", m.Offset) | ||||
| 	} | ||||
| 
 | ||||
| 	if m.Name == "" { | ||||
| 		// Special case a nested anonymous union like | ||||
| 		//     struct foo { union { int bar; int baz }; } | ||||
| 		// by replacing the whole union with its first member. | ||||
| 		union, ok := m.Type.(*Union) | ||||
| 		if !ok { | ||||
| 			return fmt.Errorf("anonymous fields are not supported") | ||||
| 
 | ||||
| 		} | ||||
| 
 | ||||
| 		if len(union.Members) == 0 { | ||||
| 			return errors.New("empty anonymous union") | ||||
| 		} | ||||
| 
 | ||||
| 		depth++ | ||||
| 		if depth > maxTypeDepth { | ||||
| 			return errNestedTooDeep | ||||
| 		} | ||||
| 
 | ||||
| 		m := union.Members[0] | ||||
| 		size, err := Sizeof(m.Type) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		if err := gf.writeStructField(m, depth); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		gf.writePadding(union.Size - uint32(size)) | ||||
| 		return nil | ||||
| 
 | ||||
| 	} | ||||
| 
 | ||||
| 	fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) | ||||
| 
 | ||||
| 	if err := gf.writeType(m.Type, depth); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	gf.w.WriteString("; ") | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { | ||||
| 	gf.w.WriteString("struct { ") | ||||
| 
 | ||||
| 	prevOffset := uint32(0) | ||||
| 	for i, vsi := range ds.Vars { | ||||
| 		v := vsi.Type.(*Var) | ||||
| 		if v.Linkage != GlobalVar { | ||||
| 			// Ignore static, extern, etc. for now. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if v.Name == "" { | ||||
| 			return fmt.Errorf("variable %d: empty name", i) | ||||
| 		} | ||||
| 
 | ||||
| 		gf.writePadding(vsi.Offset - prevOffset) | ||||
| 		prevOffset = vsi.Offset + vsi.Size | ||||
| 
 | ||||
| 		fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) | ||||
| 
 | ||||
| 		if err := gf.writeType(v.Type, depth); err != nil { | ||||
| 			return fmt.Errorf("variable %d: %w", i, err) | ||||
| 		} | ||||
| 
 | ||||
| 		gf.w.WriteString("; ") | ||||
| 	} | ||||
| 
 | ||||
| 	gf.writePadding(ds.Size - prevOffset) | ||||
| 	gf.w.WriteString("}") | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (gf *GoFormatter) writePadding(bytes uint32) { | ||||
| 	if bytes > 0 { | ||||
| 		fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func skipQualifiers(typ Type) Type { | ||||
| 	result := typ | ||||
| 	for depth := 0; depth <= maxTypeDepth; depth++ { | ||||
| 		switch v := (result).(type) { | ||||
| 		case qualifier: | ||||
| 			result = v.qualify() | ||||
| 		default: | ||||
| 			return result | ||||
| 		} | ||||
| 	} | ||||
| 	return &cycle{typ} | ||||
| } | ||||
							
								
								
									
121 vendor/github.com/cilium/ebpf/btf/handle.go generated vendored Normal file
|  | @ -0,0 +1,121 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // HandleInfo describes a Handle. | ||||
| type HandleInfo struct { | ||||
| 	// ID of this handle in the kernel. The ID is only valid as long as the | ||||
| 	// associated handle is kept alive. | ||||
| 	ID ID | ||||
| 
 | ||||
| 	// Name is an identifying name for the BTF, currently only used by the | ||||
| 	// kernel. | ||||
| 	Name string | ||||
| 
 | ||||
| 	// IsKernel is true if the BTF originated with the kernel and not | ||||
| 	// userspace. | ||||
| 	IsKernel bool | ||||
| 
 | ||||
| 	// Size of the raw BTF in bytes. | ||||
| 	size uint32 | ||||
| } | ||||
| 
 | ||||
| func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) { | ||||
| 	// We invoke the syscall once with empty BTF and name buffers to get size | ||||
| 	// information to allocate buffers. Then we invoke it a second time with | ||||
| 	// buffers to receive the data. | ||||
| 	var btfInfo sys.BtfInfo | ||||
| 	if err := sys.ObjInfo(fd, &btfInfo); err != nil { | ||||
| 		return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err) | ||||
| 	} | ||||
| 
 | ||||
| 	if btfInfo.NameLen > 0 { | ||||
| 		// NameLen doesn't account for the terminating NUL. | ||||
| 		btfInfo.NameLen++ | ||||
| 	} | ||||
| 
 | ||||
| 	// Don't pull raw BTF by default, since it may be quite large. | ||||
| 	btfSize := btfInfo.BtfSize | ||||
| 	btfInfo.BtfSize = 0 | ||||
| 
 | ||||
| 	nameBuffer := make([]byte, btfInfo.NameLen) | ||||
| 	btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer) | ||||
| 	if err := sys.ObjInfo(fd, &btfInfo); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &HandleInfo{ | ||||
| 		ID:       ID(btfInfo.Id), | ||||
| 		Name:     unix.ByteSliceToString(nameBuffer), | ||||
| 		IsKernel: btfInfo.KernelBtf != 0, | ||||
| 		size:     btfSize, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| // IsVmlinux returns true if the BTF is for the kernel itself. | ||||
| func (i *HandleInfo) IsVmlinux() bool { | ||||
| 	return i.IsKernel && i.Name == "vmlinux" | ||||
| } | ||||
| 
 | ||||
| // IsModule returns true if the BTF is for a kernel module. | ||||
| func (i *HandleInfo) IsModule() bool { | ||||
| 	return i.IsKernel && i.Name != "vmlinux" | ||||
| } | ||||
| 
 | ||||
| // HandleIterator allows enumerating BTF blobs loaded into the kernel. | ||||
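| // | ||||
| // A minimal iteration sketch (error handling abbreviated): | ||||
| // | ||||
| //	var handle *Handle | ||||
| //	it := new(HandleIterator) | ||||
| //	for it.Next(&handle) { | ||||
| //		// use handle; Next closes it on the following iteration | ||||
| //	} | ||||
| //	if handle != nil { | ||||
| //		handle.Close() | ||||
| //	} | ||||
| //	if err := it.Err(); err != nil { | ||||
| //		// ... | ||||
| //	} | ||||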
| type HandleIterator struct { | ||||
| 	// The ID of the last retrieved handle. Only valid after a call to Next. | ||||
| 	ID  ID | ||||
| 	err error | ||||
| } | ||||
| 
 | ||||
| // Next retrieves a handle for the next BTF blob. | ||||
| // | ||||
| // [Handle.Close] is called if *handle is non-nil to avoid leaking fds. | ||||
| // | ||||
| // Returns true if another BTF blob was found. Call [HandleIterator.Err] after | ||||
| // the function returns false. | ||||
| func (it *HandleIterator) Next(handle **Handle) bool { | ||||
| 	if *handle != nil { | ||||
| 		(*handle).Close() | ||||
| 		*handle = nil | ||||
| 	} | ||||
| 
 | ||||
| 	id := it.ID | ||||
| 	for { | ||||
| 		attr := &sys.BtfGetNextIdAttr{Id: id} | ||||
| 		err := sys.BtfGetNextId(attr) | ||||
| 		if errors.Is(err, os.ErrNotExist) { | ||||
| 			// There are no more BTF objects. | ||||
| 			return false | ||||
| 		} else if err != nil { | ||||
| 			it.err = fmt.Errorf("get next BTF ID: %w", err) | ||||
| 			return false | ||||
| 		} | ||||
| 
 | ||||
| 		id = attr.NextId | ||||
| 		*handle, err = NewHandleFromID(id) | ||||
| 		if errors.Is(err, os.ErrNotExist) { | ||||
| 			// Try again with the next ID. | ||||
| 			continue | ||||
| 		} else if err != nil { | ||||
| 			it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err) | ||||
| 			return false | ||||
| 		} | ||||
| 
 | ||||
| 		it.ID = id | ||||
| 		return true | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Err returns an error if iteration failed for some reason. | ||||
| func (it *HandleIterator) Err() error { | ||||
| 	return it.err | ||||
| } | ||||
							
								
								
									
128 vendor/github.com/cilium/ebpf/btf/strings.go generated vendored Normal file
|  | @ -0,0 +1,128 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| ) | ||||
| 
 | ||||
| type stringTable struct { | ||||
| 	base    *stringTable | ||||
| 	offsets []uint32 | ||||
| 	strings []string | ||||
| } | ||||
| 
 | ||||
| // sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. | ||||
| type sizedReader interface { | ||||
| 	io.Reader | ||||
| 	Size() int64 | ||||
| } | ||||
| 
 | ||||
| func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { | ||||
| 	// When parsing split BTF's string table, the first entry offset is derived | ||||
| 	// from the last entry offset of the base BTF. | ||||
| 	firstStringOffset := uint32(0) | ||||
| 	if base != nil { | ||||
| 		idx := len(base.offsets) - 1 | ||||
| 		firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 | ||||
| 	} | ||||
| 
 | ||||
| 	// Derived from vmlinux BTF. | ||||
| 	const averageStringLength = 16 | ||||
| 
 | ||||
| 	n := int(r.Size() / averageStringLength) | ||||
| 	offsets := make([]uint32, 0, n) | ||||
| 	strings := make([]string, 0, n) | ||||
| 
 | ||||
| 	offset := firstStringOffset | ||||
| 	scanner := bufio.NewScanner(r) | ||||
| 	scanner.Split(splitNull) | ||||
| 	for scanner.Scan() { | ||||
| 		str := scanner.Text() | ||||
| 		offsets = append(offsets, offset) | ||||
| 		strings = append(strings, str) | ||||
| 		offset += uint32(len(str)) + 1 | ||||
| 	} | ||||
| 	if err := scanner.Err(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if len(strings) == 0 { | ||||
| 		return nil, errors.New("string table is empty") | ||||
| 	} | ||||
| 
 | ||||
| 	if firstStringOffset == 0 && strings[0] != "" { | ||||
| 		return nil, errors.New("first item in string table is non-empty") | ||||
| 	} | ||||
| 
 | ||||
| 	return &stringTable{base, offsets, strings}, nil | ||||
| } | ||||
| 
 | ||||
| func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { | ||||
| 	i := bytes.IndexByte(data, 0) | ||||
| 	if i == -1 { | ||||
| 		if atEOF && len(data) > 0 { | ||||
| 			return 0, nil, errors.New("string table isn't null terminated") | ||||
| 		} | ||||
| 		return 0, nil, nil | ||||
| 	} | ||||
| 
 | ||||
| 	return i + 1, data[:i], nil | ||||
| } | ||||
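As a standalone illustration (not part of the library), the same NUL-delimited split function can drive a bufio.Scanner over a BTF-style string table, recovering offsets exactly as readStringTable does above; the table contents here are made up.

package main

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
)

// splitNull mirrors the split function above: every token is NUL-terminated.
func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
	i := bytes.IndexByte(data, 0)
	if i == -1 {
		if atEOF && len(data) > 0 {
			return 0, nil, errors.New("string table isn't null terminated")
		}
		return 0, nil, nil
	}
	return i + 1, data[:i], nil
}

func main() {
	// A BTF string table always starts with an empty string.
	raw := []byte("\x00int\x00char\x00")

	scanner := bufio.NewScanner(bytes.NewReader(raw))
	scanner.Split(splitNull)

	offset := 0
	for scanner.Scan() {
		fmt.Printf("offset %d: %q\n", offset, scanner.Text())
		offset += len(scanner.Bytes()) + 1
	}
	// Prints:
	//   offset 0: ""
	//   offset 1: "int"
	//   offset 5: "char"
}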
| 
 | ||||
| func (st *stringTable) Lookup(offset uint32) (string, error) { | ||||
| 	if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] { | ||||
| 		return st.base.lookup(offset) | ||||
| 	} | ||||
| 	return st.lookup(offset) | ||||
| } | ||||
| 
 | ||||
| func (st *stringTable) lookup(offset uint32) (string, error) { | ||||
| 	i := search(st.offsets, offset) | ||||
| 	if i == len(st.offsets) || st.offsets[i] != offset { | ||||
| 		return "", fmt.Errorf("offset %d isn't start of a string", offset) | ||||
| 	} | ||||
| 
 | ||||
| 	return st.strings[i], nil | ||||
| } | ||||
| 
 | ||||
| func (st *stringTable) Length() int { | ||||
| 	last := len(st.offsets) - 1 | ||||
| 	return int(st.offsets[last]) + len(st.strings[last]) + 1 | ||||
| } | ||||
| 
 | ||||
| func (st *stringTable) Marshal(w io.Writer) error { | ||||
| 	for _, str := range st.strings { | ||||
| 		_, err := io.WriteString(w, str) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		_, err = w.Write([]byte{0}) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // search is a copy of sort.Search specialised for uint32. | ||||
| // | ||||
| // Licensed under https://go.dev/LICENSE | ||||
| func search(ints []uint32, needle uint32) int { | ||||
| 	// Define f(-1) == false and f(n) == true. | ||||
| 	// Invariant: f(i-1) == false, f(j) == true. | ||||
| 	i, j := 0, len(ints) | ||||
| 	for i < j { | ||||
| 		h := int(uint(i+j) >> 1) // avoid overflow when computing h | ||||
| 		// i ≤ h < j | ||||
| 		if !(ints[h] >= needle) { | ||||
| 			i = h + 1 // preserves f(i-1) == false | ||||
| 		} else { | ||||
| 			j = h // preserves f(j) == true | ||||
| 		} | ||||
| 	} | ||||
| 	// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i. | ||||
| 	return i | ||||
| } | ||||
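To make the claimed equivalence with sort.Search concrete, a small standalone check (the function is copied here only for illustration):

package main

import (
	"fmt"
	"sort"
)

// search returns the first index i with ints[i] >= needle, or len(ints).
func search(ints []uint32, needle uint32) int {
	i, j := 0, len(ints)
	for i < j {
		h := int(uint(i+j) >> 1)
		if !(ints[h] >= needle) {
			i = h + 1
		} else {
			j = h
		}
	}
	return i
}

func main() {
	offsets := []uint32{0, 1, 5, 12, 30}
	for _, needle := range []uint32{5, 6, 31} {
		got := search(offsets, needle)
		want := sort.Search(len(offsets), func(i int) bool { return offsets[i] >= needle })
		fmt.Printf("needle=%2d  search=%d  sort.Search=%d\n", needle, got, want)
	}
}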
							
								
								
									
1212	vendor/github.com/cilium/ebpf/btf/types.go	generated	vendored	Normal file
File diff suppressed because it is too large
											
										
									
								
							
							
								
								
									
727	vendor/github.com/cilium/ebpf/collection.go	generated	vendored
									
									
								
							|  | @ -1,15 +1,14 @@ | |||
| package ebpf | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/btf" | ||||
| 	"github.com/cilium/ebpf/btf" | ||||
| ) | ||||
| 
 | ||||
| // CollectionOptions control loading a collection into the kernel. | ||||
|  | @ -18,12 +17,31 @@ import ( | |||
| type CollectionOptions struct { | ||||
| 	Maps     MapOptions | ||||
| 	Programs ProgramOptions | ||||
| 
 | ||||
| 	// MapReplacements takes a set of Maps that will be used instead of | ||||
| 	// creating new ones when loading the CollectionSpec. | ||||
| 	// | ||||
| 	// For each given Map, there must be a corresponding MapSpec in | ||||
| 	// CollectionSpec.Maps, and its type, key/value size, max entries and flags | ||||
| 	// must match the values of the MapSpec. | ||||
| 	// | ||||
| 	// The given Maps are Clone()d before being used in the Collection, so the | ||||
| 	// caller can Close() them freely when they are no longer needed. | ||||
| 	MapReplacements map[string]*Map | ||||
| } | ||||
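A hedged sketch of how MapReplacements might be used to share one map between two collections loaded from the same spec; "testdata/program.o" and "shared_map" are placeholder names, and error handling is shortened to log.Fatal.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("testdata/program.o")
	if err != nil {
		log.Fatal(err)
	}

	first, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer first.Close()

	// Load a second collection from the same spec, but reuse the map created
	// by the first collection instead of creating a fresh one.
	second, err := ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		MapReplacements: map[string]*ebpf.Map{
			"shared_map": first.Maps["shared_map"],
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer second.Close()
}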
| 
 | ||||
| // CollectionSpec describes a collection. | ||||
| type CollectionSpec struct { | ||||
| 	Maps     map[string]*MapSpec | ||||
| 	Programs map[string]*ProgramSpec | ||||
| 
 | ||||
| 	// Types holds type information about Maps and Programs. | ||||
| 	// Modifications to Types are currently undefined behaviour. | ||||
| 	Types *btf.Spec | ||||
| 
 | ||||
| 	// ByteOrder specifies whether the ELF was compiled for | ||||
| 	// big-endian or little-endian architectures. | ||||
| 	ByteOrder binary.ByteOrder | ||||
| } | ||||
| 
 | ||||
| // Copy returns a recursive copy of the spec. | ||||
|  | @ -33,8 +51,10 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { | |||
| 	} | ||||
| 
 | ||||
| 	cpy := CollectionSpec{ | ||||
| 		Maps:     make(map[string]*MapSpec, len(cs.Maps)), | ||||
| 		Programs: make(map[string]*ProgramSpec, len(cs.Programs)), | ||||
| 		Maps:      make(map[string]*MapSpec, len(cs.Maps)), | ||||
| 		Programs:  make(map[string]*ProgramSpec, len(cs.Programs)), | ||||
| 		ByteOrder: cs.ByteOrder, | ||||
| 		Types:     cs.Types, | ||||
| 	} | ||||
| 
 | ||||
| 	for name, spec := range cs.Maps { | ||||
|  | @ -54,19 +74,21 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { | |||
| // when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. | ||||
| // | ||||
| // Returns an error if a named map isn't used in at least one program. | ||||
| // | ||||
| // Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection | ||||
| // instead. | ||||
| func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { | ||||
| 	for symbol, m := range maps { | ||||
| 		// have we seen a program that uses this symbol / map | ||||
| 		seen := false | ||||
| 		fd := m.FD() | ||||
| 		for progName, progSpec := range cs.Programs { | ||||
| 			err := progSpec.Instructions.RewriteMapPtr(symbol, fd) | ||||
| 			err := progSpec.Instructions.AssociateMap(symbol, m) | ||||
| 
 | ||||
| 			switch { | ||||
| 			case err == nil: | ||||
| 				seen = true | ||||
| 
 | ||||
| 			case asm.IsUnreferencedSymbol(err): | ||||
| 			case errors.Is(err, asm.ErrUnreferencedSymbol): | ||||
| 				// Not all programs need to use the map | ||||
| 
 | ||||
| 			default: | ||||
|  | @ -89,8 +111,8 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { | |||
| // | ||||
| // The constant must be defined like so in the C program: | ||||
| // | ||||
| //    static volatile const type foobar; | ||||
| //    static volatile const type foobar = default; | ||||
| //    volatile const type foobar; | ||||
| //    volatile const type foobar = default; | ||||
| // | ||||
| // Replacement values must be of the same length as the C sizeof(type). | ||||
| // If necessary, they are marshalled according to the same rules as | ||||
|  | @ -100,48 +122,81 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { | |||
| // | ||||
| // Returns an error if a constant doesn't exist. | ||||
| func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { | ||||
| 	rodata := cs.Maps[".rodata"] | ||||
| 	if rodata == nil { | ||||
| 		return errors.New("missing .rodata section") | ||||
| 	replaced := make(map[string]bool) | ||||
| 
 | ||||
| 	for name, spec := range cs.Maps { | ||||
| 		if !strings.HasPrefix(name, ".rodata") { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		b, ds, err := spec.dataSection() | ||||
| 		if errors.Is(err, errMapNoBTFValue) { | ||||
| 			// Data sections without a BTF Datasec are valid, but don't support | ||||
| 			// constant replacements. | ||||
| 			continue | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("map %s: %w", name, err) | ||||
| 		} | ||||
| 
 | ||||
| 		// MapSpec.Copy() performs a shallow copy. Fully copy the byte slice | ||||
| 		// to avoid any changes affecting other copies of the MapSpec. | ||||
| 		cpy := make([]byte, len(b)) | ||||
| 		copy(cpy, b) | ||||
| 
 | ||||
| 		for _, v := range ds.Vars { | ||||
| 			vname := v.Type.TypeName() | ||||
| 			replacement, ok := consts[vname] | ||||
| 			if !ok { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			if replaced[vname] { | ||||
| 				return fmt.Errorf("section %s: duplicate variable %s", name, vname) | ||||
| 			} | ||||
| 
 | ||||
| 			if int(v.Offset+v.Size) > len(cpy) { | ||||
| 				return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname) | ||||
| 			} | ||||
| 
 | ||||
| 			b, err := marshalBytes(replacement, int(v.Size)) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("marshaling constant replacement %s: %w", vname, err) | ||||
| 			} | ||||
| 
 | ||||
| 			copy(cpy[v.Offset:v.Offset+v.Size], b) | ||||
| 
 | ||||
| 			replaced[vname] = true | ||||
| 		} | ||||
| 
 | ||||
| 		spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy} | ||||
| 	} | ||||
| 
 | ||||
| 	if rodata.BTF == nil { | ||||
| 		return errors.New(".rodata section has no BTF") | ||||
| 	var missing []string | ||||
| 	for c := range consts { | ||||
| 		if !replaced[c] { | ||||
| 			missing = append(missing, c) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if n := len(rodata.Contents); n != 1 { | ||||
| 		return fmt.Errorf("expected one key in .rodata, found %d", n) | ||||
| 	if len(missing) != 0 { | ||||
| 		return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ",")) | ||||
| 	} | ||||
| 
 | ||||
| 	kv := rodata.Contents[0] | ||||
| 	value, ok := kv.Value.([]byte) | ||||
| 	if !ok { | ||||
| 		return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value) | ||||
| 	} | ||||
| 
 | ||||
| 	buf := make([]byte, len(value)) | ||||
| 	copy(buf, value) | ||||
| 
 | ||||
| 	err := patchValue(buf, btf.MapValue(rodata.BTF), consts) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	rodata.Contents[0] = MapKV{kv.Key, buf} | ||||
| 	return nil | ||||
| } | ||||
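As a sketch of the workflow this enables, a constant declared volatile const on the C side can be overridden before the spec is loaded. "testdata/program.o" and "debug_level" are placeholder names; the replacement value must match the C sizeof, a __u32 here.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("testdata/program.o")
	if err != nil {
		log.Fatal(err)
	}

	// C side (for reference): volatile const __u32 debug_level;
	if err := spec.RewriteConstants(map[string]interface{}{
		"debug_level": uint32(1),
	}); err != nil {
		log.Fatal(err)
	}

	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()
}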
| 
 | ||||
| // Assign the contents of a CollectionSpec to a struct. | ||||
| // | ||||
| // This function is a short-cut to manually checking the presence | ||||
| // of maps and programs in a collection spec. Consider using bpf2go if this | ||||
| // sounds useful. | ||||
| // This function is a shortcut to manually checking the presence | ||||
| // of maps and programs in a CollectionSpec. Consider using bpf2go | ||||
| // if this sounds useful. | ||||
| // | ||||
| // The argument to must be a pointer to a struct. A field of the | ||||
| // 'to' must be a pointer to a struct. A field of the | ||||
| // struct is updated with values from Programs or Maps if it | ||||
| // has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. | ||||
| // The tag gives the name of the program or map as found in | ||||
| // the CollectionSpec. | ||||
| // The tag's value specifies the name of the program or map as | ||||
| // found in the CollectionSpec. | ||||
| // | ||||
| //    struct { | ||||
| //        Foo     *ebpf.ProgramSpec `ebpf:"xdp_foo"` | ||||
|  | @ -149,42 +204,50 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error | |||
| //        Ignored int | ||||
| //    } | ||||
| // | ||||
| // Returns an error if any of the fields can't be found, or | ||||
| // if the same map or program is assigned multiple times. | ||||
| // Returns an error if any of the eBPF objects can't be found, or | ||||
| // if the same MapSpec or ProgramSpec is assigned multiple times. | ||||
| func (cs *CollectionSpec) Assign(to interface{}) error { | ||||
| 	valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { | ||||
| 	// Assign() only supports assigning ProgramSpecs and MapSpecs, | ||||
| 	// so doesn't load any resources into the kernel. | ||||
| 	getValue := func(typ reflect.Type, name string) (interface{}, error) { | ||||
| 		switch typ { | ||||
| 
 | ||||
| 		case reflect.TypeOf((*ProgramSpec)(nil)): | ||||
| 			p := cs.Programs[name] | ||||
| 			if p == nil { | ||||
| 				return reflect.Value{}, fmt.Errorf("missing program %q", name) | ||||
| 			if p := cs.Programs[name]; p != nil { | ||||
| 				return p, nil | ||||
| 			} | ||||
| 			return reflect.ValueOf(p), nil | ||||
| 			return nil, fmt.Errorf("missing program %q", name) | ||||
| 
 | ||||
| 		case reflect.TypeOf((*MapSpec)(nil)): | ||||
| 			m := cs.Maps[name] | ||||
| 			if m == nil { | ||||
| 				return reflect.Value{}, fmt.Errorf("missing map %q", name) | ||||
| 			if m := cs.Maps[name]; m != nil { | ||||
| 				return m, nil | ||||
| 			} | ||||
| 			return reflect.ValueOf(m), nil | ||||
| 			return nil, fmt.Errorf("missing map %q", name) | ||||
| 
 | ||||
| 		default: | ||||
| 			return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) | ||||
| 			return nil, fmt.Errorf("unsupported type %s", typ) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return assignValues(to, valueOf) | ||||
| 	return assignValues(to, getValue) | ||||
| } | ||||
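A brief sketch of Assign as described above: the specs are pulled out by tag so they can be tweaked before anything is loaded into the kernel. "testdata/program.o", "xdp_foo" and "events" are placeholder names.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("testdata/program.o")
	if err != nil {
		log.Fatal(err)
	}

	var specs struct {
		Prog   *ebpf.ProgramSpec `ebpf:"xdp_foo"`
		Events *ebpf.MapSpec     `ebpf:"events"`
	}
	// Assign only hands out the specs; nothing is loaded into the kernel yet.
	if err := spec.Assign(&specs); err != nil {
		log.Fatal(err)
	}

	// The returned specs alias spec.Maps/spec.Programs, so adjustments made
	// here take effect when the spec is eventually loaded.
	specs.Events.MaxEntries = 4096

	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()
}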
| 
 | ||||
| // LoadAndAssign maps and programs into the kernel and assign them to a struct. | ||||
| // LoadAndAssign loads Maps and Programs into the kernel and assigns them | ||||
| // to a struct. | ||||
| // | ||||
| // This function is a short-cut to manually checking the presence | ||||
| // of maps and programs in a collection spec. Consider using bpf2go if this | ||||
| // sounds useful. | ||||
| // Omitting Map/Program.Close() during application shutdown is an error. | ||||
| // See the package documentation for details around Map and Program lifecycle. | ||||
| // | ||||
| // The argument to must be a pointer to a struct. A field of the | ||||
| // struct is updated with values from Programs or Maps if it | ||||
| // has an `ebpf` tag and its type is *Program or *Map. | ||||
| // The tag gives the name of the program or map as found in | ||||
| // the CollectionSpec. | ||||
| // This function is a shortcut to manually checking the presence | ||||
| // of maps and programs in a CollectionSpec. Consider using bpf2go | ||||
| // if this sounds useful. | ||||
| // | ||||
| // 'to' must be a pointer to a struct. A field of the struct is updated with | ||||
| // a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. | ||||
| // The tag's value specifies the name of the program or map as found in the | ||||
| // CollectionSpec. Before updating the struct, the requested objects and their | ||||
| // dependent resources are loaded into the kernel and populated with values if | ||||
| // specified. | ||||
| // | ||||
| //    struct { | ||||
| //        Foo     *ebpf.Program `ebpf:"xdp_foo"` | ||||
|  | @ -195,39 +258,70 @@ func (cs *CollectionSpec) Assign(to interface{}) error { | |||
| // opts may be nil. | ||||
| // | ||||
| // Returns an error if any of the fields can't be found, or | ||||
| // if the same map or program is assigned multiple times. | ||||
| // if the same Map or Program is assigned multiple times. | ||||
| func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { | ||||
| 	if opts == nil { | ||||
| 		opts = &CollectionOptions{} | ||||
| 	loader, err := newCollectionLoader(cs, opts) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer loader.close() | ||||
| 
 | ||||
| 	loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts) | ||||
| 	defer cleanup() | ||||
| 	// Support assigning Programs and Maps, lazy-loading the required objects. | ||||
| 	assignedMaps := make(map[string]bool) | ||||
| 	assignedProgs := make(map[string]bool) | ||||
| 
 | ||||
| 	valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { | ||||
| 	getValue := func(typ reflect.Type, name string) (interface{}, error) { | ||||
| 		switch typ { | ||||
| 
 | ||||
| 		case reflect.TypeOf((*Program)(nil)): | ||||
| 			p, err := loadProgram(name) | ||||
| 			if err != nil { | ||||
| 				return reflect.Value{}, err | ||||
| 			} | ||||
| 			return reflect.ValueOf(p), nil | ||||
| 			assignedProgs[name] = true | ||||
| 			return loader.loadProgram(name) | ||||
| 
 | ||||
| 		case reflect.TypeOf((*Map)(nil)): | ||||
| 			m, err := loadMap(name) | ||||
| 			if err != nil { | ||||
| 				return reflect.Value{}, err | ||||
| 			} | ||||
| 			return reflect.ValueOf(m), nil | ||||
| 			assignedMaps[name] = true | ||||
| 			return loader.loadMap(name) | ||||
| 
 | ||||
| 		default: | ||||
| 			return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) | ||||
| 			return nil, fmt.Errorf("unsupported type %s", typ) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := assignValues(to, valueOf); err != nil { | ||||
| 	// Load the Maps and Programs requested by the annotated struct. | ||||
| 	if err := assignValues(to, getValue); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	done() | ||||
| 	// Populate the requested maps. Has a chance of lazy-loading other dependent maps. | ||||
| 	if err := loader.populateMaps(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	// Evaluate the loader's objects after all (lazy)loading has taken place. | ||||
| 	for n, m := range loader.maps { | ||||
| 		switch m.typ { | ||||
| 		case ProgramArray: | ||||
| 			// Require all lazy-loaded ProgramArrays to be assigned to the given object. | ||||
| 			// The kernel empties a ProgramArray once the last user space reference | ||||
| 			// to it closes, which leads to failed tail calls. Combined with the library | ||||
| 			// closing map fds via GC finalizers this can lead to surprising behaviour. | ||||
| 			// Only allow unassigned ProgramArrays when the library hasn't pre-populated | ||||
| 			// any entries from static value declarations. At this point, we know the map | ||||
| 			// is empty and there's no way for the caller to interact with the map going | ||||
| 			// forward. | ||||
| 			if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { | ||||
| 				return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Prevent loader.cleanup() from closing assigned Maps and Programs. | ||||
| 	for m := range assignedMaps { | ||||
| 		delete(loader.maps, m) | ||||
| 	} | ||||
| 	for p := range assignedProgs { | ||||
| 		delete(loader.programs, p) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
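For comparison, a minimal sketch of LoadAndAssign itself; only the tagged objects and their dependencies end up in the kernel. "testdata/program.o", "xdp_foo" and "bar" are placeholder names.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("testdata/program.o")
	if err != nil {
		log.Fatal(err)
	}

	var objs struct {
		Foo *ebpf.Program `ebpf:"xdp_foo"`
		Bar *ebpf.Map     `ebpf:"bar"`
	}
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		log.Fatal(err)
	}
	// Keep the objects alive for as long as they are needed, then Close them.
	defer objs.Foo.Close()
	defer objs.Bar.Close()
}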
| 
 | ||||
|  | @ -238,42 +332,73 @@ type Collection struct { | |||
| 	Maps     map[string]*Map | ||||
| } | ||||
| 
 | ||||
| // NewCollection creates a Collection from a specification. | ||||
| // NewCollection creates a Collection from the given spec, creating and | ||||
| // loading its declared resources into the kernel. | ||||
| // | ||||
| // Omitting Collection.Close() during application shutdown is an error. | ||||
| // See the package documentation for details around Map and Program lifecycle. | ||||
| func NewCollection(spec *CollectionSpec) (*Collection, error) { | ||||
| 	return NewCollectionWithOptions(spec, CollectionOptions{}) | ||||
| } | ||||
| 
 | ||||
| // NewCollectionWithOptions creates a Collection from a specification. | ||||
| // NewCollectionWithOptions creates a Collection from the given spec using | ||||
| // options, creating and loading its declared resources into the kernel. | ||||
| // | ||||
| // Omitting Collection.Close() during application shutdown is an error. | ||||
| // See the package documentation for details around Map and Program lifecycle. | ||||
| func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { | ||||
| 	loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts) | ||||
| 	defer cleanup() | ||||
| 	loader, err := newCollectionLoader(spec, &opts) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer loader.close() | ||||
| 
 | ||||
| 	// Create maps first, as their fds need to be linked into programs. | ||||
| 	for mapName := range spec.Maps { | ||||
| 		_, err := loadMap(mapName) | ||||
| 		if err != nil { | ||||
| 		if _, err := loader.loadMap(mapName); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	for progName := range spec.Programs { | ||||
| 		_, err := loadProgram(progName) | ||||
| 		if err != nil { | ||||
| 	for progName, prog := range spec.Programs { | ||||
| 		if prog.Type == UnspecifiedProgram { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if _, err := loader.loadProgram(progName); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	maps, progs := done() | ||||
| 	// Maps can contain Program and Map stubs, so populate them after | ||||
| 	// all Maps and Programs have been successfully loaded. | ||||
| 	if err := loader.populateMaps(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Prevent loader.cleanup from closing maps and programs. | ||||
| 	maps, progs := loader.maps, loader.programs | ||||
| 	loader.maps, loader.programs = nil, nil | ||||
| 
 | ||||
| 	return &Collection{ | ||||
| 		progs, | ||||
| 		maps, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| type btfHandleCache map[*btf.Spec]*btf.Handle | ||||
| type handleCache struct { | ||||
| 	btfHandles map[*btf.Spec]*btf.Handle | ||||
| } | ||||
| 
 | ||||
| func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) { | ||||
| 	if btfs[spec] != nil { | ||||
| 		return btfs[spec], nil | ||||
| func newHandleCache() *handleCache { | ||||
| 	return &handleCache{ | ||||
| 		btfHandles: make(map[*btf.Spec]*btf.Handle), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) { | ||||
| 	if hc.btfHandles[spec] != nil { | ||||
| 		return hc.btfHandles[spec], nil | ||||
| 	} | ||||
| 
 | ||||
| 	handle, err := btf.NewHandle(spec) | ||||
|  | @ -281,122 +406,202 @@ func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) { | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	btfs[spec] = handle | ||||
| 	hc.btfHandles[spec] = handle | ||||
| 	return handle, nil | ||||
| } | ||||
| 
 | ||||
| func (btfs btfHandleCache) close() { | ||||
| 	for _, handle := range btfs { | ||||
| func (hc handleCache) close() { | ||||
| 	for _, handle := range hc.btfHandles { | ||||
| 		handle.Close() | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) ( | ||||
| 	loadMap func(string) (*Map, error), | ||||
| 	loadProgram func(string) (*Program, error), | ||||
| 	done func() (map[string]*Map, map[string]*Program), | ||||
| 	cleanup func(), | ||||
| ) { | ||||
| 	var ( | ||||
| 		maps             = make(map[string]*Map) | ||||
| 		progs            = make(map[string]*Program) | ||||
| 		btfs             = make(btfHandleCache) | ||||
| 		skipMapsAndProgs = false | ||||
| 	) | ||||
| type collectionLoader struct { | ||||
| 	coll     *CollectionSpec | ||||
| 	opts     *CollectionOptions | ||||
| 	maps     map[string]*Map | ||||
| 	programs map[string]*Program | ||||
| 	handles  *handleCache | ||||
| } | ||||
| 
 | ||||
| 	cleanup = func() { | ||||
| 		btfs.close() | ||||
| func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { | ||||
| 	if opts == nil { | ||||
| 		opts = &CollectionOptions{} | ||||
| 	} | ||||
| 
 | ||||
| 		if skipMapsAndProgs { | ||||
| 			return | ||||
| 	// Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. | ||||
| 	for name, m := range opts.MapReplacements { | ||||
| 		spec, ok := coll.Maps[name] | ||||
| 		if !ok { | ||||
| 			return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) | ||||
| 		} | ||||
| 
 | ||||
| 		for _, m := range maps { | ||||
| 			m.Close() | ||||
| 		} | ||||
| 
 | ||||
| 		for _, p := range progs { | ||||
| 			p.Close() | ||||
| 		if err := spec.checkCompatibility(m); err != nil { | ||||
| 			return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	done = func() (map[string]*Map, map[string]*Program) { | ||||
| 		skipMapsAndProgs = true | ||||
| 		return maps, progs | ||||
| 	return &collectionLoader{ | ||||
| 		coll, | ||||
| 		opts, | ||||
| 		make(map[string]*Map), | ||||
| 		make(map[string]*Program), | ||||
| 		newHandleCache(), | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| // close all resources left over in the collectionLoader. | ||||
| func (cl *collectionLoader) close() { | ||||
| 	cl.handles.close() | ||||
| 	for _, m := range cl.maps { | ||||
| 		m.Close() | ||||
| 	} | ||||
| 	for _, p := range cl.programs { | ||||
| 		p.Close() | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 	loadMap = func(mapName string) (*Map, error) { | ||||
| 		if m := maps[mapName]; m != nil { | ||||
| 			return m, nil | ||||
| 		} | ||||
| 
 | ||||
| 		mapSpec := coll.Maps[mapName] | ||||
| 		if mapSpec == nil { | ||||
| 			return nil, fmt.Errorf("missing map %s", mapName) | ||||
| 		} | ||||
| 
 | ||||
| 		m, err := newMapWithOptions(mapSpec, opts.Maps, btfs) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("map %s: %w", mapName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		maps[mapName] = m | ||||
| func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { | ||||
| 	if m := cl.maps[mapName]; m != nil { | ||||
| 		return m, nil | ||||
| 	} | ||||
| 
 | ||||
| 	loadProgram = func(progName string) (*Program, error) { | ||||
| 		if prog := progs[progName]; prog != nil { | ||||
| 			return prog, nil | ||||
| 	mapSpec := cl.coll.Maps[mapName] | ||||
| 	if mapSpec == nil { | ||||
| 		return nil, fmt.Errorf("missing map %s", mapName) | ||||
| 	} | ||||
| 
 | ||||
| 	if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF { | ||||
| 		return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName) | ||||
| 	} | ||||
| 
 | ||||
| 	if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { | ||||
| 		// Clone the map to avoid closing user's map later on. | ||||
| 		m, err := replaceMap.Clone() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		progSpec := coll.Programs[progName] | ||||
| 		if progSpec == nil { | ||||
| 			return nil, fmt.Errorf("unknown program %s", progName) | ||||
| 		cl.maps[mapName] = m | ||||
| 		return m, nil | ||||
| 	} | ||||
| 
 | ||||
| 	m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("map %s: %w", mapName, err) | ||||
| 	} | ||||
| 
 | ||||
| 	cl.maps[mapName] = m | ||||
| 	return m, nil | ||||
| } | ||||
| 
 | ||||
| func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { | ||||
| 	if prog := cl.programs[progName]; prog != nil { | ||||
| 		return prog, nil | ||||
| 	} | ||||
| 
 | ||||
| 	progSpec := cl.coll.Programs[progName] | ||||
| 	if progSpec == nil { | ||||
| 		return nil, fmt.Errorf("unknown program %s", progName) | ||||
| 	} | ||||
| 
 | ||||
| 	// Bail out early if we know the kernel is going to reject the program. | ||||
| 	// This skips loading map dependencies, saving some cleanup work later. | ||||
| 	if progSpec.Type == UnspecifiedProgram { | ||||
| 		return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) | ||||
| 	} | ||||
| 
 | ||||
| 	if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF { | ||||
| 		return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName) | ||||
| 	} | ||||
| 
 | ||||
| 	progSpec = progSpec.Copy() | ||||
| 
 | ||||
| 	// Rewrite any reference to a valid map in the program's instructions, | ||||
| 	// which includes all of its dependencies. | ||||
| 	for i := range progSpec.Instructions { | ||||
| 		ins := &progSpec.Instructions[i] | ||||
| 
 | ||||
| 		if !ins.IsLoadFromMap() || ins.Reference() == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		progSpec = progSpec.Copy() | ||||
| 
 | ||||
| 		// Rewrite any reference to a valid map. | ||||
| 		for i := range progSpec.Instructions { | ||||
| 			ins := &progSpec.Instructions[i] | ||||
| 
 | ||||
| 			if ins.OpCode != asm.LoadImmOp(asm.DWord) || ins.Reference == "" { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			if uint32(ins.Constant) != math.MaxUint32 { | ||||
| 				// Don't overwrite maps already rewritten, users can | ||||
| 				// rewrite programs in the spec themselves | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			m, err := loadMap(ins.Reference) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("program %s: %s", progName, err) | ||||
| 			} | ||||
| 
 | ||||
| 			fd := m.FD() | ||||
| 			if fd < 0 { | ||||
| 				return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) | ||||
| 			} | ||||
| 			if err := ins.RewriteMapPtr(m.FD()); err != nil { | ||||
| 				return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err) | ||||
| 			} | ||||
| 		// Don't overwrite map loads containing non-zero map fd's, | ||||
| 		// they can be manually included by the caller. | ||||
| 		// Map FDs/IDs are placed in the lower 32 bits of Constant. | ||||
| 		if int32(ins.Constant) > 0 { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		prog, err := newProgramWithOptions(progSpec, opts.Programs, btfs) | ||||
| 		m, err := cl.loadMap(ins.Reference()) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("program %s: %w", progName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		progs[progName] = prog | ||||
| 		return prog, nil | ||||
| 		if err := ins.AssociateMap(m); err != nil { | ||||
| 			return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return | ||||
| 	prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("program %s: %w", progName, err) | ||||
| 	} | ||||
| 
 | ||||
| 	cl.programs[progName] = prog | ||||
| 	return prog, nil | ||||
| } | ||||
| 
 | ||||
| // LoadCollection parses an object file and converts it to a collection. | ||||
| func (cl *collectionLoader) populateMaps() error { | ||||
| 	for mapName, m := range cl.maps { | ||||
| 		mapSpec, ok := cl.coll.Maps[mapName] | ||||
| 		if !ok { | ||||
| 			return fmt.Errorf("missing map spec %s", mapName) | ||||
| 		} | ||||
| 
 | ||||
| 		mapSpec = mapSpec.Copy() | ||||
| 
 | ||||
| 		// MapSpecs that refer to inner maps or programs within the same | ||||
| 		// CollectionSpec do so using strings. These strings are used as the key | ||||
| 		// to look up the respective object in the Maps or Programs fields. | ||||
| 		// Resolve those references to actual Map or Program resources that | ||||
| 		// have been loaded into the kernel. | ||||
| 		for i, kv := range mapSpec.Contents { | ||||
| 			if objName, ok := kv.Value.(string); ok { | ||||
| 				switch mapSpec.Type { | ||||
| 				case ProgramArray: | ||||
| 					// loadProgram is idempotent and could return an existing Program. | ||||
| 					prog, err := cl.loadProgram(objName) | ||||
| 					if err != nil { | ||||
| 						return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) | ||||
| 					} | ||||
| 					mapSpec.Contents[i] = MapKV{kv.Key, prog} | ||||
| 
 | ||||
| 				case ArrayOfMaps, HashOfMaps: | ||||
| 					// loadMap is idempotent and could return an existing Map. | ||||
| 					innerMap, err := cl.loadMap(objName) | ||||
| 					if err != nil { | ||||
| 						return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) | ||||
| 					} | ||||
| 					mapSpec.Contents[i] = MapKV{kv.Key, innerMap} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// Populate and freeze the map if specified. | ||||
| 		if err := m.finalize(mapSpec); err != nil { | ||||
| 			return fmt.Errorf("populating map %s: %w", mapName, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // LoadCollection reads an object file and creates and loads its declared | ||||
| // resources into the kernel. | ||||
| // | ||||
| // Omitting Collection.Close() during application shutdown is an error. | ||||
| // See the package documentation for details around Map and Program lifecycle. | ||||
| func LoadCollection(file string) (*Collection, error) { | ||||
| 	spec, err := LoadCollectionSpec(file) | ||||
| 	if err != nil { | ||||
|  | @ -439,108 +644,81 @@ func (coll *Collection) DetachProgram(name string) *Program { | |||
| 	return p | ||||
| } | ||||
| 
 | ||||
| // Assign the contents of a collection to a struct. | ||||
| // | ||||
| // Deprecated: use CollectionSpec.Assign instead. It provides the same | ||||
| // functionality but creates only the maps and programs requested. | ||||
| func (coll *Collection) Assign(to interface{}) error { | ||||
| 	assignedMaps := make(map[string]struct{}) | ||||
| 	assignedPrograms := make(map[string]struct{}) | ||||
| 	valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { | ||||
| 		switch typ { | ||||
| 		case reflect.TypeOf((*Program)(nil)): | ||||
| 			p := coll.Programs[name] | ||||
| 			if p == nil { | ||||
| 				return reflect.Value{}, fmt.Errorf("missing program %q", name) | ||||
| 			} | ||||
| 			assignedPrograms[name] = struct{}{} | ||||
| 			return reflect.ValueOf(p), nil | ||||
| 		case reflect.TypeOf((*Map)(nil)): | ||||
| 			m := coll.Maps[name] | ||||
| 			if m == nil { | ||||
| 				return reflect.Value{}, fmt.Errorf("missing map %q", name) | ||||
| 			} | ||||
| 			assignedMaps[name] = struct{}{} | ||||
| 			return reflect.ValueOf(m), nil | ||||
| 		default: | ||||
| 			return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := assignValues(to, valueOf); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	for name := range assignedPrograms { | ||||
| 		coll.DetachProgram(name) | ||||
| 	} | ||||
| 
 | ||||
| 	for name := range assignedMaps { | ||||
| 		coll.DetachMap(name) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| // structField represents a struct field containing the ebpf struct tag. | ||||
| type structField struct { | ||||
| 	reflect.StructField | ||||
| 	value reflect.Value | ||||
| } | ||||
| 
 | ||||
| func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error { | ||||
| 	type structField struct { | ||||
| 		reflect.StructField | ||||
| 		value reflect.Value | ||||
| // ebpfFields extracts field names tagged with 'ebpf' from a struct type. | ||||
| // Keep track of visited types to avoid infinite recursion. | ||||
| func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { | ||||
| 	if visited == nil { | ||||
| 		visited = make(map[reflect.Type]bool) | ||||
| 	} | ||||
| 
 | ||||
| 	var ( | ||||
| 		fields        []structField | ||||
| 		visitedTypes  = make(map[reflect.Type]bool) | ||||
| 		flattenStruct func(reflect.Value) error | ||||
| 	) | ||||
| 	structType := structVal.Type() | ||||
| 	if structType.Kind() != reflect.Struct { | ||||
| 		return nil, fmt.Errorf("%s is not a struct", structType) | ||||
| 	} | ||||
| 
 | ||||
| 	flattenStruct = func(structVal reflect.Value) error { | ||||
| 		structType := structVal.Type() | ||||
| 		if structType.Kind() != reflect.Struct { | ||||
| 			return fmt.Errorf("%s is not a struct", structType) | ||||
| 	if visited[structType] { | ||||
| 		return nil, fmt.Errorf("recursion on type %s", structType) | ||||
| 	} | ||||
| 
 | ||||
| 	fields := make([]structField, 0, structType.NumField()) | ||||
| 	for i := 0; i < structType.NumField(); i++ { | ||||
| 		field := structField{structType.Field(i), structVal.Field(i)} | ||||
| 
 | ||||
| 		// If the field is tagged, gather it and move on. | ||||
| 		name := field.Tag.Get("ebpf") | ||||
| 		if name != "" { | ||||
| 			fields = append(fields, field) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if visitedTypes[structType] { | ||||
| 			return fmt.Errorf("recursion on type %s", structType) | ||||
| 		} | ||||
| 
 | ||||
| 		for i := 0; i < structType.NumField(); i++ { | ||||
| 			field := structField{structType.Field(i), structVal.Field(i)} | ||||
| 
 | ||||
| 			name := field.Tag.Get("ebpf") | ||||
| 			if name != "" { | ||||
| 				fields = append(fields, field) | ||||
| 		// If the field does not have an ebpf tag, but is a struct or a pointer | ||||
| 		// to a struct, attempt to gather its fields as well. | ||||
| 		var v reflect.Value | ||||
| 		switch field.Type.Kind() { | ||||
| 		case reflect.Ptr: | ||||
| 			if field.Type.Elem().Kind() != reflect.Struct { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			var err error | ||||
| 			switch field.Type.Kind() { | ||||
| 			case reflect.Ptr: | ||||
| 				if field.Type.Elem().Kind() != reflect.Struct { | ||||
| 					continue | ||||
| 				} | ||||
| 
 | ||||
| 				if field.value.IsNil() { | ||||
| 					return fmt.Errorf("nil pointer to %s", structType) | ||||
| 				} | ||||
| 
 | ||||
| 				err = flattenStruct(field.value.Elem()) | ||||
| 
 | ||||
| 			case reflect.Struct: | ||||
| 				err = flattenStruct(field.value) | ||||
| 
 | ||||
| 			default: | ||||
| 				continue | ||||
| 			if field.value.IsNil() { | ||||
| 				return nil, fmt.Errorf("nil pointer to %s", structType) | ||||
| 			} | ||||
| 
 | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("field %s: %s", field.Name, err) | ||||
| 			} | ||||
| 			// Obtain the destination type of the pointer. | ||||
| 			v = field.value.Elem() | ||||
| 
 | ||||
| 		case reflect.Struct: | ||||
| 			// Reference the value's type directly. | ||||
| 			v = field.value | ||||
| 
 | ||||
| 		default: | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		return nil | ||||
| 		inner, err := ebpfFields(v, visited) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("field %s: %w", field.Name, err) | ||||
| 		} | ||||
| 
 | ||||
| 		fields = append(fields, inner...) | ||||
| 	} | ||||
| 
 | ||||
| 	return fields, nil | ||||
| } | ||||
| 
 | ||||
| // assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. | ||||
| // | ||||
| // getValue is called for every tagged field of 'to' and must return the value | ||||
| // to be assigned to the field with the given typ and name. | ||||
| func assignValues(to interface{}, | ||||
| 	getValue func(typ reflect.Type, name string) (interface{}, error)) error { | ||||
| 
 | ||||
| 	toValue := reflect.ValueOf(to) | ||||
| 	if toValue.Type().Kind() != reflect.Ptr { | ||||
| 		return fmt.Errorf("%T is not a pointer to struct", to) | ||||
|  | @ -550,7 +728,8 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va | |||
| 		return fmt.Errorf("nil pointer to %T", to) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := flattenStruct(toValue.Elem()); err != nil { | ||||
| 	fields, err := ebpfFields(toValue.Elem(), nil) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
|  | @ -560,19 +739,23 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va | |||
| 		name string | ||||
| 	} | ||||
| 
 | ||||
| 	assignedTo := make(map[elem]string) | ||||
| 	assigned := make(map[elem]string) | ||||
| 	for _, field := range fields { | ||||
| 		name := field.Tag.Get("ebpf") | ||||
| 		if strings.Contains(name, ",") { | ||||
| 		// Get string value the field is tagged with. | ||||
| 		tag := field.Tag.Get("ebpf") | ||||
| 		if strings.Contains(tag, ",") { | ||||
| 			return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) | ||||
| 		} | ||||
| 
 | ||||
| 		e := elem{field.Type, name} | ||||
| 		if assignedField := assignedTo[e]; assignedField != "" { | ||||
| 			return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField) | ||||
| 		// Check if the eBPF object with the requested | ||||
| 		// type and tag was already assigned elsewhere. | ||||
| 		e := elem{field.Type, tag} | ||||
| 		if af := assigned[e]; af != "" { | ||||
| 			return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) | ||||
| 		} | ||||
| 
 | ||||
| 		value, err := valueOf(field.Type, name) | ||||
| 		// Get the eBPF object referred to by the tag. | ||||
| 		value, err := getValue(field.Type, tag) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("field %s: %w", field.Name, err) | ||||
| 		} | ||||
|  | @ -580,9 +763,9 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va | |||
| 		if !field.value.CanSet() { | ||||
| 			return fmt.Errorf("field %s: can't set value", field.Name) | ||||
| 		} | ||||
| 		field.value.Set(reflect.ValueOf(value)) | ||||
| 
 | ||||
| 		field.value.Set(value) | ||||
| 		assignedTo[e] = field.Name | ||||
| 		assigned[e] = field.Name | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
|  |  | |||
							
								
								
									
9	vendor/github.com/cilium/ebpf/doc.go	generated	vendored
									
									
								
							|  | @ -13,4 +13,13 @@ | |||
| // your application as any other resource. | ||||
| // | ||||
| // Use the link subpackage to attach a loaded program to a hook in the kernel. | ||||
| // | ||||
| // Note that losing all references to Map and Program resources will cause | ||||
| // their underlying file descriptors to be closed, potentially removing those | ||||
| // objects from the kernel. Always retain a reference by e.g. deferring a | ||||
| // Close() of a Collection or LoadAndAssign object until application exit. | ||||
| // | ||||
| // Special care needs to be taken when handling maps of type ProgramArray, | ||||
| // as the kernel erases its contents when the last userspace or bpffs | ||||
| // reference disappears, regardless of the map being in active use. | ||||
| package ebpf | ||||
|  |  | |||
							
								
								
									
835	vendor/github.com/cilium/ebpf/elf_reader.go	generated	vendored
File diff suppressed because it is too large
											
										
									
								
							
							
								
								
									
21	vendor/github.com/cilium/ebpf/elf_reader_fuzz.go	generated	vendored
									
									
								
							|  | @ -1,21 +0,0 @@ | |||
| // +build gofuzz | ||||
| 
 | ||||
| // Use with https://github.com/dvyukov/go-fuzz | ||||
| 
 | ||||
| package ebpf | ||||
| 
 | ||||
| import "bytes" | ||||
| 
 | ||||
| func FuzzLoadCollectionSpec(data []byte) int { | ||||
| 	spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data)) | ||||
| 	if err != nil { | ||||
| 		if spec != nil { | ||||
| 			panic("spec is not nil") | ||||
| 		} | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if spec == nil { | ||||
| 		panic("spec is nil") | ||||
| 	} | ||||
| 	return 1 | ||||
| } | ||||
							
								
								
									
158	vendor/github.com/cilium/ebpf/info.go	generated	vendored
									
									
								
							|  | @ -2,6 +2,7 @@ package ebpf | |||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"encoding/hex" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
|  | @ -10,8 +11,13 @@ import ( | |||
| 	"strings" | ||||
| 	"syscall" | ||||
| 	"time" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/btf" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // MapInfo describes a map. | ||||
|  | @ -22,12 +28,13 @@ type MapInfo struct { | |||
| 	ValueSize  uint32 | ||||
| 	MaxEntries uint32 | ||||
| 	Flags      uint32 | ||||
| 	// Name as supplied by user space at load time. | ||||
| 	// Name as supplied by user space at load time. Available from 4.15. | ||||
| 	Name string | ||||
| } | ||||
| 
 | ||||
| func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { | ||||
| 	info, err := bpfGetMapInfoByFD(fd) | ||||
| func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { | ||||
| 	var info sys.MapInfo | ||||
| 	err := sys.ObjInfo(fd, &info) | ||||
| 	if errors.Is(err, syscall.EINVAL) { | ||||
| 		return newMapInfoFromProc(fd) | ||||
| 	} | ||||
|  | @ -36,18 +43,17 @@ func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { | |||
| 	} | ||||
| 
 | ||||
| 	return &MapInfo{ | ||||
| 		MapType(info.map_type), | ||||
| 		MapID(info.id), | ||||
| 		info.key_size, | ||||
| 		info.value_size, | ||||
| 		info.max_entries, | ||||
| 		info.map_flags, | ||||
| 		// name is available from 4.15. | ||||
| 		internal.CString(info.name[:]), | ||||
| 		MapType(info.Type), | ||||
| 		MapID(info.Id), | ||||
| 		info.KeySize, | ||||
| 		info.ValueSize, | ||||
| 		info.MaxEntries, | ||||
| 		info.MapFlags, | ||||
| 		unix.ByteSliceToString(info.Name[:]), | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) { | ||||
| func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { | ||||
| 	var mi MapInfo | ||||
| 	err := scanFdInfo(fd, map[string]interface{}{ | ||||
| 		"map_type":    &mi.Type, | ||||
|  | @ -83,16 +89,21 @@ type programStats struct { | |||
| type ProgramInfo struct { | ||||
| 	Type ProgramType | ||||
| 	id   ProgramID | ||||
| 	// Truncated hash of the BPF bytecode. | ||||
| 	// Truncated hash of the BPF bytecode. Available from 4.13. | ||||
| 	Tag string | ||||
| 	// Name as supplied by user space at load time. | ||||
| 	// Name as supplied by user space at load time. Available from 4.15. | ||||
| 	Name string | ||||
| 
 | ||||
| 	btf   btf.ID | ||||
| 	stats *programStats | ||||
| 
 | ||||
| 	maps  []MapID | ||||
| 	insns []byte | ||||
| } | ||||
| 
 | ||||
| func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { | ||||
| 	info, err := bpfGetProgInfoByFD(fd) | ||||
| func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { | ||||
| 	var info sys.ProgInfo | ||||
| 	err := sys.ObjInfo(fd, &info) | ||||
| 	if errors.Is(err, syscall.EINVAL) { | ||||
| 		return newProgramInfoFromProc(fd) | ||||
| 	} | ||||
|  | @ -100,21 +111,43 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &ProgramInfo{ | ||||
| 		Type: ProgramType(info.prog_type), | ||||
| 		id:   ProgramID(info.id), | ||||
| 		// tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD. | ||||
| 		Tag: hex.EncodeToString(info.tag[:]), | ||||
| 		// name is available from 4.15. | ||||
| 		Name: internal.CString(info.name[:]), | ||||
| 	pi := ProgramInfo{ | ||||
| 		Type: ProgramType(info.Type), | ||||
| 		id:   ProgramID(info.Id), | ||||
| 		Tag:  hex.EncodeToString(info.Tag[:]), | ||||
| 		Name: unix.ByteSliceToString(info.Name[:]), | ||||
| 		btf:  btf.ID(info.BtfId), | ||||
| 		stats: &programStats{ | ||||
| 			runtime:  time.Duration(info.run_time_ns), | ||||
| 			runCount: info.run_cnt, | ||||
| 			runtime:  time.Duration(info.RunTimeNs), | ||||
| 			runCount: info.RunCnt, | ||||
| 		}, | ||||
| 	}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Start with a clean struct for the second call, otherwise we may get EFAULT. | ||||
| 	var info2 sys.ProgInfo | ||||
| 
 | ||||
| 	if info.NrMapIds > 0 { | ||||
| 		pi.maps = make([]MapID, info.NrMapIds) | ||||
| 		info2.NrMapIds = info.NrMapIds | ||||
| 		info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) | ||||
| 	} | ||||
| 
 | ||||
| 	if info.XlatedProgLen > 0 { | ||||
| 		pi.insns = make([]byte, info.XlatedProgLen) | ||||
| 		info2.XlatedProgLen = info.XlatedProgLen | ||||
| 		info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) | ||||
| 	} | ||||
| 
 | ||||
| 	if info.NrMapIds > 0 || info.XlatedProgLen > 0 { | ||||
| 		if err := sys.ObjInfo(fd, &info2); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return &pi, nil | ||||
| } | ||||
| 
 | ||||
| func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) { | ||||
| func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { | ||||
| 	var info ProgramInfo | ||||
| 	err := scanFdInfo(fd, map[string]interface{}{ | ||||
| 		"prog_type": &info.Type, | ||||
|  | @ -142,6 +175,18 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) { | |||
| 	return pi.id, pi.id > 0 | ||||
| } | ||||
| 
 | ||||
| // BTFID returns the BTF ID associated with the program. | ||||
| // | ||||
| // The ID is only valid as long as the associated program is kept alive. | ||||
| // Available from 5.0. | ||||
| // | ||||
| // The bool return value indicates whether this optional field is available and | ||||
| // populated. (The field may be available but not populated if the kernel | ||||
| // supports the field but the program was loaded without BTF information.) | ||||
| func (pi *ProgramInfo) BTFID() (btf.ID, bool) { | ||||
| 	return pi.btf, pi.btf > 0 | ||||
| } | ||||
| 
 | ||||
| // RunCount returns the total number of times the program was called. | ||||
| // | ||||
| // Can return 0 if the collection of statistics is not enabled. See EnableStats(). | ||||
|  | @ -164,13 +209,50 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) { | |||
| 	return time.Duration(0), false | ||||
| } | ||||
| 
 | ||||
| func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { | ||||
| 	raw, err := fd.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| // Instructions returns the 'xlated' instruction stream of the program | ||||
| // after it has been verified and rewritten by the kernel. These instructions | ||||
| // cannot be loaded back into the kernel as-is; this is mainly used for | ||||
| // inspecting loaded programs for troubleshooting, dumping, etc. | ||||
| // | ||||
| // For example, map accesses are made to reference their kernel map IDs, | ||||
| // not the FDs they had when the program was inserted. Note that before | ||||
| // the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated | ||||
| // instructions were not sanitized, making the output even less reusable | ||||
| // and less likely to round-trip or evaluate to the same program Tag. | ||||
| // | ||||
| // The first instruction is marked as a symbol using the Program's name. | ||||
| // | ||||
| // Available from 4.13. Requires CAP_BPF or equivalent. | ||||
| func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { | ||||
| 	// If the calling process is not BPF-capable or if the kernel doesn't | ||||
| 	// support getting xlated instructions, the field will be zero. | ||||
| 	if len(pi.insns) == 0 { | ||||
| 		return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) | ||||
| 	} | ||||
| 
 | ||||
| 	fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw)) | ||||
| 	r := bytes.NewReader(pi.insns) | ||||
| 	var insns asm.Instructions | ||||
| 	if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { | ||||
| 		return nil, fmt.Errorf("unmarshaling instructions: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Tag the first instruction with the name of the program, if available. | ||||
| 	insns[0] = insns[0].WithSymbol(pi.Name) | ||||
| 
 | ||||
| 	return insns, nil | ||||
| } | ||||
| 
 | ||||
| // MapIDs returns the maps related to the program. | ||||
| // | ||||
| // Available from 4.15. | ||||
| // | ||||
| // The bool return value indicates whether this optional field is available. | ||||
| func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { | ||||
| 	return pi.maps, pi.maps != nil | ||||
| } | ||||
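A hedged sketch of how the accessors above might be used to inspect a program pinned in bpffs. The pin path is taken from the command line and is purely illustrative; Instructions needs CAP_BPF (or equivalent) on a 4.13+ kernel.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/cilium/ebpf"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: dump <path to pinned program>")
	}

	prog, err := ebpf.LoadPinnedProgram(os.Args[1], nil)
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()

	info, err := prog.Info()
	if err != nil {
		log.Fatal(err)
	}

	insns, err := info.Instructions()
	if err != nil {
		log.Fatal(err)
	}
	for _, ins := range insns {
		fmt.Println(ins)
	}

	if ids, ok := info.MapIDs(); ok {
		fmt.Println("maps referenced:", ids)
	}
}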
| 
 | ||||
| func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { | ||||
| 	fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | @ -213,6 +295,10 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if len(fields) > 0 && scanned == 0 { | ||||
| 		return ErrNotSupported | ||||
| 	} | ||||
| 
 | ||||
| 	if scanned != len(fields) { | ||||
| 		return errMissingFields | ||||
| 	} | ||||
|  | @ -227,11 +313,9 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { | |||
| // | ||||
| // Requires at least 5.8. | ||||
| func EnableStats(which uint32) (io.Closer, error) { | ||||
| 	attr := internal.BPFEnableStatsAttr{ | ||||
| 		StatsType: which, | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := internal.BPFEnableStats(&attr) | ||||
| 	fd, err := sys.EnableStats(&sys.EnableStatsAttr{ | ||||
| 		Type: which, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  |  | |||
							
								
								
									
6	vendor/github.com/cilium/ebpf/internal/align.go	generated	vendored	Normal file
									
								
							|  | @ -0,0 +1,6 @@ | |||
| package internal | ||||
| 
 | ||||
| // Align returns 'n' updated to 'alignment' boundary. | ||||
| func Align(n, alignment int) int { | ||||
| 	return (int(n) + alignment - 1) / alignment * alignment | ||||
| } | ||||
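Align rounds n up to the next multiple of alignment. As a hypothetical example test sitting next to this file it could read:

	package internal

	import "fmt"

	// ExampleAlign documents the rounding behaviour.
	func ExampleAlign() {
		fmt.Println(Align(0, 8), Align(1, 8), Align(8, 8), Align(13, 8))
		// Output: 0 8 8 16
	}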
							
								
								
									
791 vendor/github.com/cilium/ebpf/internal/btf/btf.go generated vendored
							|  | @ -1,791 +0,0 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"debug/elf" | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"math" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 	"sync" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| const btfMagic = 0xeB9F | ||||
| 
 | ||||
| // Errors returned by BTF functions. | ||||
| var ( | ||||
| 	ErrNotSupported   = internal.ErrNotSupported | ||||
| 	ErrNotFound       = errors.New("not found") | ||||
| 	ErrNoExtendedInfo = errors.New("no extended info") | ||||
| ) | ||||
| 
 | ||||
| // Spec represents decoded BTF. | ||||
| type Spec struct { | ||||
| 	rawTypes   []rawType | ||||
| 	strings    stringTable | ||||
| 	types      []Type | ||||
| 	namedTypes map[string][]namedType | ||||
| 	funcInfos  map[string]extInfo | ||||
| 	lineInfos  map[string]extInfo | ||||
| 	coreRelos  map[string]bpfCoreRelos | ||||
| 	byteOrder  binary.ByteOrder | ||||
| } | ||||
| 
 | ||||
| type btfHeader struct { | ||||
| 	Magic   uint16 | ||||
| 	Version uint8 | ||||
| 	Flags   uint8 | ||||
| 	HdrLen  uint32 | ||||
| 
 | ||||
| 	TypeOff   uint32 | ||||
| 	TypeLen   uint32 | ||||
| 	StringOff uint32 | ||||
| 	StringLen uint32 | ||||
| } | ||||
| 
 | ||||
| // LoadSpecFromReader reads BTF sections from an ELF. | ||||
| // | ||||
| // Returns a nil Spec and no error if no BTF was present. | ||||
| func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { | ||||
| 	file, err := internal.NewSafeELFFile(rd) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer file.Close() | ||||
| 
 | ||||
| 	btfSection, btfExtSection, sectionSizes, err := findBtfSections(file) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if btfSection == nil { | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 
 | ||||
| 	symbols, err := file.Symbols() | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't read symbols: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	variableOffsets := make(map[variable]uint32) | ||||
| 	for _, symbol := range symbols { | ||||
| 		if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { | ||||
| 			// Ignore things like SHN_ABS | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if int(symbol.Section) >= len(file.Sections) { | ||||
| 			return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) | ||||
| 		} | ||||
| 
 | ||||
| 		secName := file.Sections[symbol.Section].Name | ||||
| 		if _, ok := sectionSizes[secName]; !ok { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if symbol.Value > math.MaxUint32 { | ||||
| 			return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) | ||||
| 		} | ||||
| 
 | ||||
| 		variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) | ||||
| 	} | ||||
| 
 | ||||
| 	spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if btfExtSection == nil { | ||||
| 		return spec, nil | ||||
| 	} | ||||
| 
 | ||||
| 	spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't read ext info: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return spec, nil | ||||
| } | ||||
| 
 | ||||
| func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) { | ||||
| 	var ( | ||||
| 		btfSection    *elf.Section | ||||
| 		btfExtSection *elf.Section | ||||
| 		sectionSizes  = make(map[string]uint32) | ||||
| 	) | ||||
| 
 | ||||
| 	for _, sec := range file.Sections { | ||||
| 		switch sec.Name { | ||||
| 		case ".BTF": | ||||
| 			btfSection = sec | ||||
| 		case ".BTF.ext": | ||||
| 			btfExtSection = sec | ||||
| 		default: | ||||
| 			if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { | ||||
| 				break | ||||
| 			} | ||||
| 
 | ||||
| 			if sec.Size > math.MaxUint32 { | ||||
| 				return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) | ||||
| 			} | ||||
| 
 | ||||
| 			sectionSizes[sec.Name] = uint32(sec.Size) | ||||
| 		} | ||||
| 	} | ||||
| 	return btfSection, btfExtSection, sectionSizes, nil | ||||
| } | ||||
| 
 | ||||
| func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) { | ||||
| 	file, err := internal.NewSafeELFFile(rd) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer file.Close() | ||||
| 
 | ||||
| 	btfSection, _, _, err := findBtfSections(file) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf(".BTF ELF section: %s", err) | ||||
| 	} | ||||
| 	if btfSection == nil { | ||||
| 		return nil, fmt.Errorf("unable to find .BTF ELF section") | ||||
| 	} | ||||
| 	return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil) | ||||
| } | ||||
| 
 | ||||
| func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { | ||||
| 	rawTypes, rawStrings, err := parseBTF(btf, bo) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	types, typesByName, err := inflateRawTypes(rawTypes, rawStrings) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &Spec{ | ||||
| 		rawTypes:   rawTypes, | ||||
| 		namedTypes: typesByName, | ||||
| 		types:      types, | ||||
| 		strings:    rawStrings, | ||||
| 		byteOrder:  bo, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| var kernelBTF struct { | ||||
| 	sync.Mutex | ||||
| 	*Spec | ||||
| } | ||||
| 
 | ||||
| // LoadKernelSpec returns the current kernel's BTF information. | ||||
| // | ||||
| // Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns | ||||
| // ErrNotSupported if BTF is not enabled. | ||||
| func LoadKernelSpec() (*Spec, error) { | ||||
| 	kernelBTF.Lock() | ||||
| 	defer kernelBTF.Unlock() | ||||
| 
 | ||||
| 	if kernelBTF.Spec != nil { | ||||
| 		return kernelBTF.Spec, nil | ||||
| 	} | ||||
| 
 | ||||
| 	var err error | ||||
| 	kernelBTF.Spec, err = loadKernelSpec() | ||||
| 	return kernelBTF.Spec, err | ||||
| } | ||||
| 
 | ||||
| func loadKernelSpec() (*Spec, error) { | ||||
| 	release, err := unix.KernelRelease() | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't read kernel release number: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	fh, err := os.Open("/sys/kernel/btf/vmlinux") | ||||
| 	if err == nil { | ||||
| 		defer fh.Close() | ||||
| 
 | ||||
| 		return loadNakedSpec(fh, internal.NativeEndian, nil, nil) | ||||
| 	} | ||||
| 
 | ||||
| 	// use same list of locations as libbpf | ||||
| 	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 | ||||
| 	locations := []string{ | ||||
| 		"/boot/vmlinux-%s", | ||||
| 		"/lib/modules/%s/vmlinux-%[1]s", | ||||
| 		"/lib/modules/%s/build/vmlinux", | ||||
| 		"/usr/lib/modules/%s/kernel/vmlinux", | ||||
| 		"/usr/lib/debug/boot/vmlinux-%s", | ||||
| 		"/usr/lib/debug/boot/vmlinux-%s.debug", | ||||
| 		"/usr/lib/debug/lib/modules/%s/vmlinux", | ||||
| 	} | ||||
| 
 | ||||
| 	for _, loc := range locations { | ||||
| 		path := fmt.Sprintf(loc, release) | ||||
| 
 | ||||
| 		fh, err := os.Open(path) | ||||
| 		if err != nil { | ||||
| 			continue | ||||
| 		} | ||||
| 		defer fh.Close() | ||||
| 
 | ||||
| 		return loadSpecFromVmlinux(fh) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) { | ||||
| 	rawBTF, err := ioutil.ReadAll(btf) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't read BTF: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	rd := bytes.NewReader(rawBTF) | ||||
| 
 | ||||
| 	var header btfHeader | ||||
| 	if err := binary.Read(rd, bo, &header); err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't read header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Magic != btfMagic { | ||||
| 		return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Version != 1 { | ||||
| 		return nil, nil, fmt.Errorf("unexpected version %v", header.Version) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Flags != 0 { | ||||
| 		return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) | ||||
| 	} | ||||
| 
 | ||||
| 	remainder := int64(header.HdrLen) - int64(binary.Size(&header)) | ||||
| 	if remainder < 0 { | ||||
| 		return nil, nil, errors.New("header is too short") | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil { | ||||
| 		return nil, nil, fmt.Errorf("header padding: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen))) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't read type names: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("can't read types: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return rawTypes, rawStrings, nil | ||||
| } | ||||
| 
 | ||||
| type variable struct { | ||||
| 	section string | ||||
| 	name    string | ||||
| } | ||||
| 
 | ||||
| func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { | ||||
| 	for i, rawType := range rawTypes { | ||||
| 		if rawType.Kind() != kindDatasec { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		name, err := rawStrings.Lookup(rawType.NameOff) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		if name == ".kconfig" || name == ".ksyms" { | ||||
| 			return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) | ||||
| 		} | ||||
| 
 | ||||
| 		if rawTypes[i].SizeType != 0 { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		size, ok := sectionSizes[name] | ||||
| 		if !ok { | ||||
| 			return fmt.Errorf("data section %s: missing size", name) | ||||
| 		} | ||||
| 
 | ||||
| 		rawTypes[i].SizeType = size | ||||
| 
 | ||||
| 		secinfos := rawType.data.([]btfVarSecinfo) | ||||
| 		for j, secInfo := range secinfos { | ||||
| 			id := int(secInfo.Type - 1) | ||||
| 			if id >= len(rawTypes) { | ||||
| 				return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) | ||||
| 			} | ||||
| 
 | ||||
| 			varName, err := rawStrings.Lookup(rawTypes[id].NameOff) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) | ||||
| 			} | ||||
| 
 | ||||
| 			offset, ok := variableOffsets[variable{name, varName}] | ||||
| 			if !ok { | ||||
| 				return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) | ||||
| 			} | ||||
| 
 | ||||
| 			secinfos[j].Offset = offset | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| type marshalOpts struct { | ||||
| 	ByteOrder        binary.ByteOrder | ||||
| 	StripFuncLinkage bool | ||||
| } | ||||
| 
 | ||||
| func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { | ||||
| 	var ( | ||||
| 		buf       bytes.Buffer | ||||
| 		header    = new(btfHeader) | ||||
| 		headerLen = binary.Size(header) | ||||
| 	) | ||||
| 
 | ||||
| 	// Reserve space for the header. We have to write it last since | ||||
| 	// we don't know the size of the type section yet. | ||||
| 	_, _ = buf.Write(make([]byte, headerLen)) | ||||
| 
 | ||||
| 	// Write type section, just after the header. | ||||
| 	for _, raw := range s.rawTypes { | ||||
| 		switch { | ||||
| 		case opts.StripFuncLinkage && raw.Kind() == kindFunc: | ||||
| 			raw.SetLinkage(linkageStatic) | ||||
| 		} | ||||
| 
 | ||||
| 		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { | ||||
| 			return nil, fmt.Errorf("can't marshal BTF: %w", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	typeLen := uint32(buf.Len() - headerLen) | ||||
| 
 | ||||
| 	// Write string section after type section. | ||||
| 	_, _ = buf.Write(s.strings) | ||||
| 
 | ||||
| 	// Fill out the header, and write it out. | ||||
| 	header = &btfHeader{ | ||||
| 		Magic:     btfMagic, | ||||
| 		Version:   1, | ||||
| 		Flags:     0, | ||||
| 		HdrLen:    uint32(headerLen), | ||||
| 		TypeOff:   0, | ||||
| 		TypeLen:   typeLen, | ||||
| 		StringOff: typeLen, | ||||
| 		StringLen: uint32(len(s.strings)), | ||||
| 	} | ||||
| 
 | ||||
| 	raw := buf.Bytes() | ||||
| 	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't write header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return raw, nil | ||||
| } | ||||
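The blob built by marshal is laid out as header, type section, string section, with the section offsets measured from the end of the header; that is why StringOff is set to TypeLen. Schematically:

	// +------------------+---------------------+-----------------------+
	// | btfHeader        | type section        | string section        |
	// | HdrLen bytes     | TypeLen bytes       | StringLen bytes       |
	// +------------------+---------------------+-----------------------+
	//                    ^ TypeOff = 0         ^ StringOff = TypeLen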
| 
 | ||||
| type sliceWriter []byte | ||||
| 
 | ||||
| func (sw sliceWriter) Write(p []byte) (int, error) { | ||||
| 	if len(p) != len(sw) { | ||||
| 		return 0, errors.New("size doesn't match") | ||||
| 	} | ||||
| 
 | ||||
| 	return copy(sw, p), nil | ||||
| } | ||||
| 
 | ||||
| // Program finds the BTF for a specific section. | ||||
| // | ||||
| // Length is the number of bytes in the raw BPF instruction stream. | ||||
| // | ||||
| // Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't | ||||
| // contain extended BTF info. | ||||
| func (s *Spec) Program(name string, length uint64) (*Program, error) { | ||||
| 	if length == 0 { | ||||
| 		return nil, errors.New("length mustn't be zero") | ||||
| 	} | ||||
| 
 | ||||
| 	if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil { | ||||
| 		return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo) | ||||
| 	} | ||||
| 
 | ||||
| 	funcInfos, funcOK := s.funcInfos[name] | ||||
| 	lineInfos, lineOK := s.lineInfos[name] | ||||
| 	coreRelos, coreOK := s.coreRelos[name] | ||||
| 
 | ||||
| 	if !funcOK && !lineOK && !coreOK { | ||||
| 		return nil, fmt.Errorf("no extended BTF info for section %s", name) | ||||
| 	} | ||||
| 
 | ||||
| 	return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil | ||||
| } | ||||
| 
 | ||||
| // Datasec returns the BTF required to create maps which represent data sections. | ||||
| func (s *Spec) Datasec(name string) (*Map, error) { | ||||
| 	var datasec Datasec | ||||
| 	if err := s.FindType(name, &datasec); err != nil { | ||||
| 		return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err) | ||||
| 	} | ||||
| 
 | ||||
| 	m := NewMap(s, &Void{}, &datasec) | ||||
| 	return &m, nil | ||||
| } | ||||
| 
 | ||||
| // FindType searches for a type with a specific name. | ||||
| // | ||||
| // hint determines the type of the returned Type. | ||||
| // | ||||
| // Returns an error wrapping ErrNotFound if no matching | ||||
| // type exists in spec. | ||||
| func (s *Spec) FindType(name string, typ Type) error { | ||||
| 	var ( | ||||
| 		wanted    = reflect.TypeOf(typ) | ||||
| 		candidate Type | ||||
| 	) | ||||
| 
 | ||||
| 	for _, typ := range s.namedTypes[essentialName(name)] { | ||||
| 		if reflect.TypeOf(typ) != wanted { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// Match against the full name, not just the essential one. | ||||
| 		if typ.name() != name { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if candidate != nil { | ||||
| 			return fmt.Errorf("type %s: multiple candidates for %T", name, typ) | ||||
| 		} | ||||
| 
 | ||||
| 		candidate = typ | ||||
| 	} | ||||
| 
 | ||||
| 	if candidate == nil { | ||||
| 		return fmt.Errorf("type %s: %w", name, ErrNotFound) | ||||
| 	} | ||||
| 
 | ||||
| 	value := reflect.Indirect(reflect.ValueOf(copyType(candidate))) | ||||
| 	reflect.Indirect(reflect.ValueOf(typ)).Set(value) | ||||
| 	return nil | ||||
| } | ||||
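The concrete type of FindType's second argument doubles as the kind hint. A hypothetical in-package helper (task_struct would be a typical, purely illustrative name to pass for name) might look like:

	// lookupStruct returns a copy of a named struct from a Spec, for example
	// one obtained from LoadKernelSpec.
	func lookupStruct(spec *Spec, name string) (*Struct, error) {
		var s Struct
		// Passing *Struct restricts the search to struct types; the lookup
		// index uses essentialName, but the full name must match exactly.
		if err := spec.FindType(name, &s); err != nil {
			return nil, err
		}
		return &s, nil
	}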
| 
 | ||||
| // Handle is a reference to BTF loaded into the kernel. | ||||
| type Handle struct { | ||||
| 	fd *internal.FD | ||||
| } | ||||
| 
 | ||||
| // NewHandle loads BTF into the kernel. | ||||
| // | ||||
| // Returns ErrNotSupported if BTF is not supported. | ||||
| func NewHandle(spec *Spec) (*Handle, error) { | ||||
| 	if err := haveBTF(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if spec.byteOrder != internal.NativeEndian { | ||||
| 		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) | ||||
| 	} | ||||
| 
 | ||||
| 	btf, err := spec.marshal(marshalOpts{ | ||||
| 		ByteOrder:        internal.NativeEndian, | ||||
| 		StripFuncLinkage: haveFuncLinkage() != nil, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't marshal BTF: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if uint64(len(btf)) > math.MaxUint32 { | ||||
| 		return nil, errors.New("BTF exceeds the maximum size") | ||||
| 	} | ||||
| 
 | ||||
| 	attr := &bpfLoadBTFAttr{ | ||||
| 		btf:     internal.NewSlicePointer(btf), | ||||
| 		btfSize: uint32(len(btf)), | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfLoadBTF(attr) | ||||
| 	if err != nil { | ||||
| 		logBuf := make([]byte, 64*1024) | ||||
| 		attr.logBuf = internal.NewSlicePointer(logBuf) | ||||
| 		attr.btfLogSize = uint32(len(logBuf)) | ||||
| 		attr.btfLogLevel = 1 | ||||
| 		_, logErr := bpfLoadBTF(attr) | ||||
| 		return nil, internal.ErrorWithLog(err, logBuf, logErr) | ||||
| 	} | ||||
| 
 | ||||
| 	return &Handle{fd}, nil | ||||
| } | ||||
| 
 | ||||
| // Close destroys the handle. | ||||
| // | ||||
| // Subsequent calls to FD will return an invalid value. | ||||
| func (h *Handle) Close() error { | ||||
| 	return h.fd.Close() | ||||
| } | ||||
| 
 | ||||
| // FD returns the file descriptor for the handle. | ||||
| func (h *Handle) FD() int { | ||||
| 	value, err := h.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return -1 | ||||
| 	} | ||||
| 
 | ||||
| 	return int(value) | ||||
| } | ||||
| 
 | ||||
| // Map is the BTF for a map. | ||||
| type Map struct { | ||||
| 	spec       *Spec | ||||
| 	key, value Type | ||||
| } | ||||
| 
 | ||||
| // NewMap returns a new Map containing the given values. | ||||
| // The key and value arguments are initialized to Void if nil values are given. | ||||
| func NewMap(spec *Spec, key Type, value Type) Map { | ||||
| 	if key == nil { | ||||
| 		key = &Void{} | ||||
| 	} | ||||
| 	if value == nil { | ||||
| 		value = &Void{} | ||||
| 	} | ||||
| 
 | ||||
| 	return Map{ | ||||
| 		spec:  spec, | ||||
| 		key:   key, | ||||
| 		value: value, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // MapSpec should be a method on Map, but is a free function | ||||
| // to hide it from users of the ebpf package. | ||||
| func MapSpec(m *Map) *Spec { | ||||
| 	return m.spec | ||||
| } | ||||
| 
 | ||||
| // MapKey should be a method on Map, but is a free function | ||||
| // to hide it from users of the ebpf package. | ||||
| func MapKey(m *Map) Type { | ||||
| 	return m.key | ||||
| } | ||||
| 
 | ||||
| // MapValue should be a method on Map, but is a free function | ||||
| // to hide it from users of the ebpf package. | ||||
| func MapValue(m *Map) Type { | ||||
| 	return m.value | ||||
| } | ||||
| 
 | ||||
| // Program is the BTF information for a stream of instructions. | ||||
| type Program struct { | ||||
| 	spec                 *Spec | ||||
| 	length               uint64 | ||||
| 	funcInfos, lineInfos extInfo | ||||
| 	coreRelos            bpfCoreRelos | ||||
| } | ||||
| 
 | ||||
| // ProgramSpec returns the Spec needed for loading function and line infos into the kernel. | ||||
| // | ||||
| // This is a free function instead of a method to hide it from users | ||||
| // of package ebpf. | ||||
| func ProgramSpec(s *Program) *Spec { | ||||
| 	return s.spec | ||||
| } | ||||
| 
 | ||||
| // ProgramAppend the information from other to the Program. | ||||
| // | ||||
| // This is a free function instead of a method to hide it from users | ||||
| // of package ebpf. | ||||
| func ProgramAppend(s, other *Program) error { | ||||
| 	funcInfos, err := s.funcInfos.append(other.funcInfos, s.length) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("func infos: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	lineInfos, err := s.lineInfos.append(other.lineInfos, s.length) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("line infos: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	s.funcInfos = funcInfos | ||||
| 	s.lineInfos = lineInfos | ||||
| 	s.coreRelos = s.coreRelos.append(other.coreRelos, s.length) | ||||
| 	s.length += other.length | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // ProgramFuncInfos returns the binary form of BTF function infos. | ||||
| // | ||||
| // This is a free function instead of a method to hide it from users | ||||
| // of package ebpf. | ||||
| func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) { | ||||
| 	bytes, err = s.funcInfos.MarshalBinary() | ||||
| 	if err != nil { | ||||
| 		return 0, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return s.funcInfos.recordSize, bytes, nil | ||||
| } | ||||
| 
 | ||||
| // ProgramLineInfos returns the binary form of BTF line infos. | ||||
| // | ||||
| // This is a free function instead of a method to hide it from users | ||||
| // of package ebpf. | ||||
| func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) { | ||||
| 	bytes, err = s.lineInfos.MarshalBinary() | ||||
| 	if err != nil { | ||||
| 		return 0, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return s.lineInfos.recordSize, bytes, nil | ||||
| } | ||||
| 
 | ||||
| // ProgramRelocations returns the CO-RE relocations required to adjust the | ||||
| // program to the target. | ||||
| // | ||||
| // This is a free function instead of a method to hide it from users | ||||
| // of package ebpf. | ||||
| func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) { | ||||
| 	if len(s.coreRelos) == 0 { | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 
 | ||||
| 	return coreRelocate(s.spec, target, s.coreRelos) | ||||
| } | ||||
| 
 | ||||
| type bpfLoadBTFAttr struct { | ||||
| 	btf         internal.Pointer | ||||
| 	logBuf      internal.Pointer | ||||
| 	btfSize     uint32 | ||||
| 	btfLogSize  uint32 | ||||
| 	btfLogLevel uint32 | ||||
| } | ||||
| 
 | ||||
| func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) { | ||||
| 	fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return internal.NewFD(uint32(fd)), nil | ||||
| } | ||||
| 
 | ||||
| func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { | ||||
| 	const minHeaderLength = 24 | ||||
| 
 | ||||
| 	typesLen := uint32(binary.Size(types)) | ||||
| 	header := btfHeader{ | ||||
| 		Magic:     btfMagic, | ||||
| 		Version:   1, | ||||
| 		HdrLen:    minHeaderLength, | ||||
| 		TypeOff:   0, | ||||
| 		TypeLen:   typesLen, | ||||
| 		StringOff: typesLen, | ||||
| 		StringLen: uint32(len(strings)), | ||||
| 	} | ||||
| 
 | ||||
| 	buf := new(bytes.Buffer) | ||||
| 	_ = binary.Write(buf, bo, &header) | ||||
| 	_ = binary.Write(buf, bo, types) | ||||
| 	buf.Write(strings) | ||||
| 
 | ||||
| 	return buf.Bytes() | ||||
| } | ||||
| 
 | ||||
| var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { | ||||
| 	var ( | ||||
| 		types struct { | ||||
| 			Integer btfType | ||||
| 			Var     btfType | ||||
| 			btfVar  struct{ Linkage uint32 } | ||||
| 		} | ||||
| 		strings = []byte{0, 'a', 0} | ||||
| 	) | ||||
| 
 | ||||
| 	// We use a BTF_KIND_VAR here, to make sure that | ||||
| 	// the kernel understands BTF at least as well as we | ||||
| 	// do. BTF_KIND_VAR was introduced ~5.1. | ||||
| 	types.Integer.SetKind(kindPointer) | ||||
| 	types.Var.NameOff = 1 | ||||
| 	types.Var.SetKind(kindVar) | ||||
| 	types.Var.SizeType = 1 | ||||
| 
 | ||||
| 	btf := marshalBTF(&types, strings, internal.NativeEndian) | ||||
| 
 | ||||
| 	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ | ||||
| 		btf:     internal.NewSlicePointer(btf), | ||||
| 		btfSize: uint32(len(btf)), | ||||
| 	}) | ||||
| 	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { | ||||
| 		// Treat both EINVAL and EPERM as not supported: loading the program | ||||
| 		// might still succeed without BTF. | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	fd.Close() | ||||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { | ||||
| 	if err := haveBTF(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var ( | ||||
| 		types struct { | ||||
| 			FuncProto btfType | ||||
| 			Func      btfType | ||||
| 		} | ||||
| 		strings = []byte{0, 'a', 0} | ||||
| 	) | ||||
| 
 | ||||
| 	types.FuncProto.SetKind(kindFuncProto) | ||||
| 	types.Func.SetKind(kindFunc) | ||||
| 	types.Func.SizeType = 1 // aka FuncProto | ||||
| 	types.Func.NameOff = 1 | ||||
| 	types.Func.SetLinkage(linkageGlobal) | ||||
| 
 | ||||
| 	btf := marshalBTF(&types, strings, internal.NativeEndian) | ||||
| 
 | ||||
| 	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ | ||||
| 		btf:     internal.NewSlicePointer(btf), | ||||
| 		btfSize: uint32(len(btf)), | ||||
| 	}) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	fd.Close() | ||||
| 	return nil | ||||
| }) | ||||
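Both feature probes above follow the same pattern: hand-assemble a minimal BTF blob, try BPF_BTF_LOAD, and map EINVAL (plus EPERM for the base probe) to ErrNotSupported so callers can degrade gracefully. Condensed into one hypothetical in-package helper:

	func probeBTFBlob(blob []byte) error {
		fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
			btf:     internal.NewSlicePointer(blob),
			btfSize: uint32(len(blob)),
		})
		if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
			// Kernel too old, BTF disabled, or insufficient privileges.
			return internal.ErrNotSupported
		}
		if err != nil {
			return err
		}
		return fd.Close()
	}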
							
								
								
									
388 vendor/github.com/cilium/ebpf/internal/btf/core.go generated vendored
							|  | @ -1,388 +0,0 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // Code in this file is derived from libbpf, which is available under a BSD | ||||
| // 2-Clause license. | ||||
| 
 | ||||
| // Relocation describes a CO-RE relocation. | ||||
| type Relocation struct { | ||||
| 	Current uint32 | ||||
| 	New     uint32 | ||||
| } | ||||
| 
 | ||||
| func (r Relocation) equal(other Relocation) bool { | ||||
| 	return r.Current == other.Current && r.New == other.New | ||||
| } | ||||
| 
 | ||||
| // coreReloKind is the type of CO-RE relocation | ||||
| type coreReloKind uint32 | ||||
| 
 | ||||
| const ( | ||||
| 	reloFieldByteOffset coreReloKind = iota /* field byte offset */ | ||||
| 	reloFieldByteSize                       /* field size in bytes */ | ||||
| 	reloFieldExists                         /* field existence in target kernel */ | ||||
| 	reloFieldSigned                         /* field signedness (0 - unsigned, 1 - signed) */ | ||||
| 	reloFieldLShiftU64                      /* bitfield-specific left bitshift */ | ||||
| 	reloFieldRShiftU64                      /* bitfield-specific right bitshift */ | ||||
| 	reloTypeIDLocal                         /* type ID in local BPF object */ | ||||
| 	reloTypeIDTarget                        /* type ID in target kernel */ | ||||
| 	reloTypeExists                          /* type existence in target kernel */ | ||||
| 	reloTypeSize                            /* type size in bytes */ | ||||
| 	reloEnumvalExists                       /* enum value existence in target kernel */ | ||||
| 	reloEnumvalValue                        /* enum value integer value */ | ||||
| ) | ||||
| 
 | ||||
| func (k coreReloKind) String() string { | ||||
| 	switch k { | ||||
| 	case reloFieldByteOffset: | ||||
| 		return "byte_off" | ||||
| 	case reloFieldByteSize: | ||||
| 		return "byte_sz" | ||||
| 	case reloFieldExists: | ||||
| 		return "field_exists" | ||||
| 	case reloFieldSigned: | ||||
| 		return "signed" | ||||
| 	case reloFieldLShiftU64: | ||||
| 		return "lshift_u64" | ||||
| 	case reloFieldRShiftU64: | ||||
| 		return "rshift_u64" | ||||
| 	case reloTypeIDLocal: | ||||
| 		return "local_type_id" | ||||
| 	case reloTypeIDTarget: | ||||
| 		return "target_type_id" | ||||
| 	case reloTypeExists: | ||||
| 		return "type_exists" | ||||
| 	case reloTypeSize: | ||||
| 		return "type_size" | ||||
| 	case reloEnumvalExists: | ||||
| 		return "enumval_exists" | ||||
| 	case reloEnumvalValue: | ||||
| 		return "enumval_value" | ||||
| 	default: | ||||
| 		return "unknown" | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) { | ||||
| 	if target == nil { | ||||
| 		var err error | ||||
| 		target, err = loadKernelSpec() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if local.byteOrder != target.byteOrder { | ||||
| 		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder) | ||||
| 	} | ||||
| 
 | ||||
| 	relocations := make(map[uint64]Relocation, len(coreRelos)) | ||||
| 	for _, relo := range coreRelos { | ||||
| 		accessorStr, err := local.strings.Lookup(relo.AccessStrOff) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		accessor, err := parseCoreAccessor(accessorStr) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) | ||||
| 		} | ||||
| 
 | ||||
| 		if int(relo.TypeID) >= len(local.types) { | ||||
| 			return nil, fmt.Errorf("invalid type id %d", relo.TypeID) | ||||
| 		} | ||||
| 
 | ||||
| 		typ := local.types[relo.TypeID] | ||||
| 
 | ||||
| 		if relo.ReloKind == reloTypeIDLocal { | ||||
| 			relocations[uint64(relo.InsnOff)] = Relocation{ | ||||
| 				uint32(typ.ID()), | ||||
| 				uint32(typ.ID()), | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		named, ok := typ.(namedType) | ||||
| 		if !ok || named.name() == "" { | ||||
| 			return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported) | ||||
| 		} | ||||
| 
 | ||||
| 		name := essentialName(named.name()) | ||||
| 		res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("relocate %s: %w", name, err) | ||||
| 		} | ||||
| 
 | ||||
| 		relocations[uint64(relo.InsnOff)] = res | ||||
| 	} | ||||
| 
 | ||||
| 	return relocations, nil | ||||
| } | ||||
| 
 | ||||
| var errAmbiguousRelocation = errors.New("ambiguous relocation") | ||||
| 
 | ||||
| func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) { | ||||
| 	var relos []Relocation | ||||
| 	var matches []Type | ||||
| 	for _, target := range targets { | ||||
| 		switch kind { | ||||
| 		case reloTypeIDTarget: | ||||
| 			if localAccessor[0] != 0 { | ||||
| 				return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind) | ||||
| 			} | ||||
| 
 | ||||
| 			if compat, err := coreAreTypesCompatible(local, target); err != nil { | ||||
| 				return Relocation{}, fmt.Errorf("%s: %s", kind, err) | ||||
| 			} else if !compat { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())}) | ||||
| 
 | ||||
| 		default: | ||||
| 			return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported) | ||||
| 		} | ||||
| 		matches = append(matches, target) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(relos) == 0 { | ||||
| 		// TODO: Add switch for existence checks like reloEnumvalExists here. | ||||
| 
 | ||||
| 		// TODO: This might have to be poisoned. | ||||
| 		return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets) | ||||
| 	} | ||||
| 
 | ||||
| 	relo := relos[0] | ||||
| 	for _, altRelo := range relos[1:] { | ||||
| 		if !altRelo.equal(relo) { | ||||
| 			return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return relo, nil | ||||
| } | ||||
| 
 | ||||
| /* coreAccessor contains a path through a struct. It contains at least one index. | ||||
|  * | ||||
|  * The interpretation depends on the kind of the relocation. The following is | ||||
|  * taken from struct bpf_core_relo in libbpf_internal.h: | ||||
|  * | ||||
|  * - for field-based relocations, string encodes an accessed field using | ||||
|  *   a sequence of field and array indices, separated by colon (:). It's | ||||
|  *   conceptually very close to LLVM's getelementptr ([0]) instruction's | ||||
|  *   arguments for identifying offset to a field. | ||||
|  * - for type-based relocations, string is expected to be just "0"; | ||||
|  * - for enum value-based relocations, string contains an index of enum | ||||
|  *   value within its enum type; | ||||
|  * | ||||
|  * Example to provide a better feel. | ||||
|  * | ||||
|  *   struct sample { | ||||
|  *       int a; | ||||
|  *       struct { | ||||
|  *           int b[10]; | ||||
|  *       }; | ||||
|  *   }; | ||||
|  * | ||||
|  *   struct sample s = ...; | ||||
|  *   int x = &s->a;     // encoded as "0:0" (a is field #0) | ||||
|  *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1, | ||||
|  *                      // b is field #0 inside anon struct, accessing elem #5) | ||||
|  *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) | ||||
|  */ | ||||
| type coreAccessor []int | ||||
| 
 | ||||
| func parseCoreAccessor(accessor string) (coreAccessor, error) { | ||||
| 	if accessor == "" { | ||||
| 		return nil, fmt.Errorf("empty accessor") | ||||
| 	} | ||||
| 
 | ||||
| 	var result coreAccessor | ||||
| 	parts := strings.Split(accessor, ":") | ||||
| 	for _, part := range parts { | ||||
| 		// 31 bits to avoid overflowing int on 32 bit platforms. | ||||
| 		index, err := strconv.ParseUint(part, 10, 31) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("accessor index %q: %s", part, err) | ||||
| 		} | ||||
| 
 | ||||
| 		result = append(result, int(index)) | ||||
| 	} | ||||
| 
 | ||||
| 	return result, nil | ||||
| } | ||||
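Concretely, the accessor from the comment above, "0:1:0:5", decodes to the index slice [0 1 0 5]. A standalone restatement of the same parsing rule (not the vendored function itself):

	package example

	import (
		"strconv"
		"strings"
	)

	// parseAccessor splits a CO-RE accessor such as "0:1:0:5" into indices.
	func parseAccessor(s string) ([]int, error) {
		var out []int
		for _, part := range strings.Split(s, ":") {
			// 31 bits, as in the vendored code, so the value fits an int
			// even on 32-bit platforms.
			n, err := strconv.ParseUint(part, 10, 31)
			if err != nil {
				return nil, err
			}
			out = append(out, int(n))
		}
		return out, nil
	}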
| 
 | ||||
| /* The comment below is from bpf_core_types_are_compat in libbpf.c: | ||||
|  * | ||||
|  * Check local and target types for compatibility. This check is used for | ||||
|  * type-based CO-RE relocations and follows slightly different rules than | ||||
|  * field-based relocations. This function assumes that root types were already | ||||
|  * checked for name match. Beyond that initial root-level name check, names | ||||
|  * are completely ignored. Compatibility rules are as follows: | ||||
|  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but | ||||
|  *     kind should match for local and target types (i.e., STRUCT is not | ||||
|  *     compatible with UNION); | ||||
|  *   - for ENUMs, the size is ignored; | ||||
|  *   - for INT, size and signedness are ignored; | ||||
|  *   - for ARRAY, dimensionality is ignored, element types are checked for | ||||
|  *     compatibility recursively; | ||||
|  *   - CONST/VOLATILE/RESTRICT modifiers are ignored; | ||||
|  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible; | ||||
|  *   - FUNC_PROTOs are compatible if they have compatible signature: same | ||||
|  *     number of input args and compatible return and argument types. | ||||
|  * These rules are not set in stone and probably will be adjusted as we get | ||||
|  * more experience with using BPF CO-RE relocations. | ||||
|  */ | ||||
| func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) { | ||||
| 	var ( | ||||
| 		localTs, targetTs typeDeque | ||||
| 		l, t              = &localType, &targetType | ||||
| 		depth             = 0 | ||||
| 	) | ||||
| 
 | ||||
| 	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() { | ||||
| 		if depth >= maxTypeDepth { | ||||
| 			return false, errors.New("types are nested too deep") | ||||
| 		} | ||||
| 
 | ||||
| 		localType = skipQualifierAndTypedef(*l) | ||||
| 		targetType = skipQualifierAndTypedef(*t) | ||||
| 
 | ||||
| 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { | ||||
| 			return false, nil | ||||
| 		} | ||||
| 
 | ||||
| 		switch lv := (localType).(type) { | ||||
| 		case *Void, *Struct, *Union, *Enum, *Fwd: | ||||
| 			// Nothing to do here | ||||
| 
 | ||||
| 		case *Int: | ||||
| 			tv := targetType.(*Int) | ||||
| 			if lv.isBitfield() || tv.isBitfield() { | ||||
| 				return false, nil | ||||
| 			} | ||||
| 
 | ||||
| 		case *Pointer, *Array: | ||||
| 			depth++ | ||||
| 			localType.walk(&localTs) | ||||
| 			targetType.walk(&targetTs) | ||||
| 
 | ||||
| 		case *FuncProto: | ||||
| 			tv := targetType.(*FuncProto) | ||||
| 			if len(lv.Params) != len(tv.Params) { | ||||
| 				return false, nil | ||||
| 			} | ||||
| 
 | ||||
| 			depth++ | ||||
| 			localType.walk(&localTs) | ||||
| 			targetType.walk(&targetTs) | ||||
| 
 | ||||
| 		default: | ||||
| 			return false, fmt.Errorf("unsupported type %T", localType) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if l != nil { | ||||
| 		return false, fmt.Errorf("dangling local type %T", *l) | ||||
| 	} | ||||
| 
 | ||||
| 	if t != nil { | ||||
| 		return false, fmt.Errorf("dangling target type %T", *t) | ||||
| 	} | ||||
| 
 | ||||
| 	return true, nil | ||||
| } | ||||
| 
 | ||||
| /* The comment below is from bpf_core_fields_are_compat in libbpf.c: | ||||
|  * | ||||
|  * Check two types for compatibility for the purpose of field access | ||||
|  * relocation. const/volatile/restrict and typedefs are skipped to ensure we | ||||
|  * are relocating semantically compatible entities: | ||||
|  *   - any two STRUCTs/UNIONs are compatible and can be mixed; | ||||
|  *   - any two FWDs are compatible, if their names match (modulo flavor suffix); | ||||
|  *   - any two PTRs are always compatible; | ||||
|  *   - for ENUMs, names should be the same (ignoring flavor suffix) or at | ||||
|  *     least one of enums should be anonymous; | ||||
|  *   - for ENUMs, check sizes, names are ignored; | ||||
|  *   - for INT, size and signedness are ignored; | ||||
|  *   - for ARRAY, dimensionality is ignored, element types are checked for | ||||
|  *     compatibility recursively; | ||||
|  *   - everything else shouldn't be ever a target of relocation. | ||||
|  * These rules are not set in stone and probably will be adjusted as we get | ||||
|  * more experience with using BPF CO-RE relocations. | ||||
|  */ | ||||
| func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) { | ||||
| 	doNamesMatch := func(a, b string) bool { | ||||
| 		if a == "" || b == "" { | ||||
| 			// allow anonymous and named type to match | ||||
| 			return true | ||||
| 		} | ||||
| 
 | ||||
| 		return essentialName(a) == essentialName(b) | ||||
| 	} | ||||
| 
 | ||||
| 	for depth := 0; depth <= maxTypeDepth; depth++ { | ||||
| 		localType = skipQualifierAndTypedef(localType) | ||||
| 		targetType = skipQualifierAndTypedef(targetType) | ||||
| 
 | ||||
| 		_, lok := localType.(composite) | ||||
| 		_, tok := targetType.(composite) | ||||
| 		if lok && tok { | ||||
| 			return true, nil | ||||
| 		} | ||||
| 
 | ||||
| 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { | ||||
| 			return false, nil | ||||
| 		} | ||||
| 
 | ||||
| 		switch lv := localType.(type) { | ||||
| 		case *Pointer: | ||||
| 			return true, nil | ||||
| 
 | ||||
| 		case *Enum: | ||||
| 			tv := targetType.(*Enum) | ||||
| 			return doNamesMatch(lv.name(), tv.name()), nil | ||||
| 
 | ||||
| 		case *Fwd: | ||||
| 			tv := targetType.(*Fwd) | ||||
| 			return doNamesMatch(lv.name(), tv.name()), nil | ||||
| 
 | ||||
| 		case *Int: | ||||
| 			tv := targetType.(*Int) | ||||
| 			return !lv.isBitfield() && !tv.isBitfield(), nil | ||||
| 
 | ||||
| 		case *Array: | ||||
| 			tv := targetType.(*Array) | ||||
| 
 | ||||
| 			localType = lv.Type | ||||
| 			targetType = tv.Type | ||||
| 
 | ||||
| 		default: | ||||
| 			return false, fmt.Errorf("unsupported type %T", localType) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return false, errors.New("types are nested too deep") | ||||
| } | ||||
| 
 | ||||
| func skipQualifierAndTypedef(typ Type) Type { | ||||
| 	result := typ | ||||
| 	for depth := 0; depth <= maxTypeDepth; depth++ { | ||||
| 		switch v := (result).(type) { | ||||
| 		case qualifier: | ||||
| 			result = v.qualify() | ||||
| 		case *Typedef: | ||||
| 			result = v.Type | ||||
| 		default: | ||||
| 			return result | ||||
| 		} | ||||
| 	} | ||||
| 	return typ | ||||
| } | ||||
							
								
								
									
281 vendor/github.com/cilium/ebpf/internal/btf/ext_info.go generated vendored
							|  | @ -1,281 +0,0 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| ) | ||||
| 
 | ||||
| type btfExtHeader struct { | ||||
| 	Magic   uint16 | ||||
| 	Version uint8 | ||||
| 	Flags   uint8 | ||||
| 	HdrLen  uint32 | ||||
| 
 | ||||
| 	FuncInfoOff uint32 | ||||
| 	FuncInfoLen uint32 | ||||
| 	LineInfoOff uint32 | ||||
| 	LineInfoLen uint32 | ||||
| } | ||||
| 
 | ||||
| type btfExtCoreHeader struct { | ||||
| 	CoreReloOff uint32 | ||||
| 	CoreReloLen uint32 | ||||
| } | ||||
| 
 | ||||
| func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) { | ||||
| 	var header btfExtHeader | ||||
| 	var coreHeader btfExtCoreHeader | ||||
| 	if err := binary.Read(r, bo, &header); err != nil { | ||||
| 		return nil, nil, nil, fmt.Errorf("can't read header: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Magic != btfMagic { | ||||
| 		return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Version != 1 { | ||||
| 		return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version) | ||||
| 	} | ||||
| 
 | ||||
| 	if header.Flags != 0 { | ||||
| 		return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) | ||||
| 	} | ||||
| 
 | ||||
| 	remainder := int64(header.HdrLen) - int64(binary.Size(&header)) | ||||
| 	if remainder < 0 { | ||||
| 		return nil, nil, nil, errors.New("header is too short") | ||||
| 	} | ||||
| 
 | ||||
| 	coreHdrSize := int64(binary.Size(&coreHeader)) | ||||
| 	if remainder >= coreHdrSize { | ||||
| 		if err := binary.Read(r, bo, &coreHeader); err != nil { | ||||
| 			return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err) | ||||
| 		} | ||||
| 		remainder -= coreHdrSize | ||||
| 	} | ||||
| 
 | ||||
| 	// Of course, the .BTF.ext header has different semantics than the | ||||
| 	// .BTF ext header. We need to ignore non-null values. | ||||
| 	_, err = io.CopyN(ioutil.Discard, r, remainder) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, nil, fmt.Errorf("header padding: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil { | ||||
| 		return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen))) | ||||
| 	funcInfo, err = parseExtInfo(buf, bo, strings) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, nil, fmt.Errorf("function info: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil { | ||||
| 		return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen))) | ||||
| 	lineInfo, err = parseExtInfo(buf, bo, strings) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, nil, fmt.Errorf("line info: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { | ||||
| 		if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil { | ||||
| 			return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err) | ||||
| 		} | ||||
| 
 | ||||
| 		coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings) | ||||
| 		if err != nil { | ||||
| 			return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return funcInfo, lineInfo, coreRelos, nil | ||||
| } | ||||
| 
 | ||||
| type btfExtInfoSec struct { | ||||
| 	SecNameOff uint32 | ||||
| 	NumInfo    uint32 | ||||
| } | ||||
| 
 | ||||
| type extInfoRecord struct { | ||||
| 	InsnOff uint64 | ||||
| 	Opaque  []byte | ||||
| } | ||||
| 
 | ||||
| type extInfo struct { | ||||
| 	recordSize uint32 | ||||
| 	records    []extInfoRecord | ||||
| } | ||||
| 
 | ||||
| func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { | ||||
| 	if other.recordSize != ei.recordSize { | ||||
| 		return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) | ||||
| 	} | ||||
| 
 | ||||
| 	records := make([]extInfoRecord, 0, len(ei.records)+len(other.records)) | ||||
| 	records = append(records, ei.records...) | ||||
| 	for _, info := range other.records { | ||||
| 		records = append(records, extInfoRecord{ | ||||
| 			InsnOff: info.InsnOff + offset, | ||||
| 			Opaque:  info.Opaque, | ||||
| 		}) | ||||
| 	} | ||||
| 	return extInfo{ei.recordSize, records}, nil | ||||
| } | ||||
| 
 | ||||
| func (ei extInfo) MarshalBinary() ([]byte, error) { | ||||
| 	if len(ei.records) == 0 { | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 
 | ||||
| 	buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records))) | ||||
| 	for _, info := range ei.records { | ||||
| 		// The kernel expects offsets in number of raw bpf instructions, | ||||
| 		// while the ELF tracks it in bytes. | ||||
| 		insnOff := uint32(info.InsnOff / asm.InstructionSize) | ||||
| 		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil { | ||||
| 			return nil, fmt.Errorf("can't write instruction offset: %v", err) | ||||
| 		} | ||||
| 
 | ||||
| 		buf.Write(info.Opaque) | ||||
| 	} | ||||
| 
 | ||||
| 	return buf.Bytes(), nil | ||||
| } | ||||
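The only unit conversion above is bytes to instructions: the ELF records offsets in bytes, the kernel wants them in instructions, and asm.InstructionSize is 8 (one BPF instruction is 8 bytes).

	// insnOff := uint32(info.InsnOff / asm.InstructionSize)
	//
	//	ELF byte offset 0  -> instruction offset 0
	//	ELF byte offset 8  -> instruction offset 1
	//	ELF byte offset 24 -> instruction offset 3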
| 
 | ||||
| func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) { | ||||
| 	const maxRecordSize = 256 | ||||
| 
 | ||||
| 	var recordSize uint32 | ||||
| 	if err := binary.Read(r, bo, &recordSize); err != nil { | ||||
| 		return nil, fmt.Errorf("can't read record size: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if recordSize < 4 { | ||||
| 		// Need at least insnOff | ||||
| 		return nil, errors.New("record size too short") | ||||
| 	} | ||||
| 	if recordSize > maxRecordSize { | ||||
| 		return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) | ||||
| 	} | ||||
| 
 | ||||
| 	result := make(map[string]extInfo) | ||||
| 	for { | ||||
| 		secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) | ||||
| 		if errors.Is(err, io.EOF) { | ||||
| 			return result, nil | ||||
| 		} | ||||
| 
 | ||||
| 		var records []extInfoRecord | ||||
| 		for i := uint32(0); i < infoHeader.NumInfo; i++ { | ||||
| 			var byteOff uint32 | ||||
| 			if err := binary.Read(r, bo, &byteOff); err != nil { | ||||
| 				return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err) | ||||
| 			} | ||||
| 
 | ||||
| 			buf := make([]byte, int(recordSize-4)) | ||||
| 			if _, err := io.ReadFull(r, buf); err != nil { | ||||
| 				return nil, fmt.Errorf("section %v: can't read record: %v", secName, err) | ||||
| 			} | ||||
| 
 | ||||
| 			if byteOff%asm.InstructionSize != 0 { | ||||
| 				return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff) | ||||
| 			} | ||||
| 
 | ||||
| 			records = append(records, extInfoRecord{uint64(byteOff), buf}) | ||||
| 		} | ||||
| 
 | ||||
| 		result[secName] = extInfo{ | ||||
| 			recordSize, | ||||
| 			records, | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // bpfCoreRelo matches `struct bpf_core_relo` from the kernel | ||||
| type bpfCoreRelo struct { | ||||
| 	InsnOff      uint32 | ||||
| 	TypeID       TypeID | ||||
| 	AccessStrOff uint32 | ||||
| 	ReloKind     coreReloKind | ||||
| } | ||||
| 
 | ||||
| type bpfCoreRelos []bpfCoreRelo | ||||
| 
 | ||||
| // append two slices of bpfCoreRelo to each other. The InsnOff of other is | ||||
| // adjusted by offset. | ||||
| func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos { | ||||
| 	result := make([]bpfCoreRelo, 0, len(r)+len(other)) | ||||
| 	result = append(result, r...) | ||||
| 	for _, relo := range other { | ||||
| 		relo.InsnOff += uint32(offset) | ||||
| 		result = append(result, relo) | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
| 
 | ||||
| var extInfoReloSize = binary.Size(bpfCoreRelo{}) | ||||
| 
 | ||||
| func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) { | ||||
| 	var recordSize uint32 | ||||
| 	if err := binary.Read(r, bo, &recordSize); err != nil { | ||||
| 		return nil, fmt.Errorf("read record size: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if recordSize != uint32(extInfoReloSize) { | ||||
| 		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) | ||||
| 	} | ||||
| 
 | ||||
| 	result := make(map[string]bpfCoreRelos) | ||||
| 	for { | ||||
| 		secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) | ||||
| 		if errors.Is(err, io.EOF) { | ||||
| 			return result, nil | ||||
| 		} | ||||
| 
 | ||||
| 		var relos []bpfCoreRelo | ||||
| 		for i := uint32(0); i < infoHeader.NumInfo; i++ { | ||||
| 			var relo bpfCoreRelo | ||||
| 			if err := binary.Read(r, bo, &relo); err != nil { | ||||
| 				return nil, fmt.Errorf("section %v: read record: %v", secName, err) | ||||
| 			} | ||||
| 
 | ||||
| 			if relo.InsnOff%asm.InstructionSize != 0 { | ||||
| 				return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff) | ||||
| 			} | ||||
| 
 | ||||
| 			relos = append(relos, relo) | ||||
| 		} | ||||
| 
 | ||||
| 		result[secName] = relos | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { | ||||
| 	var infoHeader btfExtInfoSec | ||||
| 	if err := binary.Read(r, bo, &infoHeader); err != nil { | ||||
| 		return "", nil, fmt.Errorf("read ext info header: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	secName, err := strings.Lookup(infoHeader.SecNameOff) | ||||
| 	if err != nil { | ||||
| 		return "", nil, fmt.Errorf("get section name: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if infoHeader.NumInfo == 0 { | ||||
| 		return "", nil, fmt.Errorf("section %s has zero records", secName) | ||||
| 	} | ||||
| 
 | ||||
| 	return secName, &infoHeader, nil | ||||
| } | ||||
							
								
								
									
49 vendor/github.com/cilium/ebpf/internal/btf/fuzz.go generated vendored
							|  | @ -1,49 +0,0 @@ | |||
| // +build gofuzz | ||||
| 
 | ||||
| // Use with https://github.com/dvyukov/go-fuzz | ||||
| 
 | ||||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/binary" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| ) | ||||
| 
 | ||||
| func FuzzSpec(data []byte) int { | ||||
| 	if len(data) < binary.Size(btfHeader{}) { | ||||
| 		return -1 | ||||
| 	} | ||||
| 
 | ||||
| 	spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil) | ||||
| 	if err != nil { | ||||
| 		if spec != nil { | ||||
| 			panic("spec is not nil") | ||||
| 		} | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if spec == nil { | ||||
| 		panic("spec is nil") | ||||
| 	} | ||||
| 	return 1 | ||||
| } | ||||
| 
 | ||||
| func FuzzExtInfo(data []byte) int { | ||||
| 	if len(data) < binary.Size(btfExtHeader{}) { | ||||
| 		return -1 | ||||
| 	} | ||||
| 
 | ||||
| 	table := stringTable("\x00foo\x00barfoo\x00") | ||||
| 	info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table) | ||||
| 	if err != nil { | ||||
| 		if info != nil { | ||||
| 			panic("info is not nil") | ||||
| 		} | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if info == nil { | ||||
| 		panic("info is nil") | ||||
| 	} | ||||
| 	return 1 | ||||
| } | ||||
							
								
								
									
60 vendor/github.com/cilium/ebpf/internal/btf/strings.go generated vendored
							|  | @ -1,60 +0,0 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| ) | ||||
| 
 | ||||
| type stringTable []byte | ||||
| 
 | ||||
| func readStringTable(r io.Reader) (stringTable, error) { | ||||
| 	contents, err := ioutil.ReadAll(r) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't read string table: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(contents) < 1 { | ||||
| 		return nil, errors.New("string table is empty") | ||||
| 	} | ||||
| 
 | ||||
| 	if contents[0] != '\x00' { | ||||
| 		return nil, errors.New("first item in string table is non-empty") | ||||
| 	} | ||||
| 
 | ||||
| 	if contents[len(contents)-1] != '\x00' { | ||||
| 		return nil, errors.New("string table isn't null terminated") | ||||
| 	} | ||||
| 
 | ||||
| 	return stringTable(contents), nil | ||||
| } | ||||
| 
 | ||||
| func (st stringTable) Lookup(offset uint32) (string, error) { | ||||
| 	if int64(offset) > int64(^uint(0)>>1) { | ||||
| 		return "", fmt.Errorf("offset %d overflows int", offset) | ||||
| 	} | ||||
| 
 | ||||
| 	pos := int(offset) | ||||
| 	if pos >= len(st) { | ||||
| 		return "", fmt.Errorf("offset %d is out of bounds", offset) | ||||
| 	} | ||||
| 
 | ||||
| 	if pos > 0 && st[pos-1] != '\x00' { | ||||
| 		return "", fmt.Errorf("offset %d isn't start of a string", offset) | ||||
| 	} | ||||
| 
 | ||||
| 	str := st[pos:] | ||||
| 	end := bytes.IndexByte(str, '\x00') | ||||
| 	if end == -1 { | ||||
| 		return "", fmt.Errorf("offset %d isn't null terminated", offset) | ||||
| 	} | ||||
| 
 | ||||
| 	return string(str[:end]), nil | ||||
| } | ||||
| 
 | ||||
| func (st stringTable) LookupName(offset uint32) (Name, error) { | ||||
| 	str, err := st.Lookup(offset) | ||||
| 	return Name(str), err | ||||
| } | ||||
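The deleted strings.go above treats the BTF string section as a blob of NUL-terminated strings that begins with an empty string at offset 0, and Lookup only accepts offsets that start a string. A self-contained sketch of that layout check (hypothetical helper, not part of the vendored package):

package main

import (
	"bytes"
	"fmt"
)

// lookup mirrors the Lookup method above: an offset is valid only if it is
// in bounds, preceded by a NUL byte (or is offset 0), and the string it
// starts is itself NUL terminated.
func lookup(table []byte, off uint32) (string, error) {
	pos := int(off)
	if pos >= len(table) {
		return "", fmt.Errorf("offset %d is out of bounds", off)
	}
	if pos > 0 && table[pos-1] != 0 {
		return "", fmt.Errorf("offset %d isn't the start of a string", off)
	}
	end := bytes.IndexByte(table[pos:], 0)
	if end == -1 {
		return "", fmt.Errorf("offset %d isn't null terminated", off)
	}
	return string(table[pos : pos+end]), nil
}

func main() {
	table := []byte("\x00foo\x00barfoo\x00") // same fixture as the fuzz harness above
	s, err := lookup(table, 5)
	fmt.Println(s, err) // barfoo <nil>
}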
							
								
								
									
								871	vendor/github.com/cilium/ebpf/internal/btf/types.go (generated, vendored)
							|  | @ -1,871 +0,0 @@ | |||
| package btf | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| const maxTypeDepth = 32 | ||||
| 
 | ||||
| // TypeID identifies a type in a BTF section. | ||||
| type TypeID uint32 | ||||
| 
 | ||||
| // ID implements part of the Type interface. | ||||
| func (tid TypeID) ID() TypeID { | ||||
| 	return tid | ||||
| } | ||||
| 
 | ||||
| // Type represents a type described by BTF. | ||||
| type Type interface { | ||||
| 	ID() TypeID | ||||
| 
 | ||||
| 	String() string | ||||
| 
 | ||||
| 	// Make a copy of the type, without copying Type members. | ||||
| 	copy() Type | ||||
| 
 | ||||
| 	// Enumerate all nested Types. Repeated calls must visit nested | ||||
| 	// types in the same order. | ||||
| 	walk(*typeDeque) | ||||
| } | ||||
| 
 | ||||
| // namedType is a type with a name. | ||||
| // | ||||
| // Most named types simply embed Name. | ||||
| type namedType interface { | ||||
| 	Type | ||||
| 	name() string | ||||
| } | ||||
| 
 | ||||
| // Name identifies a type. | ||||
| // | ||||
| // Anonymous types have an empty name. | ||||
| type Name string | ||||
| 
 | ||||
| func (n Name) name() string { | ||||
| 	return string(n) | ||||
| } | ||||
| 
 | ||||
| // Void is the unit type of BTF. | ||||
| type Void struct{} | ||||
| 
 | ||||
| func (v *Void) ID() TypeID      { return 0 } | ||||
| func (v *Void) String() string  { return "void#0" } | ||||
| func (v *Void) size() uint32    { return 0 } | ||||
| func (v *Void) copy() Type      { return (*Void)(nil) } | ||||
| func (v *Void) walk(*typeDeque) {} | ||||
| 
 | ||||
| type IntEncoding byte | ||||
| 
 | ||||
| const ( | ||||
| 	Signed IntEncoding = 1 << iota | ||||
| 	Char | ||||
| 	Bool | ||||
| ) | ||||
| 
 | ||||
| // Int is an integer of a given length. | ||||
| type Int struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 
 | ||||
| 	// The size of the integer in bytes. | ||||
| 	Size     uint32 | ||||
| 	Encoding IntEncoding | ||||
| 	// Offset is the starting bit offset. Currently always 0. | ||||
| 	// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int | ||||
| 	Offset uint32 | ||||
| 	Bits   byte | ||||
| } | ||||
| 
 | ||||
| var _ namedType = (*Int)(nil) | ||||
| 
 | ||||
| func (i *Int) String() string { | ||||
| 	var s strings.Builder | ||||
| 
 | ||||
| 	switch { | ||||
| 	case i.Encoding&Char != 0: | ||||
| 		s.WriteString("char") | ||||
| 	case i.Encoding&Bool != 0: | ||||
| 		s.WriteString("bool") | ||||
| 	default: | ||||
| 		if i.Encoding&Signed == 0 { | ||||
| 			s.WriteRune('u') | ||||
| 		} | ||||
| 		s.WriteString("int") | ||||
| 		fmt.Fprintf(&s, "%d", i.Size*8) | ||||
| 	} | ||||
| 
 | ||||
| 	fmt.Fprintf(&s, "#%d", i.TypeID) | ||||
| 
 | ||||
| 	if i.Bits > 0 { | ||||
| 		fmt.Fprintf(&s, "[bits=%d]", i.Bits) | ||||
| 	} | ||||
| 
 | ||||
| 	return s.String() | ||||
| } | ||||
| 
 | ||||
| func (i *Int) size() uint32    { return i.Size } | ||||
| func (i *Int) walk(*typeDeque) {} | ||||
| func (i *Int) copy() Type { | ||||
| 	cpy := *i | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| func (i *Int) isBitfield() bool { | ||||
| 	return i.Offset > 0 | ||||
| } | ||||
| 
 | ||||
| // Pointer is a pointer to another type. | ||||
| type Pointer struct { | ||||
| 	TypeID | ||||
| 	Target Type | ||||
| } | ||||
| 
 | ||||
| func (p *Pointer) String() string { | ||||
| 	return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) | ||||
| } | ||||
| 
 | ||||
| func (p *Pointer) size() uint32        { return 8 } | ||||
| func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } | ||||
| func (p *Pointer) copy() Type { | ||||
| 	cpy := *p | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Array is an array with a fixed number of elements. | ||||
| type Array struct { | ||||
| 	TypeID | ||||
| 	Type   Type | ||||
| 	Nelems uint32 | ||||
| } | ||||
| 
 | ||||
| func (arr *Array) String() string { | ||||
| 	return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) | ||||
| } | ||||
| 
 | ||||
| func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } | ||||
| func (arr *Array) copy() Type { | ||||
| 	cpy := *arr | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Struct is a compound type of consecutive members. | ||||
| type Struct struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	// The size of the struct including padding, in bytes | ||||
| 	Size    uint32 | ||||
| 	Members []Member | ||||
| } | ||||
| 
 | ||||
| func (s *Struct) String() string { | ||||
| 	return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name) | ||||
| } | ||||
| 
 | ||||
| func (s *Struct) size() uint32 { return s.Size } | ||||
| 
 | ||||
| func (s *Struct) walk(tdq *typeDeque) { | ||||
| 	for i := range s.Members { | ||||
| 		tdq.push(&s.Members[i].Type) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (s *Struct) copy() Type { | ||||
| 	cpy := *s | ||||
| 	cpy.Members = make([]Member, len(s.Members)) | ||||
| 	copy(cpy.Members, s.Members) | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| func (s *Struct) members() []Member { | ||||
| 	return s.Members | ||||
| } | ||||
| 
 | ||||
| // Union is a compound type where members occupy the same memory. | ||||
| type Union struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	// The size of the union including padding, in bytes. | ||||
| 	Size    uint32 | ||||
| 	Members []Member | ||||
| } | ||||
| 
 | ||||
| func (u *Union) String() string { | ||||
| 	return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name) | ||||
| } | ||||
| 
 | ||||
| func (u *Union) size() uint32 { return u.Size } | ||||
| 
 | ||||
| func (u *Union) walk(tdq *typeDeque) { | ||||
| 	for i := range u.Members { | ||||
| 		tdq.push(&u.Members[i].Type) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (u *Union) copy() Type { | ||||
| 	cpy := *u | ||||
| 	cpy.Members = make([]Member, len(u.Members)) | ||||
| 	copy(cpy.Members, u.Members) | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| func (u *Union) members() []Member { | ||||
| 	return u.Members | ||||
| } | ||||
| 
 | ||||
| type composite interface { | ||||
| 	members() []Member | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	_ composite = (*Struct)(nil) | ||||
| 	_ composite = (*Union)(nil) | ||||
| ) | ||||
| 
 | ||||
| // Member is part of a Struct or Union. | ||||
| // | ||||
| // It is not a valid Type. | ||||
| type Member struct { | ||||
| 	Name | ||||
| 	Type Type | ||||
| 	// Offset is the bit offset of this member | ||||
| 	Offset       uint32 | ||||
| 	BitfieldSize uint32 | ||||
| } | ||||
| 
 | ||||
| // Enum lists possible values. | ||||
| type Enum struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	Values []EnumValue | ||||
| } | ||||
| 
 | ||||
| func (e *Enum) String() string { | ||||
| 	return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name) | ||||
| } | ||||
| 
 | ||||
| // EnumValue is part of an Enum. | ||||
| // | ||||
| // It is not a valid Type. | ||||
| type EnumValue struct { | ||||
| 	Name | ||||
| 	Value int32 | ||||
| } | ||||
| 
 | ||||
| func (e *Enum) size() uint32    { return 4 } | ||||
| func (e *Enum) walk(*typeDeque) {} | ||||
| func (e *Enum) copy() Type { | ||||
| 	cpy := *e | ||||
| 	cpy.Values = make([]EnumValue, len(e.Values)) | ||||
| 	copy(cpy.Values, e.Values) | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // FwdKind is the type of forward declaration. | ||||
| type FwdKind int | ||||
| 
 | ||||
| // Valid types of forward declaration. | ||||
| const ( | ||||
| 	FwdStruct FwdKind = iota | ||||
| 	FwdUnion | ||||
| ) | ||||
| 
 | ||||
| func (fk FwdKind) String() string { | ||||
| 	switch fk { | ||||
| 	case FwdStruct: | ||||
| 		return "struct" | ||||
| 	case FwdUnion: | ||||
| 		return "union" | ||||
| 	default: | ||||
| 		return fmt.Sprintf("%T(%d)", fk, int(fk)) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Fwd is a forward declaration of a Type. | ||||
| type Fwd struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	Kind FwdKind | ||||
| } | ||||
| 
 | ||||
| func (f *Fwd) String() string { | ||||
| 	return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name) | ||||
| } | ||||
| 
 | ||||
| func (f *Fwd) walk(*typeDeque) {} | ||||
| func (f *Fwd) copy() Type { | ||||
| 	cpy := *f | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Typedef is an alias of a Type. | ||||
| type Typedef struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| func (td *Typedef) String() string { | ||||
| 	return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID()) | ||||
| } | ||||
| 
 | ||||
| func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } | ||||
| func (td *Typedef) copy() Type { | ||||
| 	cpy := *td | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Volatile is a qualifier. | ||||
| type Volatile struct { | ||||
| 	TypeID | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| func (v *Volatile) String() string { | ||||
| 	return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID()) | ||||
| } | ||||
| 
 | ||||
| func (v *Volatile) qualify() Type       { return v.Type } | ||||
| func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } | ||||
| func (v *Volatile) copy() Type { | ||||
| 	cpy := *v | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Const is a qualifier. | ||||
| type Const struct { | ||||
| 	TypeID | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| func (c *Const) String() string { | ||||
| 	return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID()) | ||||
| } | ||||
| 
 | ||||
| func (c *Const) qualify() Type       { return c.Type } | ||||
| func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } | ||||
| func (c *Const) copy() Type { | ||||
| 	cpy := *c | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Restrict is a qualifier. | ||||
| type Restrict struct { | ||||
| 	TypeID | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| func (r *Restrict) String() string { | ||||
| 	return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID()) | ||||
| } | ||||
| 
 | ||||
| func (r *Restrict) qualify() Type       { return r.Type } | ||||
| func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } | ||||
| func (r *Restrict) copy() Type { | ||||
| 	cpy := *r | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Func is a function definition. | ||||
| type Func struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| func (f *Func) String() string { | ||||
| 	return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID()) | ||||
| } | ||||
| 
 | ||||
| func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } | ||||
| func (f *Func) copy() Type { | ||||
| 	cpy := *f | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // FuncProto is a function declaration. | ||||
| type FuncProto struct { | ||||
| 	TypeID | ||||
| 	Return Type | ||||
| 	Params []FuncParam | ||||
| } | ||||
| 
 | ||||
| func (fp *FuncProto) String() string { | ||||
| 	var s strings.Builder | ||||
| 	fmt.Fprintf(&s, "proto#%d[", fp.TypeID) | ||||
| 	for _, param := range fp.Params { | ||||
| 		fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID()) | ||||
| 	} | ||||
| 	fmt.Fprintf(&s, "return=#%d]", fp.Return.ID()) | ||||
| 	return s.String() | ||||
| } | ||||
| 
 | ||||
| func (fp *FuncProto) walk(tdq *typeDeque) { | ||||
| 	tdq.push(&fp.Return) | ||||
| 	for i := range fp.Params { | ||||
| 		tdq.push(&fp.Params[i].Type) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (fp *FuncProto) copy() Type { | ||||
| 	cpy := *fp | ||||
| 	cpy.Params = make([]FuncParam, len(fp.Params)) | ||||
| 	copy(cpy.Params, fp.Params) | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| type FuncParam struct { | ||||
| 	Name | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| // Var is a global variable. | ||||
| type Var struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	Type Type | ||||
| } | ||||
| 
 | ||||
| func (v *Var) String() string { | ||||
| 	// TODO: Linkage | ||||
| 	return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name) | ||||
| } | ||||
| 
 | ||||
| func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } | ||||
| func (v *Var) copy() Type { | ||||
| 	cpy := *v | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // Datasec is a global program section containing data. | ||||
| type Datasec struct { | ||||
| 	TypeID | ||||
| 	Name | ||||
| 	Size uint32 | ||||
| 	Vars []VarSecinfo | ||||
| } | ||||
| 
 | ||||
| func (ds *Datasec) String() string { | ||||
| 	return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name) | ||||
| } | ||||
| 
 | ||||
| func (ds *Datasec) size() uint32 { return ds.Size } | ||||
| 
 | ||||
| func (ds *Datasec) walk(tdq *typeDeque) { | ||||
| 	for i := range ds.Vars { | ||||
| 		tdq.push(&ds.Vars[i].Type) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (ds *Datasec) copy() Type { | ||||
| 	cpy := *ds | ||||
| 	cpy.Vars = make([]VarSecinfo, len(ds.Vars)) | ||||
| 	copy(cpy.Vars, ds.Vars) | ||||
| 	return &cpy | ||||
| } | ||||
| 
 | ||||
| // VarSecinfo describes a variable in a Datasec. | ||||
| // | ||||
| // It is not a valid Type. | ||||
| type VarSecinfo struct { | ||||
| 	Type   Type | ||||
| 	Offset uint32 | ||||
| 	Size   uint32 | ||||
| } | ||||
| 
 | ||||
| type sizer interface { | ||||
| 	size() uint32 | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	_ sizer = (*Int)(nil) | ||||
| 	_ sizer = (*Pointer)(nil) | ||||
| 	_ sizer = (*Struct)(nil) | ||||
| 	_ sizer = (*Union)(nil) | ||||
| 	_ sizer = (*Enum)(nil) | ||||
| 	_ sizer = (*Datasec)(nil) | ||||
| ) | ||||
| 
 | ||||
| type qualifier interface { | ||||
| 	qualify() Type | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	_ qualifier = (*Const)(nil) | ||||
| 	_ qualifier = (*Restrict)(nil) | ||||
| 	_ qualifier = (*Volatile)(nil) | ||||
| ) | ||||
| 
 | ||||
| // Sizeof returns the size of a type in bytes. | ||||
| // | ||||
| // Returns an error if the size can't be computed. | ||||
| func Sizeof(typ Type) (int, error) { | ||||
| 	var ( | ||||
| 		n    = int64(1) | ||||
| 		elem int64 | ||||
| 	) | ||||
| 
 | ||||
| 	for i := 0; i < maxTypeDepth; i++ { | ||||
| 		switch v := typ.(type) { | ||||
| 		case *Array: | ||||
| 			if n > 0 && int64(v.Nelems) > math.MaxInt64/n { | ||||
| 				return 0, errors.New("overflow") | ||||
| 			} | ||||
| 
 | ||||
| 			// Arrays may be of zero length, which allows | ||||
| 			// n to be zero as well. | ||||
| 			n *= int64(v.Nelems) | ||||
| 			typ = v.Type | ||||
| 			continue | ||||
| 
 | ||||
| 		case sizer: | ||||
| 			elem = int64(v.size()) | ||||
| 
 | ||||
| 		case *Typedef: | ||||
| 			typ = v.Type | ||||
| 			continue | ||||
| 
 | ||||
| 		case qualifier: | ||||
| 			typ = v.qualify() | ||||
| 			continue | ||||
| 
 | ||||
| 		default: | ||||
| 			return 0, fmt.Errorf("unrecognized type %T", typ) | ||||
| 		} | ||||
| 
 | ||||
| 		if n > 0 && elem > math.MaxInt64/n { | ||||
| 			return 0, errors.New("overflow") | ||||
| 		} | ||||
| 
 | ||||
| 		size := n * elem | ||||
| 		if int64(int(size)) != size { | ||||
| 			return 0, errors.New("overflow") | ||||
| 		} | ||||
| 
 | ||||
| 		return int(size), nil | ||||
| 	} | ||||
| 
 | ||||
| 	return 0, errors.New("exceeded type depth") | ||||
| } | ||||
| 
 | ||||
| // copy a Type recursively. | ||||
| // | ||||
| // typ may form a cycle. | ||||
| func copyType(typ Type) Type { | ||||
| 	var ( | ||||
| 		copies = make(map[Type]Type) | ||||
| 		work   typeDeque | ||||
| 	) | ||||
| 
 | ||||
| 	for t := &typ; t != nil; t = work.pop() { | ||||
| 		// *t is the identity of the type. | ||||
| 		if cpy := copies[*t]; cpy != nil { | ||||
| 			*t = cpy | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		cpy := (*t).copy() | ||||
| 		copies[*t] = cpy | ||||
| 		*t = cpy | ||||
| 
 | ||||
| 		// Mark any nested types for copying. | ||||
| 		cpy.walk(&work) | ||||
| 	} | ||||
| 
 | ||||
| 	return typ | ||||
| } | ||||
| 
 | ||||
| // typeDeque keeps track of pointers to types which still | ||||
| // need to be visited. | ||||
| type typeDeque struct { | ||||
| 	types       []*Type | ||||
| 	read, write uint64 | ||||
| 	mask        uint64 | ||||
| } | ||||
| 
 | ||||
| // push adds a type to the stack. | ||||
| func (dq *typeDeque) push(t *Type) { | ||||
| 	if dq.write-dq.read < uint64(len(dq.types)) { | ||||
| 		dq.types[dq.write&dq.mask] = t | ||||
| 		dq.write++ | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	new := len(dq.types) * 2 | ||||
| 	if new == 0 { | ||||
| 		new = 8 | ||||
| 	} | ||||
| 
 | ||||
| 	types := make([]*Type, new) | ||||
| 	pivot := dq.read & dq.mask | ||||
| 	n := copy(types, dq.types[pivot:]) | ||||
| 	n += copy(types[n:], dq.types[:pivot]) | ||||
| 	types[n] = t | ||||
| 
 | ||||
| 	dq.types = types | ||||
| 	dq.mask = uint64(new) - 1 | ||||
| 	dq.read, dq.write = 0, uint64(n+1) | ||||
| } | ||||
| 
 | ||||
| // shift returns the first element or nil. | ||||
| func (dq *typeDeque) shift() *Type { | ||||
| 	if dq.read == dq.write { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	index := dq.read & dq.mask | ||||
| 	t := dq.types[index] | ||||
| 	dq.types[index] = nil | ||||
| 	dq.read++ | ||||
| 	return t | ||||
| } | ||||
| 
 | ||||
| // pop returns the last element or nil. | ||||
| func (dq *typeDeque) pop() *Type { | ||||
| 	if dq.read == dq.write { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	dq.write-- | ||||
| 	index := dq.write & dq.mask | ||||
| 	t := dq.types[index] | ||||
| 	dq.types[index] = nil | ||||
| 	return t | ||||
| } | ||||
| 
 | ||||
| // all returns all elements. | ||||
| // | ||||
| // The deque is empty after calling this method. | ||||
| func (dq *typeDeque) all() []*Type { | ||||
| 	length := dq.write - dq.read | ||||
| 	types := make([]*Type, 0, length) | ||||
| 	for t := dq.shift(); t != nil; t = dq.shift() { | ||||
| 		types = append(types, t) | ||||
| 	} | ||||
| 	return types | ||||
| } | ||||
| 
 | ||||
| // inflateRawTypes takes a list of raw btf types linked via type IDs, and turns | ||||
| // it into a graph of Types connected via pointers. | ||||
| // | ||||
| // Returns a map of named types (i.e. those where NameOff is non-zero) and a slice of types | ||||
| // indexed by TypeID. Since BTF ignores compilation units, multiple types may share | ||||
| // the same name. A Type may form a cyclic graph by pointing at itself. | ||||
| func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) { | ||||
| 	type fixupDef struct { | ||||
| 		id           TypeID | ||||
| 		expectedKind btfKind | ||||
| 		typ          *Type | ||||
| 	} | ||||
| 
 | ||||
| 	var fixups []fixupDef | ||||
| 	fixup := func(id TypeID, expectedKind btfKind, typ *Type) { | ||||
| 		fixups = append(fixups, fixupDef{id, expectedKind, typ}) | ||||
| 	} | ||||
| 
 | ||||
| 	convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { | ||||
| 		// NB: The fixup below relies on pre-allocating this array to | ||||
| 		// work, since otherwise append might re-allocate members. | ||||
| 		members := make([]Member, 0, len(raw)) | ||||
| 		for i, btfMember := range raw { | ||||
| 			name, err := rawStrings.LookupName(btfMember.NameOff) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("can't get name for member %d: %w", i, err) | ||||
| 			} | ||||
| 			m := Member{ | ||||
| 				Name:   name, | ||||
| 				Offset: btfMember.Offset, | ||||
| 			} | ||||
| 			if kindFlag { | ||||
| 				m.BitfieldSize = btfMember.Offset >> 24 | ||||
| 				m.Offset &= 0xffffff | ||||
| 			} | ||||
| 			members = append(members, m) | ||||
| 		} | ||||
| 		for i := range members { | ||||
| 			fixup(raw[i].Type, kindUnknown, &members[i].Type) | ||||
| 		} | ||||
| 		return members, nil | ||||
| 	} | ||||
| 
 | ||||
| 	types = make([]Type, 0, len(rawTypes)) | ||||
| 	types = append(types, (*Void)(nil)) | ||||
| 	namedTypes = make(map[string][]namedType) | ||||
| 
 | ||||
| 	for i, raw := range rawTypes { | ||||
| 		var ( | ||||
| 			// Void is defined to always be type ID 0, and is thus | ||||
| 			// omitted from BTF. | ||||
| 			id  = TypeID(i + 1) | ||||
| 			typ Type | ||||
| 		) | ||||
| 
 | ||||
| 		name, err := rawStrings.LookupName(raw.NameOff) | ||||
| 		if err != nil { | ||||
| 			return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) | ||||
| 		} | ||||
| 
 | ||||
| 		switch raw.Kind() { | ||||
| 		case kindInt: | ||||
| 			encoding, offset, bits := intEncoding(*raw.data.(*uint32)) | ||||
| 			typ = &Int{id, name, raw.Size(), encoding, offset, bits} | ||||
| 
 | ||||
| 		case kindPointer: | ||||
| 			ptr := &Pointer{id, nil} | ||||
| 			fixup(raw.Type(), kindUnknown, &ptr.Target) | ||||
| 			typ = ptr | ||||
| 
 | ||||
| 		case kindArray: | ||||
| 			btfArr := raw.data.(*btfArray) | ||||
| 
 | ||||
| 			// IndexType is unused according to btf.rst. | ||||
| 			// Don't make it available right now. | ||||
| 			arr := &Array{id, nil, btfArr.Nelems} | ||||
| 			fixup(btfArr.Type, kindUnknown, &arr.Type) | ||||
| 			typ = arr | ||||
| 
 | ||||
| 		case kindStruct: | ||||
| 			members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) | ||||
| 			if err != nil { | ||||
| 				return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) | ||||
| 			} | ||||
| 			typ = &Struct{id, name, raw.Size(), members} | ||||
| 
 | ||||
| 		case kindUnion: | ||||
| 			members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) | ||||
| 			if err != nil { | ||||
| 				return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err) | ||||
| 			} | ||||
| 			typ = &Union{id, name, raw.Size(), members} | ||||
| 
 | ||||
| 		case kindEnum: | ||||
| 			rawvals := raw.data.([]btfEnum) | ||||
| 			vals := make([]EnumValue, 0, len(rawvals)) | ||||
| 			for i, btfVal := range rawvals { | ||||
| 				name, err := rawStrings.LookupName(btfVal.NameOff) | ||||
| 				if err != nil { | ||||
| 					return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) | ||||
| 				} | ||||
| 				vals = append(vals, EnumValue{ | ||||
| 					Name:  name, | ||||
| 					Value: btfVal.Val, | ||||
| 				}) | ||||
| 			} | ||||
| 			typ = &Enum{id, name, vals} | ||||
| 
 | ||||
| 		case kindForward: | ||||
| 			if raw.KindFlag() { | ||||
| 				typ = &Fwd{id, name, FwdUnion} | ||||
| 			} else { | ||||
| 				typ = &Fwd{id, name, FwdStruct} | ||||
| 			} | ||||
| 
 | ||||
| 		case kindTypedef: | ||||
| 			typedef := &Typedef{id, name, nil} | ||||
| 			fixup(raw.Type(), kindUnknown, &typedef.Type) | ||||
| 			typ = typedef | ||||
| 
 | ||||
| 		case kindVolatile: | ||||
| 			volatile := &Volatile{id, nil} | ||||
| 			fixup(raw.Type(), kindUnknown, &volatile.Type) | ||||
| 			typ = volatile | ||||
| 
 | ||||
| 		case kindConst: | ||||
| 			cnst := &Const{id, nil} | ||||
| 			fixup(raw.Type(), kindUnknown, &cnst.Type) | ||||
| 			typ = cnst | ||||
| 
 | ||||
| 		case kindRestrict: | ||||
| 			restrict := &Restrict{id, nil} | ||||
| 			fixup(raw.Type(), kindUnknown, &restrict.Type) | ||||
| 			typ = restrict | ||||
| 
 | ||||
| 		case kindFunc: | ||||
| 			fn := &Func{id, name, nil} | ||||
| 			fixup(raw.Type(), kindFuncProto, &fn.Type) | ||||
| 			typ = fn | ||||
| 
 | ||||
| 		case kindFuncProto: | ||||
| 			rawparams := raw.data.([]btfParam) | ||||
| 			params := make([]FuncParam, 0, len(rawparams)) | ||||
| 			for i, param := range rawparams { | ||||
| 				name, err := rawStrings.LookupName(param.NameOff) | ||||
| 				if err != nil { | ||||
| 					return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) | ||||
| 				} | ||||
| 				params = append(params, FuncParam{ | ||||
| 					Name: name, | ||||
| 				}) | ||||
| 			} | ||||
| 			for i := range params { | ||||
| 				fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type) | ||||
| 			} | ||||
| 
 | ||||
| 			fp := &FuncProto{id, nil, params} | ||||
| 			fixup(raw.Type(), kindUnknown, &fp.Return) | ||||
| 			typ = fp | ||||
| 
 | ||||
| 		case kindVar: | ||||
| 			v := &Var{id, name, nil} | ||||
| 			fixup(raw.Type(), kindUnknown, &v.Type) | ||||
| 			typ = v | ||||
| 
 | ||||
| 		case kindDatasec: | ||||
| 			btfVars := raw.data.([]btfVarSecinfo) | ||||
| 			vars := make([]VarSecinfo, 0, len(btfVars)) | ||||
| 			for _, btfVar := range btfVars { | ||||
| 				vars = append(vars, VarSecinfo{ | ||||
| 					Offset: btfVar.Offset, | ||||
| 					Size:   btfVar.Size, | ||||
| 				}) | ||||
| 			} | ||||
| 			for i := range vars { | ||||
| 				fixup(btfVars[i].Type, kindVar, &vars[i].Type) | ||||
| 			} | ||||
| 			typ = &Datasec{id, name, raw.SizeType, vars} | ||||
| 
 | ||||
| 		default: | ||||
| 			return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) | ||||
| 		} | ||||
| 
 | ||||
| 		types = append(types, typ) | ||||
| 
 | ||||
| 		if named, ok := typ.(namedType); ok { | ||||
| 			if name := essentialName(named.name()); name != "" { | ||||
| 				namedTypes[name] = append(namedTypes[name], named) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	for _, fixup := range fixups { | ||||
| 		i := int(fixup.id) | ||||
| 		if i >= len(types) { | ||||
| 			return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) | ||||
| 		} | ||||
| 
 | ||||
| 		// Default void (id 0) to unknown | ||||
| 		rawKind := kindUnknown | ||||
| 		if i > 0 { | ||||
| 			rawKind = rawTypes[i-1].Kind() | ||||
| 		} | ||||
| 
 | ||||
| 		if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected { | ||||
| 			return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind) | ||||
| 		} | ||||
| 
 | ||||
| 		*fixup.typ = types[i] | ||||
| 	} | ||||
| 
 | ||||
| 	return types, namedTypes, nil | ||||
| } | ||||
| 
 | ||||
| // essentialName returns name without a ___ suffix. | ||||
| func essentialName(name string) string { | ||||
| 	lastIdx := strings.LastIndex(name, "___") | ||||
| 	if lastIdx > 0 { | ||||
| 		return name[:lastIdx] | ||||
| 	} | ||||
| 	return name | ||||
| } | ||||
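Sizeof above resolves qualifiers, typedefs and arrays until it reaches a type with a known size, multiplying element counts along the way. A hypothetical test-style sketch, written as if it lived inside package btf (names invented for illustration):

package btf

import "testing"

// TestSizeofSketch builds a small type graph by hand: const my_arr, where
// my_arr is a typedef for an array of eight 4-byte integers. Sizeof should
// peel off Const and Typedef, multiply by Nelems, and report 32 bytes.
func TestSizeofSketch(t *testing.T) {
	u32 := &Int{TypeID: 1, Name: "uint32", Size: 4}
	arr := &Array{TypeID: 2, Type: u32, Nelems: 8}
	alias := &Typedef{TypeID: 3, Name: "my_arr", Type: arr}

	n, err := Sizeof(&Const{TypeID: 4, Type: alias})
	if err != nil || n != 32 {
		t.Fatalf("got %d, %v; want 32, <nil>", n, err)
	}
}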
							
								
								
									
								4	vendor/github.com/cilium/ebpf/internal/cpu.go (generated, vendored)
							|  | @ -2,7 +2,7 @@ package internal | |||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| ) | ||||
|  | @ -24,7 +24,7 @@ func PossibleCPUs() (int, error) { | |||
| } | ||||
| 
 | ||||
| func parseCPUsFromFile(path string) (int, error) { | ||||
| 	spec, err := ioutil.ReadFile(path) | ||||
| 	spec, err := os.ReadFile(path) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  |  | |||
							
								
								
									
								50	vendor/github.com/cilium/ebpf/internal/elf.go (generated, vendored)
							|  | @ -35,6 +35,29 @@ func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { | |||
| 	return &SafeELFFile{file}, nil | ||||
| } | ||||
| 
 | ||||
| // OpenSafeELFFile reads an ELF from a file. | ||||
| // | ||||
| // It works like NewSafeELFFile, with the exception that safe.Close will | ||||
| // close the underlying file. | ||||
| func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { | ||||
| 	defer func() { | ||||
| 		r := recover() | ||||
| 		if r == nil { | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		safe = nil | ||||
| 		err = fmt.Errorf("reading ELF file panicked: %s", r) | ||||
| 	}() | ||||
| 
 | ||||
| 	file, err := elf.Open(path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &SafeELFFile{file}, nil | ||||
| } | ||||
| 
 | ||||
| // Symbols is the safe version of elf.File.Symbols. | ||||
| func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { | ||||
| 	defer func() { | ||||
|  | @ -50,3 +73,30 @@ func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { | |||
| 	syms, err = se.File.Symbols() | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| // DynamicSymbols is the safe version of elf.File.DynamicSymbols. | ||||
| func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { | ||||
| 	defer func() { | ||||
| 		r := recover() | ||||
| 		if r == nil { | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		syms = nil | ||||
| 		err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) | ||||
| 	}() | ||||
| 
 | ||||
| 	syms, err = se.File.DynamicSymbols() | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| // SectionsByType returns all sections in the file with the specified section type. | ||||
| func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { | ||||
| 	sections := make([]*elf.Section, 0, 1) | ||||
| 	for _, section := range se.Sections { | ||||
| 		if section.Type == typ { | ||||
| 			sections = append(sections, section) | ||||
| 		} | ||||
| 	} | ||||
| 	return sections | ||||
| } | ||||
|  |  | |||
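The new OpenSafeELFFile and SectionsByType helpers above are meant to compose: open an ELF without letting a parser panic escape, then filter its sections by type. A hypothetical helper (it would have to live inside the cilium/ebpf module, since the package is internal):

package internal

import (
	"debug/elf"
	"fmt"
)

// listProgbitsSections prints every SHT_PROGBITS section of an ELF, using the
// panic-recovering wrappers above. Purely illustrative.
func listProgbitsSections(path string) error {
	se, err := OpenSafeELFFile(path)
	if err != nil {
		return err
	}
	defer se.Close()

	for _, sec := range se.SectionsByType(elf.SHT_PROGBITS) {
		fmt.Printf("%s: %d bytes\n", sec.Name, sec.Size)
	}
	return nil
}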
							
								
								
									
								24	vendor/github.com/cilium/ebpf/internal/endian.go (generated, vendored)
							|  | @ -1,24 +0,0 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"unsafe" | ||||
| ) | ||||
| 
 | ||||
| // NativeEndian is set to either binary.BigEndian or binary.LittleEndian, | ||||
| // depending on the host's endianness. | ||||
| var NativeEndian binary.ByteOrder | ||||
| 
 | ||||
| func init() { | ||||
| 	if isBigEndian() { | ||||
| 		NativeEndian = binary.BigEndian | ||||
| 	} else { | ||||
| 		NativeEndian = binary.LittleEndian | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func isBigEndian() (ret bool) { | ||||
| 	i := int(0x1) | ||||
| 	bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i)) | ||||
| 	return bs[0] == 0 | ||||
| } | ||||
							
								
								
									
								13	vendor/github.com/cilium/ebpf/internal/endian_be.go (generated, vendored, new file)
							|  | @ -0,0 +1,13 @@ | |||
| //go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 | ||||
| // +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64 | ||||
| 
 | ||||
| package internal | ||||
| 
 | ||||
| import "encoding/binary" | ||||
| 
 | ||||
| // NativeEndian is set to either binary.BigEndian or binary.LittleEndian, | ||||
| // depending on the host's endianness. | ||||
| var NativeEndian binary.ByteOrder = binary.BigEndian | ||||
| 
 | ||||
| // ClangEndian is set to either "el" or "eb" depending on the host's endianness. | ||||
| const ClangEndian = "eb" | ||||
							
								
								
									
								13	vendor/github.com/cilium/ebpf/internal/endian_le.go (generated, vendored, new file)
							|  | @ -0,0 +1,13 @@ | |||
| //go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 | ||||
| // +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64 | ||||
| 
 | ||||
| package internal | ||||
| 
 | ||||
| import "encoding/binary" | ||||
| 
 | ||||
| // NativeEndian is set to either binary.BigEndian or binary.LittleEndian, | ||||
| // depending on the host's endianness. | ||||
| var NativeEndian binary.ByteOrder = binary.LittleEndian | ||||
| 
 | ||||
| // ClangEndian is set to either "el" or "eb" depending on the host's endianness. | ||||
| const ClangEndian = "el" | ||||
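Together the two build-tagged files above replace the old runtime probe in endian.go with compile-time values. A small hypothetical sketch of how they might be used (the bpfel/bpfeb clang target naming is an assumption, not something this diff shows):

package internal

// hostEndianSketch encodes a value in host byte order and derives a clang
// target suffix from ClangEndian. Illustrative only.
func hostEndianSketch() (string, []byte) {
	buf := make([]byte, 4)
	NativeEndian.PutUint32(buf, 0x01020304)
	// On little-endian hosts this yields "bpfel" and [04 03 02 01].
	return "bpf" + ClangEndian, buf
}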
							
								
								
									
								205	vendor/github.com/cilium/ebpf/internal/errors.go (generated, vendored)
							|  | @ -2,46 +2,205 @@ package internal | |||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // ErrorWithLog returns an error that includes logs from the | ||||
| // kernel verifier. | ||||
| // ErrorWithLog returns an error which includes logs from the kernel verifier. | ||||
| // | ||||
| // logErr should be the error returned by the syscall that generated | ||||
| // the log. It is used to check for truncation of the output. | ||||
| func ErrorWithLog(err error, log []byte, logErr error) error { | ||||
| 	logStr := strings.Trim(CString(log), "\t\r\n ") | ||||
| 	if errors.Is(logErr, unix.ENOSPC) { | ||||
| 		logStr += " (truncated...)" | ||||
| // The default error output is a summary of the full log. The latter can be | ||||
| // accessed via VerifierError.Log or by formatting the error, see Format. | ||||
| // | ||||
| // A set of heuristics is used to determine whether the log has been truncated. | ||||
| func ErrorWithLog(err error, log []byte) *VerifierError { | ||||
| 	const whitespace = "\t\r\v\n " | ||||
| 
 | ||||
| 	// Convert verifier log C string by truncating it on the first 0 byte | ||||
| 	// and trimming trailing whitespace before interpreting as a Go string. | ||||
| 	truncated := false | ||||
| 	if i := bytes.IndexByte(log, 0); i != -1 { | ||||
| 		if i == len(log)-1 && !bytes.HasSuffix(log[:i], []byte{'\n'}) { | ||||
| 			// The null byte is at the end of the buffer and it's not preceded | ||||
| 			// by a newline character. Most likely the buffer was too short. | ||||
| 			truncated = true | ||||
| 		} | ||||
| 
 | ||||
| 		log = log[:i] | ||||
| 	} else if len(log) > 0 { | ||||
| 		// No null byte? Dodgy! | ||||
| 		truncated = true | ||||
| 	} | ||||
| 
 | ||||
| 	return &VerifierError{err, logStr} | ||||
| 	log = bytes.Trim(log, whitespace) | ||||
| 	logLines := bytes.Split(log, []byte{'\n'}) | ||||
| 	lines := make([]string, 0, len(logLines)) | ||||
| 	for _, line := range logLines { | ||||
| 		// Don't remove leading white space on individual lines. We rely on it | ||||
| 		// when outputting logs. | ||||
| 		lines = append(lines, string(bytes.TrimRight(line, whitespace))) | ||||
| 	} | ||||
| 
 | ||||
| 	return &VerifierError{err, lines, truncated} | ||||
| } | ||||
| 
 | ||||
| // VerifierError includes information from the eBPF verifier. | ||||
| // | ||||
| // It summarises the log output, see Format if you want to output the full contents. | ||||
| type VerifierError struct { | ||||
| 	cause error | ||||
| 	log   string | ||||
| 	// The error which caused this error. | ||||
| 	Cause error | ||||
| 	// The verifier output split into lines. | ||||
| 	Log []string | ||||
| 	// Whether the log output is truncated, based on several heuristics. | ||||
| 	Truncated bool | ||||
| } | ||||
| 
 | ||||
| func (le *VerifierError) Unwrap() error { | ||||
| 	return le.Cause | ||||
| } | ||||
| 
 | ||||
| func (le *VerifierError) Error() string { | ||||
| 	if le.log == "" { | ||||
| 		return le.cause.Error() | ||||
| 	log := le.Log | ||||
| 	if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { | ||||
| 		// Get rid of "processed 39 insns (limit 1000000) ..." from summary. | ||||
| 		log = log[:n-1] | ||||
| 	} | ||||
| 
 | ||||
| 	return fmt.Sprintf("%s: %s", le.cause, le.log) | ||||
| 	n := len(log) | ||||
| 	if n == 0 { | ||||
| 		return le.Cause.Error() | ||||
| 	} | ||||
| 
 | ||||
| 	lines := log[n-1:] | ||||
| 	if n >= 2 && (includePreviousLine(log[n-1]) || le.Truncated) { | ||||
| 		// Add one more line of context if it aids understanding the error. | ||||
| 		lines = log[n-2:] | ||||
| 	} | ||||
| 
 | ||||
| 	var b strings.Builder | ||||
| 	fmt.Fprintf(&b, "%s: ", le.Cause.Error()) | ||||
| 
 | ||||
| 	for i, line := range lines { | ||||
| 		b.WriteString(strings.TrimSpace(line)) | ||||
| 		if i != len(lines)-1 { | ||||
| 			b.WriteString(": ") | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	omitted := len(le.Log) - len(lines) | ||||
| 	if omitted == 0 && !le.Truncated { | ||||
| 		return b.String() | ||||
| 	} | ||||
| 
 | ||||
| 	b.WriteString(" (") | ||||
| 	if le.Truncated { | ||||
| 		b.WriteString("truncated") | ||||
| 	} | ||||
| 
 | ||||
| 	if omitted > 0 { | ||||
| 		if le.Truncated { | ||||
| 			b.WriteString(", ") | ||||
| 		} | ||||
| 		fmt.Fprintf(&b, "%d line(s) omitted", omitted) | ||||
| 	} | ||||
| 	b.WriteString(")") | ||||
| 
 | ||||
| 	return b.String() | ||||
| } | ||||
| 
 | ||||
| // CString turns a NUL / zero terminated byte buffer into a string. | ||||
| func CString(in []byte) string { | ||||
| 	inLen := bytes.IndexByte(in, 0) | ||||
| 	if inLen == -1 { | ||||
| 		return "" | ||||
| // includePreviousLine returns true if the given line likely is better | ||||
| // understood with additional context from the preceding line. | ||||
| func includePreviousLine(line string) bool { | ||||
| 	// We need to find a good trade off between understandable error messages | ||||
| 	// and too much complexity here. Checking the string prefix is ok, requiring | ||||
| 	// regular expressions to do it is probably overkill. | ||||
| 
 | ||||
| 	if strings.HasPrefix(line, "\t") { | ||||
| 		// [13] STRUCT drm_rect size=16 vlen=4 | ||||
| 		// \tx1 type_id=2 | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { | ||||
| 		// 0: (95) exit | ||||
| 		// R0 !read_ok | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	if strings.HasPrefix(line, "invalid bpf_context access") { | ||||
| 		// 0: (79) r6 = *(u64 *)(r1 +0) | ||||
| 		// func '__x64_sys_recvfrom' arg0 type FWD is not a struct | ||||
| 		// invalid bpf_context access off=0 size=8 | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // Format the error. | ||||
| // | ||||
| // Understood verbs are %s and %v, which are equivalent to calling Error(). %v | ||||
| // allows outputting additional information using the following flags: | ||||
| // | ||||
| //     +   Output the first <width> lines, or all lines if no width is given. | ||||
| //     -   Output the last <width> lines, or all lines if no width is given. | ||||
| // | ||||
| // Use width to specify how many lines to output. Use the '-' flag to output | ||||
| // lines from the end of the log instead of the beginning. | ||||
| func (le *VerifierError) Format(f fmt.State, verb rune) { | ||||
| 	switch verb { | ||||
| 	case 's': | ||||
| 		_, _ = io.WriteString(f, le.Error()) | ||||
| 
 | ||||
| 	case 'v': | ||||
| 		n, haveWidth := f.Width() | ||||
| 		if !haveWidth || n > len(le.Log) { | ||||
| 			n = len(le.Log) | ||||
| 		} | ||||
| 
 | ||||
| 		if !f.Flag('+') && !f.Flag('-') { | ||||
| 			if haveWidth { | ||||
| 				_, _ = io.WriteString(f, "%!v(BADWIDTH)") | ||||
| 				return | ||||
| 			} | ||||
| 
 | ||||
| 			_, _ = io.WriteString(f, le.Error()) | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		if f.Flag('+') && f.Flag('-') { | ||||
| 			_, _ = io.WriteString(f, "%!v(BADFLAG)") | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		fmt.Fprintf(f, "%s:", le.Cause.Error()) | ||||
| 
 | ||||
| 		omitted := len(le.Log) - n | ||||
| 		lines := le.Log[:n] | ||||
| 		if f.Flag('-') { | ||||
| 			// Print last instead of first lines. | ||||
| 			lines = le.Log[len(le.Log)-n:] | ||||
| 			if omitted > 0 { | ||||
| 				fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		for _, line := range lines { | ||||
| 			fmt.Fprintf(f, "\n\t%s", line) | ||||
| 		} | ||||
| 
 | ||||
| 		if !f.Flag('-') { | ||||
| 			if omitted > 0 { | ||||
| 				fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if le.Truncated { | ||||
| 			fmt.Fprintf(f, "\n\t(truncated)") | ||||
| 		} | ||||
| 
 | ||||
| 	default: | ||||
| 		fmt.Fprintf(f, "%%!%c(BADVERB)", verb) | ||||
| 	} | ||||
| 	return string(in[:inLen]) | ||||
| } | ||||
|  |  | |||
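The rewritten VerifierError above splits the kernel log into lines and documents its Format verbs: %s and %v print a summary, %+v the full log, and a width combined with the '-' flag the last N lines. A hypothetical illustration with a made-up verifier log:

package internal

import (
	"errors"
	"fmt"
)

// printVerifierError shows the formatting verbs documented on Format above.
// The log contents and cause are invented for illustration.
func printVerifierError() {
	log := []byte("0: (95) exit\nR0 !read_ok\nprocessed 1 insns (limit 1000000)\n\x00")
	verr := ErrorWithLog(errors.New("permission denied"), log)

	fmt.Printf("%v\n", verr)   // summary: cause plus the most relevant log line(s)
	fmt.Printf("%+v\n", verr)  // full log, one indented line per row
	fmt.Printf("%-2v\n", verr) // only the last two log lines
}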
							
								
								
									
								69	vendor/github.com/cilium/ebpf/internal/fd.go (generated, vendored)
							|  | @ -1,69 +0,0 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| var ErrClosedFd = errors.New("use of closed file descriptor") | ||||
| 
 | ||||
| type FD struct { | ||||
| 	raw int64 | ||||
| } | ||||
| 
 | ||||
| func NewFD(value uint32) *FD { | ||||
| 	fd := &FD{int64(value)} | ||||
| 	runtime.SetFinalizer(fd, (*FD).Close) | ||||
| 	return fd | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) String() string { | ||||
| 	return strconv.FormatInt(fd.raw, 10) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Value() (uint32, error) { | ||||
| 	if fd.raw < 0 { | ||||
| 		return 0, ErrClosedFd | ||||
| 	} | ||||
| 
 | ||||
| 	return uint32(fd.raw), nil | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Close() error { | ||||
| 	if fd.raw < 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	value := int(fd.raw) | ||||
| 	fd.raw = -1 | ||||
| 
 | ||||
| 	fd.Forget() | ||||
| 	return unix.Close(value) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Forget() { | ||||
| 	runtime.SetFinalizer(fd, nil) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Dup() (*FD, error) { | ||||
| 	if fd.raw < 0 { | ||||
| 		return nil, ErrClosedFd | ||||
| 	} | ||||
| 
 | ||||
| 	dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't dup fd: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return NewFD(uint32(dup)), nil | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) File(name string) *os.File { | ||||
| 	fd.Forget() | ||||
| 	return os.NewFile(uintptr(fd.raw), name) | ||||
| } | ||||
							
								
								
									
								48	vendor/github.com/cilium/ebpf/internal/feature.go (generated, vendored)
							|  | @ -54,11 +54,6 @@ type FeatureTestFn func() error | |||
| // | ||||
| // Returns an error wrapping ErrNotSupported if the feature is not supported. | ||||
| func FeatureTest(name, version string, fn FeatureTestFn) func() error { | ||||
| 	v, err := NewVersion(version) | ||||
| 	if err != nil { | ||||
| 		return func() error { return err } | ||||
| 	} | ||||
| 
 | ||||
| 	ft := new(featureTest) | ||||
| 	return func() error { | ||||
| 		ft.RLock() | ||||
|  | @ -79,6 +74,11 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error { | |||
| 		err := fn() | ||||
| 		switch { | ||||
| 		case errors.Is(err, ErrNotSupported): | ||||
| 			v, err := NewVersion(version) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 
 | ||||
| 			ft.result = &UnsupportedFeatureError{ | ||||
| 				MinimumVersion: v, | ||||
| 				Name:           name, | ||||
|  | @ -98,41 +98,3 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error { | |||
| 		return ft.result | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // A Version in the form Major.Minor.Patch. | ||||
| type Version [3]uint16 | ||||
| 
 | ||||
| // NewVersion creates a version from a string like "Major.Minor.Patch". | ||||
| // | ||||
| // Patch is optional. | ||||
| func NewVersion(ver string) (Version, error) { | ||||
| 	var major, minor, patch uint16 | ||||
| 	n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) | ||||
| 	if n < 2 { | ||||
| 		return Version{}, fmt.Errorf("invalid version: %s", ver) | ||||
| 	} | ||||
| 	return Version{major, minor, patch}, nil | ||||
| } | ||||
| 
 | ||||
| func (v Version) String() string { | ||||
| 	if v[2] == 0 { | ||||
| 		return fmt.Sprintf("v%d.%d", v[0], v[1]) | ||||
| 	} | ||||
| 	return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) | ||||
| } | ||||
| 
 | ||||
| // Less returns true if the version is less than another version. | ||||
| func (v Version) Less(other Version) bool { | ||||
| 	for i, a := range v { | ||||
| 		if a == other[i] { | ||||
| 			continue | ||||
| 		} | ||||
| 		return a < other[i] | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // Unspecified returns true if the version is all zero. | ||||
| func (v Version) Unspecified() bool { | ||||
| 	return v[0] == 0 && v[1] == 0 && v[2] == 0 | ||||
| } | ||||
|  |  | |||
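FeatureTest above wraps a probe in a lock-guarded closure and converts ErrNotSupported into an UnsupportedFeatureError carrying the minimum kernel version. A hypothetical probe definition (feature name and version are invented):

package internal

// haveSketchFeature is an invented feature probe. The callback should return
// nil if the kernel supports the feature and ErrNotSupported (or an error
// wrapping it) if it does not; any other error is reported as a probe failure.
var haveSketchFeature = FeatureTest("sketch feature", "5.4", func() error {
	// A real probe would attempt the relevant bpf(2) operation here.
	return ErrNotSupported
})

Callers simply invoke the returned closure; repeated calls reuse the cached result.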
							
								
								
									
								48	vendor/github.com/cilium/ebpf/internal/io.go (generated, vendored)
							|  | @ -1,6 +1,35 @@ | |||
| package internal | ||||
| 
 | ||||
| import "errors" | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"compress/gzip" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"os" | ||||
| ) | ||||
| 
 | ||||
| // NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized | ||||
| // buffered reader. It is a convenience function for reading subsections of | ||||
| // ELF sections while minimizing the amount of read() syscalls made. | ||||
| // | ||||
| // Syscall overhead is non-negligible in a continuous integration context | ||||
| // where ELFs might be accessed over virtual filesystems with poor random | ||||
| // access performance. Buffering reads makes sense because (sub)sections | ||||
| // end up being read completely anyway. | ||||
| // | ||||
| // Use instead of the r.Seek() + io.LimitReader() pattern. | ||||
| func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { | ||||
| 	// Clamp the size of the buffer to one page to avoid slurping large parts | ||||
| 	// of a file into memory. bufio.NewReader uses a hardcoded default buffer | ||||
| 	// of 4096. Allow arches with larger pages to allocate more, but don't | ||||
| 	// allocate a fixed 4k buffer if we only need to read a small segment. | ||||
| 	buf := n | ||||
| 	if ps := int64(os.Getpagesize()); n > ps { | ||||
| 		buf = ps | ||||
| 	} | ||||
| 
 | ||||
| 	return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) | ||||
| } | ||||
| 
 | ||||
| // DiscardZeroes makes sure that all written bytes are zero | ||||
| // before discarding them. | ||||
|  | @ -14,3 +43,20 @@ func (DiscardZeroes) Write(p []byte) (int, error) { | |||
| 	} | ||||
| 	return len(p), nil | ||||
| } | ||||
| 
 | ||||
| // ReadAllCompressed decompresses a gzipped file into memory. | ||||
| func ReadAllCompressed(file string) ([]byte, error) { | ||||
| 	fh, err := os.Open(file) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer fh.Close() | ||||
| 
 | ||||
| 	gz, err := gzip.NewReader(fh) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer gz.Close() | ||||
| 
 | ||||
| 	return io.ReadAll(gz) | ||||
| } | ||||
|  |  | |||
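NewBufferedSectionReader above exists to cut down on tiny read() syscalls when decoding pieces of ELF sections. A hypothetical use inside the same package, decoding a single field from the start of a section:

package internal

import (
	"debug/elf"
	"encoding/binary"
)

// readSectionMagic wraps a section in the page-clamped buffered reader above
// and decodes one uint32 from its start. Illustrative only; elf.Section
// satisfies io.ReaderAt via its embedded reader.
func readSectionMagic(sec *elf.Section) (uint32, error) {
	r := NewBufferedSectionReader(sec, 0, int64(sec.Size))

	var magic uint32
	err := binary.Read(r, NativeEndian, &magic)
	return magic, err
}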
							
								
								
									
								84	vendor/github.com/cilium/ebpf/internal/output.go (generated, vendored, new file)
							|  | @ -0,0 +1,84 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"go/format" | ||||
| 	"go/scanner" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| ) | ||||
| 
 | ||||
| // Identifier turns a C style type or field name into an exportable Go equivalent. | ||||
| func Identifier(str string) string { | ||||
| 	prev := rune(-1) | ||||
| 	return strings.Map(func(r rune) rune { | ||||
| 		// See https://golang.org/ref/spec#Identifiers | ||||
| 		switch { | ||||
| 		case unicode.IsLetter(r): | ||||
| 			if prev == -1 { | ||||
| 				r = unicode.ToUpper(r) | ||||
| 			} | ||||
| 
 | ||||
| 		case r == '_': | ||||
| 			switch { | ||||
| 			// The previous rune was deleted, or we are at the | ||||
| 			// beginning of the string. | ||||
| 			case prev == -1: | ||||
| 				fallthrough | ||||
| 
 | ||||
| 			// The previous rune is a lower case letter or a digit. | ||||
| 			case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): | ||||
| 				// delete the current rune, and force the | ||||
| 				// next character to be uppercased. | ||||
| 				r = -1 | ||||
| 			} | ||||
| 
 | ||||
| 		case unicode.IsDigit(r): | ||||
| 
 | ||||
| 		default: | ||||
| 			// Delete the current rune. prev is unchanged. | ||||
| 			return -1 | ||||
| 		} | ||||
| 
 | ||||
| 		prev = r | ||||
| 		return r | ||||
| 	}, str) | ||||
| } | ||||
| 
 | ||||
| // WriteFormatted outputs a formatted src into out. | ||||
| // | ||||
| // If formatting fails it returns an informative error message. | ||||
| func WriteFormatted(src []byte, out io.Writer) error { | ||||
| 	formatted, err := format.Source(src) | ||||
| 	if err == nil { | ||||
| 		_, err = out.Write(formatted) | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var el scanner.ErrorList | ||||
| 	if !errors.As(err, &el) { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var nel scanner.ErrorList | ||||
| 	for _, err := range el { | ||||
| 		if !err.Pos.IsValid() { | ||||
| 			nel = append(nel, err) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		buf := src[err.Pos.Offset:] | ||||
| 		nl := bytes.IndexRune(buf, '\n') | ||||
| 		if nl == -1 { | ||||
| 			nel = append(nel, err) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		err.Msg += ": " + string(buf[:nl]) | ||||
| 		nel = append(nel, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nel | ||||
| } | ||||
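The new output.go pairs a C-to-Go name mangler with a gofmt pass over generated source. A hypothetical sketch of the two helpers together:

package internal

import (
	"fmt"
	"os"
)

// emitStructSketch turns a C-style name into an exported Go identifier
// ("bpf_map_info" becomes "BpfMapInfo") and writes a gofmt-ed struct
// declaration to stdout. Illustrative only.
func emitStructSketch() error {
	name := Identifier("bpf_map_info")
	src := fmt.Sprintf("package sketch\n\ntype %s struct{ Id uint32 }\n", name)
	return WriteFormatted([]byte(src), os.Stdout)
}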
							
								
								
									
								77	vendor/github.com/cilium/ebpf/internal/pinning.go (generated, vendored, new file)
							|  | @ -0,0 +1,77 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| func Pin(currentPath, newPath string, fd *sys.FD) error { | ||||
| 	const bpfFSType = 0xcafe4a11 | ||||
| 
 | ||||
| 	if newPath == "" { | ||||
| 		return errors.New("given pinning path cannot be empty") | ||||
| 	} | ||||
| 	if currentPath == newPath { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	var statfs unix.Statfs_t | ||||
| 	if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	fsType := int64(statfs.Type) | ||||
| 	if unsafe.Sizeof(statfs.Type) == 4 { | ||||
| 		// We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a | ||||
| 		// negative number when interpreted as int32 so we need to cast via | ||||
| 		// uint32 to avoid sign extension. | ||||
| 		fsType = int64(uint32(statfs.Type)) | ||||
| 	} | ||||
| 
 | ||||
| 	if fsType != bpfFSType { | ||||
| 		return fmt.Errorf("%s is not on a bpf filesystem", newPath) | ||||
| 	} | ||||
| 
 | ||||
| 	defer runtime.KeepAlive(fd) | ||||
| 
 | ||||
| 	if currentPath == "" { | ||||
| 		return sys.ObjPin(&sys.ObjPinAttr{ | ||||
| 			Pathname: sys.NewStringPointer(newPath), | ||||
| 			BpfFd:    fd.Uint(), | ||||
| 		}) | ||||
| 	} | ||||
| 
 | ||||
| 	// Renameat2 is used instead of os.Rename to disallow the new path replacing | ||||
| 	// an existing path. | ||||
| 	err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) | ||||
| 	if err == nil { | ||||
| 		// Object is now moved to the new pinning path. | ||||
| 		return nil | ||||
| 	} | ||||
| 	if !os.IsNotExist(err) { | ||||
| 		return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) | ||||
| 	} | ||||
| 	// Internal state not in sync with the file system so let's fix it. | ||||
| 	return sys.ObjPin(&sys.ObjPinAttr{ | ||||
| 		Pathname: sys.NewStringPointer(newPath), | ||||
| 		BpfFd:    fd.Uint(), | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| func Unpin(pinnedPath string) error { | ||||
| 	if pinnedPath == "" { | ||||
| 		return nil | ||||
| 	} | ||||
| 	err := os.Remove(pinnedPath) | ||||
| 	if err == nil || os.IsNotExist(err) { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
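For reference, a self-contained sketch of the sign-extension pitfall that the `unsafe.Sizeof(statfs.Type) == 4` branch in Pin above guards against. Only the bpffs magic constant comes from the code above; the variable names and the setup are illustrative.

```go
package main

import "fmt"

// bpfFSMagic is the bpffs superblock magic used by Pin above.
const bpfFSMagic = 0xcafe4a11

func main() {
	// On 32-bit architectures statfs.Type is an int32, so the magic is
	// stored as a negative number.
	u := uint32(bpfFSMagic)
	raw32 := int32(u)

	wrong := int64(raw32)         // sign-extended: negative, never equals the magic
	right := int64(uint32(raw32)) // zero-extended via uint32, as Pin does

	fmt.Println(wrong == bpfFSMagic) // false
	fmt.Println(right == bpfFSMagic) // true
}
```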
							
								
								
									
14  vendor/github.com/cilium/ebpf/internal/ptr_64.go  (generated, vendored)
|  | @ -1,14 +0,0 @@ | |||
| // +build !386,!amd64p32,!arm,!mipsle,!mips64p32le | ||||
| // +build !armbe,!mips,!mips64p32 | ||||
| 
 | ||||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"unsafe" | ||||
| ) | ||||
| 
 | ||||
| // Pointer wraps an unsafe.Pointer to be 64bit to | ||||
| // conform to the syscall specification. | ||||
| type Pointer struct { | ||||
| 	ptr unsafe.Pointer | ||||
| } | ||||
							
								
								
									
6  vendor/github.com/cilium/ebpf/internal/sys/doc.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,6 @@ | |||
| // Package sys contains bindings for the BPF syscall. | ||||
| package sys | ||||
| 
 | ||||
| // Regenerate types.go by invoking go generate in the current directory. | ||||
| 
 | ||||
| //go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz | ||||
							
								
								
									
96  vendor/github.com/cilium/ebpf/internal/sys/fd.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,96 @@ | |||
| package sys | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"os" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| var ErrClosedFd = unix.EBADF | ||||
| 
 | ||||
| type FD struct { | ||||
| 	raw int | ||||
| } | ||||
| 
 | ||||
| func newFD(value int) *FD { | ||||
| 	fd := &FD{value} | ||||
| 	runtime.SetFinalizer(fd, (*FD).Close) | ||||
| 	return fd | ||||
| } | ||||
| 
 | ||||
| // NewFD wraps a raw fd with a finalizer. | ||||
| // | ||||
| // You must not use the raw fd after calling this function, since the underlying | ||||
| // file descriptor number may change. This is because the BPF UAPI assumes that | ||||
| // zero is not a valid fd value. | ||||
| func NewFD(value int) (*FD, error) { | ||||
| 	if value < 0 { | ||||
| 		return nil, fmt.Errorf("invalid fd %d", value) | ||||
| 	} | ||||
| 
 | ||||
| 	fd := newFD(value) | ||||
| 	if value != 0 { | ||||
| 		return fd, nil | ||||
| 	} | ||||
| 
 | ||||
| 	dup, err := fd.Dup() | ||||
| 	_ = fd.Close() | ||||
| 	return dup, err | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) String() string { | ||||
| 	return strconv.FormatInt(int64(fd.raw), 10) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Int() int { | ||||
| 	return fd.raw | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Uint() uint32 { | ||||
| 	if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { | ||||
| 		// Best effort: this is the number most likely to be an invalid file | ||||
| 		// descriptor. It is equal to -1 (on two's complement arches). | ||||
| 		return math.MaxUint32 | ||||
| 	} | ||||
| 	return uint32(fd.raw) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Close() error { | ||||
| 	if fd.raw < 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	value := int(fd.raw) | ||||
| 	fd.raw = -1 | ||||
| 
 | ||||
| 	fd.Forget() | ||||
| 	return unix.Close(value) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Forget() { | ||||
| 	runtime.SetFinalizer(fd, nil) | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) Dup() (*FD, error) { | ||||
| 	if fd.raw < 0 { | ||||
| 		return nil, ErrClosedFd | ||||
| 	} | ||||
| 
 | ||||
| 	// Always require the fd to be larger than zero: the BPF API treats the value | ||||
| 	// as "no argument provided". | ||||
| 	dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't dup fd: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return newFD(dup), nil | ||||
| } | ||||
| 
 | ||||
| func (fd *FD) File(name string) *os.File { | ||||
| 	fd.Forget() | ||||
| 	return os.NewFile(uintptr(fd.raw), name) | ||||
| } | ||||
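The FD type above relies on runtime.SetFinalizer so leaked descriptors are eventually closed, and on Close/Forget clearing that finalizer so nothing is closed twice. A hedged, standalone sketch of the same ownership pattern follows; the pipe/dup setup and names are assumptions for illustration, not part of the vendored package.

```go
package main

import (
	"fmt"
	"os"
	"runtime"
	"syscall"
)

// fd mirrors the pattern of sys.FD above: the wrapper owns the descriptor,
// a finalizer closes it if the owner forgets to, and Close clears both the
// stored value and the finalizer so nothing is ever closed twice.
type fd struct{ raw int }

func newFD(value int) *fd {
	f := &fd{raw: value}
	runtime.SetFinalizer(f, (*fd).Close)
	return f
}

func (f *fd) Close() error {
	if f.raw < 0 {
		return nil
	}
	value := f.raw
	f.raw = -1
	runtime.SetFinalizer(f, nil) // the equivalent of Forget above
	return syscall.Close(value)
}

func main() {
	// Any descriptor works for the demo; dup the write end of a pipe so
	// ownership is cleanly handed to the wrapper.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()
	defer w.Close()

	dup, err := syscall.Dup(int(w.Fd()))
	if err != nil {
		panic(err)
	}

	f := newFD(dup)
	fmt.Println(f.Close()) // <nil>
	fmt.Println(f.Close()) // <nil>: already marked closed, no double close
}
```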
|  | @ -1,6 +1,10 @@ | |||
| package internal | ||||
| package sys | ||||
| 
 | ||||
| import "unsafe" | ||||
| import ( | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // NewPointer creates a 64-bit pointer from an unsafe Pointer. | ||||
| func NewPointer(ptr unsafe.Pointer) Pointer { | ||||
|  | @ -16,15 +20,19 @@ func NewSlicePointer(buf []byte) Pointer { | |||
| 	return Pointer{ptr: unsafe.Pointer(&buf[0])} | ||||
| } | ||||
| 
 | ||||
| // NewSlicePointer creates a 64-bit pointer from a byte slice. | ||||
| // | ||||
| // Useful to assign both the pointer and the length in one go. | ||||
| func NewSlicePointerLen(buf []byte) (Pointer, uint32) { | ||||
| 	return NewSlicePointer(buf), uint32(len(buf)) | ||||
| } | ||||
| 
 | ||||
| // NewStringPointer creates a 64-bit pointer from a string. | ||||
| func NewStringPointer(str string) Pointer { | ||||
| 	if str == "" { | ||||
| 	p, err := unix.BytePtrFromString(str) | ||||
| 	if err != nil { | ||||
| 		return Pointer{} | ||||
| 	} | ||||
| 
 | ||||
| 	// The kernel expects strings to be zero terminated | ||||
| 	buf := make([]byte, len(str)+1) | ||||
| 	copy(buf, str) | ||||
| 
 | ||||
| 	return Pointer{ptr: unsafe.Pointer(&buf[0])} | ||||
| 	return Pointer{ptr: unsafe.Pointer(p)} | ||||
| } | ||||
|  | @ -1,6 +1,7 @@ | |||
| //go:build armbe || mips || mips64p32 | ||||
| // +build armbe mips mips64p32 | ||||
| 
 | ||||
| package internal | ||||
| package sys | ||||
| 
 | ||||
| import ( | ||||
| 	"unsafe" | ||||
|  | @ -1,6 +1,7 @@ | |||
| //go:build 386 || amd64p32 || arm || mipsle || mips64p32le | ||||
| // +build 386 amd64p32 arm mipsle mips64p32le | ||||
| 
 | ||||
| package internal | ||||
| package sys | ||||
| 
 | ||||
| import ( | ||||
| 	"unsafe" | ||||
							
								
								
									
14  vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,14 @@ | |||
| //go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 | ||||
| // +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32 | ||||
| 
 | ||||
| package sys | ||||
| 
 | ||||
| import ( | ||||
| 	"unsafe" | ||||
| ) | ||||
| 
 | ||||
| // Pointer wraps an unsafe.Pointer to be 64bit to | ||||
| // conform to the syscall specification. | ||||
| type Pointer struct { | ||||
| 	ptr unsafe.Pointer | ||||
| } | ||||
							
								
								
									
126  vendor/github.com/cilium/ebpf/internal/sys/syscall.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,126 @@ | |||
| package sys | ||||
| 
 | ||||
| import ( | ||||
| 	"runtime" | ||||
| 	"syscall" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // BPF wraps SYS_BPF. | ||||
| // | ||||
| // Any pointers contained in attr must use the Pointer type from this package. | ||||
| func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { | ||||
| 	for { | ||||
| 		r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) | ||||
| 		runtime.KeepAlive(attr) | ||||
| 
 | ||||
| 		// As of ~4.20 the verifier can be interrupted by a signal, | ||||
| 		// and returns EAGAIN in that case. | ||||
| 		if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		var err error | ||||
| 		if errNo != 0 { | ||||
| 			err = wrappedErrno{errNo} | ||||
| 		} | ||||
| 
 | ||||
| 		return r1, err | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Info is implemented by all structs that can be passed to the ObjInfo syscall. | ||||
| // | ||||
| //    MapInfo | ||||
| //    ProgInfo | ||||
| //    LinkInfo | ||||
| //    BtfInfo | ||||
| type Info interface { | ||||
| 	info() (unsafe.Pointer, uint32) | ||||
| } | ||||
| 
 | ||||
| var _ Info = (*MapInfo)(nil) | ||||
| 
 | ||||
| func (i *MapInfo) info() (unsafe.Pointer, uint32) { | ||||
| 	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) | ||||
| } | ||||
| 
 | ||||
| var _ Info = (*ProgInfo)(nil) | ||||
| 
 | ||||
| func (i *ProgInfo) info() (unsafe.Pointer, uint32) { | ||||
| 	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) | ||||
| } | ||||
| 
 | ||||
| var _ Info = (*LinkInfo)(nil) | ||||
| 
 | ||||
| func (i *LinkInfo) info() (unsafe.Pointer, uint32) { | ||||
| 	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) | ||||
| } | ||||
| 
 | ||||
| var _ Info = (*BtfInfo)(nil) | ||||
| 
 | ||||
| func (i *BtfInfo) info() (unsafe.Pointer, uint32) { | ||||
| 	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) | ||||
| } | ||||
| 
 | ||||
| // ObjInfo retrieves information about a BPF Fd. | ||||
| // | ||||
| // info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. | ||||
| func ObjInfo(fd *FD, info Info) error { | ||||
| 	ptr, len := info.info() | ||||
| 	err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ | ||||
| 		BpfFd:   fd.Uint(), | ||||
| 		InfoLen: len, | ||||
| 		Info:    NewPointer(ptr), | ||||
| 	}) | ||||
| 	runtime.KeepAlive(fd) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| // ObjName is a null-terminated string made up of | ||||
| // 'A-Za-z0-9_' characters. | ||||
| type ObjName [unix.BPF_OBJ_NAME_LEN]byte | ||||
| 
 | ||||
| // NewObjName truncates the result if it is too long. | ||||
| func NewObjName(name string) ObjName { | ||||
| 	var result ObjName | ||||
| 	copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) | ||||
| 	return result | ||||
| } | ||||
| 
 | ||||
| // LinkID uniquely identifies a bpf_link. | ||||
| type LinkID uint32 | ||||
| 
 | ||||
| // BTFID uniquely identifies a BTF blob loaded into the kernel. | ||||
| type BTFID uint32 | ||||
| 
 | ||||
| // wrappedErrno wraps syscall.Errno to prevent direct comparisons with | ||||
| // syscall.E* or unix.E* constants. | ||||
| // | ||||
| // You should never export an error of this type. | ||||
| type wrappedErrno struct { | ||||
| 	syscall.Errno | ||||
| } | ||||
| 
 | ||||
| func (we wrappedErrno) Unwrap() error { | ||||
| 	return we.Errno | ||||
| } | ||||
| 
 | ||||
| type syscallError struct { | ||||
| 	error | ||||
| 	errno syscall.Errno | ||||
| } | ||||
| 
 | ||||
| func Error(err error, errno syscall.Errno) error { | ||||
| 	return &syscallError{err, errno} | ||||
| } | ||||
| 
 | ||||
| func (se *syscallError) Is(target error) bool { | ||||
| 	return target == se.error | ||||
| } | ||||
| 
 | ||||
| func (se *syscallError) Unwrap() error { | ||||
| 	return se.errno | ||||
| } | ||||
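wrappedErrno and syscallError above shape how callers match errors: direct == comparisons against errno constants stop working, but errors.Is still matches via Unwrap and Is. A minimal sketch of that behaviour, using stand-in types and arbitrary errno values (EAGAIN/EBADF are just examples):

```go
package main

import (
	"errors"
	"fmt"
	"syscall"
)

// wrapped mirrors wrappedErrno above: it blocks direct == comparisons with
// errno constants while still matching them through errors.Is.
type wrapped struct{ syscall.Errno }

func (w wrapped) Unwrap() error { return w.Errno }

// sysErr mirrors syscallError above: a descriptive sentinel error that also
// matches the underlying errno.
type sysErr struct {
	error
	errno syscall.Errno
}

func (s *sysErr) Is(target error) bool { return target == s.error }
func (s *sysErr) Unwrap() error        { return s.errno }

func main() {
	err := error(wrapped{syscall.EAGAIN})
	fmt.Println(err == syscall.EAGAIN)          // false: direct comparison is blocked
	fmt.Println(errors.Is(err, syscall.EAGAIN)) // true: Unwrap exposes the errno

	sentinel := errors.New("operation not supported")
	err = &sysErr{sentinel, syscall.EBADF}
	fmt.Println(errors.Is(err, sentinel))      // true, via Is
	fmt.Println(errors.Is(err, syscall.EBADF)) // true, via Unwrap
}
```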
							
								
								
									
1052  vendor/github.com/cilium/ebpf/internal/sys/types.go  (generated, vendored, Normal file)
File diff suppressed because it is too large.
							
								
								
									
179  vendor/github.com/cilium/ebpf/internal/syscall.go  (generated, vendored)
|  | @ -1,179 +0,0 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| //go:generate stringer -output syscall_string.go -type=BPFCmd | ||||
| 
 | ||||
| // BPFCmd identifies a subcommand of the bpf syscall. | ||||
| type BPFCmd int | ||||
| 
 | ||||
| // Well known BPF commands. | ||||
| const ( | ||||
| 	BPF_MAP_CREATE BPFCmd = iota | ||||
| 	BPF_MAP_LOOKUP_ELEM | ||||
| 	BPF_MAP_UPDATE_ELEM | ||||
| 	BPF_MAP_DELETE_ELEM | ||||
| 	BPF_MAP_GET_NEXT_KEY | ||||
| 	BPF_PROG_LOAD | ||||
| 	BPF_OBJ_PIN | ||||
| 	BPF_OBJ_GET | ||||
| 	BPF_PROG_ATTACH | ||||
| 	BPF_PROG_DETACH | ||||
| 	BPF_PROG_TEST_RUN | ||||
| 	BPF_PROG_GET_NEXT_ID | ||||
| 	BPF_MAP_GET_NEXT_ID | ||||
| 	BPF_PROG_GET_FD_BY_ID | ||||
| 	BPF_MAP_GET_FD_BY_ID | ||||
| 	BPF_OBJ_GET_INFO_BY_FD | ||||
| 	BPF_PROG_QUERY | ||||
| 	BPF_RAW_TRACEPOINT_OPEN | ||||
| 	BPF_BTF_LOAD | ||||
| 	BPF_BTF_GET_FD_BY_ID | ||||
| 	BPF_TASK_FD_QUERY | ||||
| 	BPF_MAP_LOOKUP_AND_DELETE_ELEM | ||||
| 	BPF_MAP_FREEZE | ||||
| 	BPF_BTF_GET_NEXT_ID | ||||
| 	BPF_MAP_LOOKUP_BATCH | ||||
| 	BPF_MAP_LOOKUP_AND_DELETE_BATCH | ||||
| 	BPF_MAP_UPDATE_BATCH | ||||
| 	BPF_MAP_DELETE_BATCH | ||||
| 	BPF_LINK_CREATE | ||||
| 	BPF_LINK_UPDATE | ||||
| 	BPF_LINK_GET_FD_BY_ID | ||||
| 	BPF_LINK_GET_NEXT_ID | ||||
| 	BPF_ENABLE_STATS | ||||
| 	BPF_ITER_CREATE | ||||
| ) | ||||
| 
 | ||||
| // BPF wraps SYS_BPF. | ||||
| // | ||||
| // Any pointers contained in attr must use the Pointer type from this package. | ||||
| func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { | ||||
| 	r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) | ||||
| 	runtime.KeepAlive(attr) | ||||
| 
 | ||||
| 	var err error | ||||
| 	if errNo != 0 { | ||||
| 		err = errNo | ||||
| 	} | ||||
| 
 | ||||
| 	return r1, err | ||||
| } | ||||
| 
 | ||||
| type BPFProgAttachAttr struct { | ||||
| 	TargetFd     uint32 | ||||
| 	AttachBpfFd  uint32 | ||||
| 	AttachType   uint32 | ||||
| 	AttachFlags  uint32 | ||||
| 	ReplaceBpfFd uint32 | ||||
| } | ||||
| 
 | ||||
| func BPFProgAttach(attr *BPFProgAttachAttr) error { | ||||
| 	_, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| type BPFProgDetachAttr struct { | ||||
| 	TargetFd    uint32 | ||||
| 	AttachBpfFd uint32 | ||||
| 	AttachType  uint32 | ||||
| } | ||||
| 
 | ||||
| func BPFProgDetach(attr *BPFProgDetachAttr) error { | ||||
| 	_, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| type BPFEnableStatsAttr struct { | ||||
| 	StatsType uint32 | ||||
| } | ||||
| 
 | ||||
| func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) { | ||||
| 	ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("enable stats: %w", err) | ||||
| 	} | ||||
| 	return NewFD(uint32(ptr)), nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| type bpfObjAttr struct { | ||||
| 	fileName  Pointer | ||||
| 	fd        uint32 | ||||
| 	fileFlags uint32 | ||||
| } | ||||
| 
 | ||||
| const bpfFSType = 0xcafe4a11 | ||||
| 
 | ||||
| // BPFObjPin wraps BPF_OBJ_PIN. | ||||
| func BPFObjPin(fileName string, fd *FD) error { | ||||
| 	dirName := filepath.Dir(fileName) | ||||
| 	var statfs unix.Statfs_t | ||||
| 	if err := unix.Statfs(dirName, &statfs); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if uint64(statfs.Type) != bpfFSType { | ||||
| 		return fmt.Errorf("%s is not on a bpf filesystem", fileName) | ||||
| 	} | ||||
| 
 | ||||
| 	value, err := fd.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfObjAttr{ | ||||
| 		fileName: NewStringPointer(fileName), | ||||
| 		fd:       value, | ||||
| 	} | ||||
| 	_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("pin object %s: %w", fileName, err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // BPFObjGet wraps BPF_OBJ_GET. | ||||
| func BPFObjGet(fileName string) (*FD, error) { | ||||
| 	attr := bpfObjAttr{ | ||||
| 		fileName: NewStringPointer(fileName), | ||||
| 	} | ||||
| 	ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("get object %s: %w", fileName, err) | ||||
| 	} | ||||
| 	return NewFD(uint32(ptr)), nil | ||||
| } | ||||
| 
 | ||||
| type bpfObjGetInfoByFDAttr struct { | ||||
| 	fd      uint32 | ||||
| 	infoLen uint32 | ||||
| 	info    Pointer | ||||
| } | ||||
| 
 | ||||
| // BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD. | ||||
| // | ||||
| // Available from 4.13. | ||||
| func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { | ||||
| 	value, err := fd.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfObjGetInfoByFDAttr{ | ||||
| 		fd:      value, | ||||
| 		infoLen: uint32(size), | ||||
| 		info:    NewPointer(info), | ||||
| 	} | ||||
| 	_, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("fd %v: %w", fd, err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
56  vendor/github.com/cilium/ebpf/internal/syscall_string.go  (generated, vendored)
|  | @ -1,56 +0,0 @@ | |||
| // Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT. | ||||
| 
 | ||||
| package internal | ||||
| 
 | ||||
| import "strconv" | ||||
| 
 | ||||
| func _() { | ||||
| 	// An "invalid array index" compiler error signifies that the constant values have changed. | ||||
| 	// Re-run the stringer command to generate them again. | ||||
| 	var x [1]struct{} | ||||
| 	_ = x[BPF_MAP_CREATE-0] | ||||
| 	_ = x[BPF_MAP_LOOKUP_ELEM-1] | ||||
| 	_ = x[BPF_MAP_UPDATE_ELEM-2] | ||||
| 	_ = x[BPF_MAP_DELETE_ELEM-3] | ||||
| 	_ = x[BPF_MAP_GET_NEXT_KEY-4] | ||||
| 	_ = x[BPF_PROG_LOAD-5] | ||||
| 	_ = x[BPF_OBJ_PIN-6] | ||||
| 	_ = x[BPF_OBJ_GET-7] | ||||
| 	_ = x[BPF_PROG_ATTACH-8] | ||||
| 	_ = x[BPF_PROG_DETACH-9] | ||||
| 	_ = x[BPF_PROG_TEST_RUN-10] | ||||
| 	_ = x[BPF_PROG_GET_NEXT_ID-11] | ||||
| 	_ = x[BPF_MAP_GET_NEXT_ID-12] | ||||
| 	_ = x[BPF_PROG_GET_FD_BY_ID-13] | ||||
| 	_ = x[BPF_MAP_GET_FD_BY_ID-14] | ||||
| 	_ = x[BPF_OBJ_GET_INFO_BY_FD-15] | ||||
| 	_ = x[BPF_PROG_QUERY-16] | ||||
| 	_ = x[BPF_RAW_TRACEPOINT_OPEN-17] | ||||
| 	_ = x[BPF_BTF_LOAD-18] | ||||
| 	_ = x[BPF_BTF_GET_FD_BY_ID-19] | ||||
| 	_ = x[BPF_TASK_FD_QUERY-20] | ||||
| 	_ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21] | ||||
| 	_ = x[BPF_MAP_FREEZE-22] | ||||
| 	_ = x[BPF_BTF_GET_NEXT_ID-23] | ||||
| 	_ = x[BPF_MAP_LOOKUP_BATCH-24] | ||||
| 	_ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25] | ||||
| 	_ = x[BPF_MAP_UPDATE_BATCH-26] | ||||
| 	_ = x[BPF_MAP_DELETE_BATCH-27] | ||||
| 	_ = x[BPF_LINK_CREATE-28] | ||||
| 	_ = x[BPF_LINK_UPDATE-29] | ||||
| 	_ = x[BPF_LINK_GET_FD_BY_ID-30] | ||||
| 	_ = x[BPF_LINK_GET_NEXT_ID-31] | ||||
| 	_ = x[BPF_ENABLE_STATS-32] | ||||
| 	_ = x[BPF_ITER_CREATE-33] | ||||
| } | ||||
| 
 | ||||
| const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE" | ||||
| 
 | ||||
| var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617} | ||||
| 
 | ||||
| func (i BPFCmd) String() string { | ||||
| 	if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) { | ||||
| 		return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")" | ||||
| 	} | ||||
| 	return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]] | ||||
| } | ||||
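The removed stringer output above packs every command name into one string and slices it through an index table instead of allocating a []string. A self-contained sketch of the same trick for a three-command subset, with the names and offsets taken from the table above:

```go
package main

import (
	"fmt"
	"strconv"
)

// cmd stands in for the removed BPFCmd type.
type cmd int

const (
	cmdMapCreate cmd = iota
	cmdMapLookupElem
	cmdMapUpdateElem
)

// One concatenated name string plus an offset table, as stringer generates.
const cmdName = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEM"

var cmdIndex = [...]uint8{0, 14, 33, 52}

func (c cmd) String() string {
	if c < 0 || c >= cmd(len(cmdIndex)-1) {
		return "cmd(" + strconv.FormatInt(int64(c), 10) + ")"
	}
	return cmdName[cmdIndex[c]:cmdIndex[c+1]]
}

func main() {
	fmt.Println(cmdMapLookupElem) // BPF_MAP_LOOKUP_ELEM
	fmt.Println(cmd(7))           // cmd(7): out of range falls back to the numeric form
}
```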
							
								
								
									
74  vendor/github.com/cilium/ebpf/internal/unix/types_linux.go  (generated, vendored)
|  | @ -1,9 +1,9 @@ | |||
| //go:build linux | ||||
| // +build linux | ||||
| 
 | ||||
| package unix | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"syscall" | ||||
| 
 | ||||
| 	linux "golang.org/x/sys/unix" | ||||
|  | @ -20,16 +20,27 @@ const ( | |||
| 	EPERM   = linux.EPERM | ||||
| 	ESRCH   = linux.ESRCH | ||||
| 	ENODEV  = linux.ENODEV | ||||
| 	EBADF   = linux.EBADF | ||||
| 	E2BIG   = linux.E2BIG | ||||
| 	EFAULT  = linux.EFAULT | ||||
| 	EACCES  = linux.EACCES | ||||
| 	// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP | ||||
| 	ENOTSUPP = syscall.Errno(0x20c) | ||||
| 
 | ||||
| 	EBADF                    = linux.EBADF | ||||
| 	BPF_F_NO_PREALLOC        = linux.BPF_F_NO_PREALLOC | ||||
| 	BPF_F_NUMA_NODE          = linux.BPF_F_NUMA_NODE | ||||
| 	BPF_F_RDONLY             = linux.BPF_F_RDONLY | ||||
| 	BPF_F_WRONLY             = linux.BPF_F_WRONLY | ||||
| 	BPF_F_RDONLY_PROG        = linux.BPF_F_RDONLY_PROG | ||||
| 	BPF_F_WRONLY_PROG        = linux.BPF_F_WRONLY_PROG | ||||
| 	BPF_F_SLEEPABLE          = linux.BPF_F_SLEEPABLE | ||||
| 	BPF_F_MMAPABLE           = linux.BPF_F_MMAPABLE | ||||
| 	BPF_F_INNER_MAP          = linux.BPF_F_INNER_MAP | ||||
| 	BPF_OBJ_NAME_LEN         = linux.BPF_OBJ_NAME_LEN | ||||
| 	BPF_TAG_SIZE             = linux.BPF_TAG_SIZE | ||||
| 	BPF_RINGBUF_BUSY_BIT     = linux.BPF_RINGBUF_BUSY_BIT | ||||
| 	BPF_RINGBUF_DISCARD_BIT  = linux.BPF_RINGBUF_DISCARD_BIT | ||||
| 	BPF_RINGBUF_HDR_SZ       = linux.BPF_RINGBUF_HDR_SZ | ||||
| 	SYS_BPF                  = linux.SYS_BPF | ||||
| 	F_DUPFD_CLOEXEC          = linux.F_DUPFD_CLOEXEC | ||||
| 	EPOLL_CTL_ADD            = linux.EPOLL_CTL_ADD | ||||
|  | @ -39,27 +50,36 @@ const ( | |||
| 	PROT_READ                = linux.PROT_READ | ||||
| 	PROT_WRITE               = linux.PROT_WRITE | ||||
| 	MAP_SHARED               = linux.MAP_SHARED | ||||
| 	PERF_ATTR_SIZE_VER1      = linux.PERF_ATTR_SIZE_VER1 | ||||
| 	PERF_TYPE_SOFTWARE       = linux.PERF_TYPE_SOFTWARE | ||||
| 	PERF_TYPE_TRACEPOINT     = linux.PERF_TYPE_TRACEPOINT | ||||
| 	PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT | ||||
| 	PERF_EVENT_IOC_DISABLE   = linux.PERF_EVENT_IOC_DISABLE | ||||
| 	PERF_EVENT_IOC_ENABLE    = linux.PERF_EVENT_IOC_ENABLE | ||||
| 	PERF_EVENT_IOC_SET_BPF   = linux.PERF_EVENT_IOC_SET_BPF | ||||
| 	PerfBitWatermark         = linux.PerfBitWatermark | ||||
| 	PERF_SAMPLE_RAW          = linux.PERF_SAMPLE_RAW | ||||
| 	PERF_FLAG_FD_CLOEXEC     = linux.PERF_FLAG_FD_CLOEXEC | ||||
| 	RLIM_INFINITY            = linux.RLIM_INFINITY | ||||
| 	RLIMIT_MEMLOCK           = linux.RLIMIT_MEMLOCK | ||||
| 	BPF_STATS_RUN_TIME       = linux.BPF_STATS_RUN_TIME | ||||
| 	PERF_RECORD_LOST         = linux.PERF_RECORD_LOST | ||||
| 	PERF_RECORD_SAMPLE       = linux.PERF_RECORD_SAMPLE | ||||
| 	AT_FDCWD                 = linux.AT_FDCWD | ||||
| 	RENAME_NOREPLACE         = linux.RENAME_NOREPLACE | ||||
| 	SO_ATTACH_BPF            = linux.SO_ATTACH_BPF | ||||
| 	SO_DETACH_BPF            = linux.SO_DETACH_BPF | ||||
| 	SOL_SOCKET               = linux.SOL_SOCKET | ||||
| ) | ||||
| 
 | ||||
| // Statfs_t is a wrapper | ||||
| type Statfs_t = linux.Statfs_t | ||||
| 
 | ||||
| type Stat_t = linux.Stat_t | ||||
| 
 | ||||
| // Rlimit is a wrapper | ||||
| type Rlimit = linux.Rlimit | ||||
| 
 | ||||
| // Setrlimit is a wrapper | ||||
| func Setrlimit(resource int, rlim *Rlimit) (err error) { | ||||
| 	return linux.Setrlimit(resource, rlim) | ||||
| } | ||||
| 
 | ||||
| // Syscall is a wrapper | ||||
| func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { | ||||
| 	return linux.Syscall(trap, a1, a2, a3) | ||||
|  | @ -70,6 +90,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) { | |||
| 	return linux.FcntlInt(fd, cmd, arg) | ||||
| } | ||||
| 
 | ||||
| // IoctlSetInt is a wrapper | ||||
| func IoctlSetInt(fd int, req uint, value int) error { | ||||
| 	return linux.IoctlSetInt(fd, req, value) | ||||
| } | ||||
| 
 | ||||
| // Statfs is a wrapper | ||||
| func Statfs(path string, buf *Statfs_t) (err error) { | ||||
| 	return linux.Statfs(path, buf) | ||||
|  | @ -157,14 +182,29 @@ func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { | |||
| 	return linux.Tgkill(tgid, tid, sig) | ||||
| } | ||||
| 
 | ||||
| func KernelRelease() (string, error) { | ||||
| 	var uname Utsname | ||||
| 	err := Uname(&uname) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 
 | ||||
| 	end := bytes.IndexByte(uname.Release[:], 0) | ||||
| 	release := string(uname.Release[:end]) | ||||
| 	return release, nil | ||||
| // BytePtrFromString is a wrapper | ||||
| func BytePtrFromString(s string) (*byte, error) { | ||||
| 	return linux.BytePtrFromString(s) | ||||
| } | ||||
| 
 | ||||
| // ByteSliceToString is a wrapper | ||||
| func ByteSliceToString(s []byte) string { | ||||
| 	return linux.ByteSliceToString(s) | ||||
| } | ||||
| 
 | ||||
| // Renameat2 is a wrapper | ||||
| func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { | ||||
| 	return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) | ||||
| } | ||||
| 
 | ||||
| func Prlimit(pid, resource int, new, old *Rlimit) error { | ||||
| 	return linux.Prlimit(pid, resource, new, old) | ||||
| } | ||||
| 
 | ||||
| func Open(path string, mode int, perm uint32) (int, error) { | ||||
| 	return linux.Open(path, mode, perm) | ||||
| } | ||||
| 
 | ||||
| func Fstat(fd int, stat *Stat_t) error { | ||||
| 	return linux.Fstat(fd, stat) | ||||
| } | ||||
|  |  | |||
							
								
								
									
64  vendor/github.com/cilium/ebpf/internal/unix/types_other.go  (generated, vendored)
|  | @ -1,3 +1,4 @@ | |||
| //go:build !linux | ||||
| // +build !linux | ||||
| 
 | ||||
| package unix | ||||
|  | @ -21,15 +22,26 @@ const ( | |||
| 	ESRCH  = syscall.ESRCH | ||||
| 	ENODEV = syscall.ENODEV | ||||
| 	EBADF  = syscall.Errno(0) | ||||
| 	E2BIG  = syscall.Errno(0) | ||||
| 	EFAULT = syscall.EFAULT | ||||
| 	EACCES = syscall.Errno(0) | ||||
| 	// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP | ||||
| 	ENOTSUPP = syscall.Errno(0x20c) | ||||
| 
 | ||||
| 	BPF_F_NO_PREALLOC        = 0 | ||||
| 	BPF_F_NUMA_NODE          = 0 | ||||
| 	BPF_F_RDONLY             = 0 | ||||
| 	BPF_F_WRONLY             = 0 | ||||
| 	BPF_F_RDONLY_PROG        = 0 | ||||
| 	BPF_F_WRONLY_PROG        = 0 | ||||
| 	BPF_F_SLEEPABLE          = 0 | ||||
| 	BPF_F_MMAPABLE           = 0 | ||||
| 	BPF_F_INNER_MAP          = 0 | ||||
| 	BPF_OBJ_NAME_LEN         = 0x10 | ||||
| 	BPF_TAG_SIZE             = 0x8 | ||||
| 	BPF_RINGBUF_BUSY_BIT     = 0 | ||||
| 	BPF_RINGBUF_DISCARD_BIT  = 0 | ||||
| 	BPF_RINGBUF_HDR_SZ       = 0 | ||||
| 	SYS_BPF                  = 321 | ||||
| 	F_DUPFD_CLOEXEC          = 0x406 | ||||
| 	EPOLLIN                  = 0x1 | ||||
|  | @ -40,14 +52,26 @@ const ( | |||
| 	PROT_READ                = 0x1 | ||||
| 	PROT_WRITE               = 0x2 | ||||
| 	MAP_SHARED               = 0x1 | ||||
| 	PERF_ATTR_SIZE_VER1      = 0 | ||||
| 	PERF_TYPE_SOFTWARE       = 0x1 | ||||
| 	PERF_TYPE_TRACEPOINT     = 0 | ||||
| 	PERF_COUNT_SW_BPF_OUTPUT = 0xa | ||||
| 	PERF_EVENT_IOC_DISABLE   = 0 | ||||
| 	PERF_EVENT_IOC_ENABLE    = 0 | ||||
| 	PERF_EVENT_IOC_SET_BPF   = 0 | ||||
| 	PerfBitWatermark         = 0x4000 | ||||
| 	PERF_SAMPLE_RAW          = 0x400 | ||||
| 	PERF_FLAG_FD_CLOEXEC     = 0x8 | ||||
| 	RLIM_INFINITY            = 0x7fffffffffffffff | ||||
| 	RLIMIT_MEMLOCK           = 8 | ||||
| 	BPF_STATS_RUN_TIME       = 0 | ||||
| 	PERF_RECORD_LOST         = 2 | ||||
| 	PERF_RECORD_SAMPLE       = 9 | ||||
| 	AT_FDCWD                 = -0x2 | ||||
| 	RENAME_NOREPLACE         = 0x1 | ||||
| 	SO_ATTACH_BPF            = 0x32 | ||||
| 	SO_DETACH_BPF            = 0x1b | ||||
| 	SOL_SOCKET               = 0x1 | ||||
| ) | ||||
| 
 | ||||
| // Statfs_t is a wrapper | ||||
|  | @ -66,17 +90,14 @@ type Statfs_t struct { | |||
| 	Spare   [4]int64 | ||||
| } | ||||
| 
 | ||||
| type Stat_t struct{} | ||||
| 
 | ||||
| // Rlimit is a wrapper | ||||
| type Rlimit struct { | ||||
| 	Cur uint64 | ||||
| 	Max uint64 | ||||
| } | ||||
| 
 | ||||
| // Setrlimit is a wrapper | ||||
| func Setrlimit(resource int, rlim *Rlimit) (err error) { | ||||
| 	return errNonLinux | ||||
| } | ||||
| 
 | ||||
| // Syscall is a wrapper | ||||
| func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { | ||||
| 	return 0, 0, syscall.Errno(1) | ||||
|  | @ -87,6 +108,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) { | |||
| 	return -1, errNonLinux | ||||
| } | ||||
| 
 | ||||
| // IoctlSetInt is a wrapper | ||||
| func IoctlSetInt(fd int, req uint, value int) error { | ||||
| 	return errNonLinux | ||||
| } | ||||
| 
 | ||||
| // Statfs is a wrapper | ||||
| func Statfs(path string, buf *Statfs_t) error { | ||||
| 	return errNonLinux | ||||
|  | @ -201,6 +227,7 @@ func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int | |||
| // Utsname is a wrapper | ||||
| type Utsname struct { | ||||
| 	Release [65]byte | ||||
| 	Version [65]byte | ||||
| } | ||||
| 
 | ||||
| // Uname is a wrapper | ||||
|  | @ -223,6 +250,29 @@ func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { | |||
| 	return errNonLinux | ||||
| } | ||||
| 
 | ||||
| func KernelRelease() (string, error) { | ||||
| 	return "", errNonLinux | ||||
| // BytePtrFromString is a wrapper | ||||
| func BytePtrFromString(s string) (*byte, error) { | ||||
| 	return nil, errNonLinux | ||||
| } | ||||
| 
 | ||||
| // ByteSliceToString is a wrapper | ||||
| func ByteSliceToString(s []byte) string { | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
| // Renameat2 is a wrapper | ||||
| func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { | ||||
| 	return errNonLinux | ||||
| } | ||||
| 
 | ||||
| func Prlimit(pid, resource int, new, old *Rlimit) error { | ||||
| 	return errNonLinux | ||||
| } | ||||
| 
 | ||||
| func Open(path string, mode int, perm uint32) (int, error) { | ||||
| 	return -1, errNonLinux | ||||
| } | ||||
| 
 | ||||
| func Fstat(fd int, stat *Stat_t) error { | ||||
| 	return errNonLinux | ||||
| } | ||||
|  |  | |||
							
								
								
									
150  vendor/github.com/cilium/ebpf/internal/vdso.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,150 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"debug/elf" | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"os" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	errAuxvNoVDSO = errors.New("no vdso address found in auxv") | ||||
| ) | ||||
| 
 | ||||
| // vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library | ||||
| // linked into the current process image. | ||||
| func vdsoVersion() (uint32, error) { | ||||
| 	// Read data from the auxiliary vector, which is normally passed directly | ||||
| 	// to the process. Go does not expose that data, so we must read it from procfs. | ||||
| 	// https://man7.org/linux/man-pages/man3/getauxval.3.html | ||||
| 	av, err := os.Open("/proc/self/auxv") | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("opening auxv: %w", err) | ||||
| 	} | ||||
| 	defer av.Close() | ||||
| 
 | ||||
| 	vdsoAddr, err := vdsoMemoryAddress(av) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("finding vDSO memory address: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Use /proc/self/mem rather than unsafe.Pointer tricks. | ||||
| 	mem, err := os.Open("/proc/self/mem") | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("opening mem: %w", err) | ||||
| 	} | ||||
| 	defer mem.Close() | ||||
| 
 | ||||
| 	// Open ELF at provided memory address, as offset into /proc/self/mem. | ||||
| 	c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("reading linux version code: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return c, nil | ||||
| } | ||||
| 
 | ||||
| // vdsoMemoryAddress returns the memory address of the vDSO library | ||||
| // linked into the current process image. r is an io.Reader into an auxv blob. | ||||
| func vdsoMemoryAddress(r io.Reader) (uint64, error) { | ||||
| 	const ( | ||||
| 		_AT_NULL         = 0  // End of vector | ||||
| 		_AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image | ||||
| 	) | ||||
| 
 | ||||
| 	// Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, | ||||
| 	// the address of a page containing the virtual Dynamic Shared Object (vDSO). | ||||
| 	aux := struct{ Tag, Val uint64 }{} | ||||
| 	for { | ||||
| 		if err := binary.Read(r, NativeEndian, &aux); err != nil { | ||||
| 			return 0, fmt.Errorf("reading auxv entry: %w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		switch aux.Tag { | ||||
| 		case _AT_SYSINFO_EHDR: | ||||
| 			if aux.Val != 0 { | ||||
| 				return aux.Val, nil | ||||
| 			} | ||||
| 			return 0, fmt.Errorf("invalid vDSO address in auxv") | ||||
| 		// _AT_NULL is always the last tag/val pair in the aux vector | ||||
| 		// and can be treated like EOF. | ||||
| 		case _AT_NULL: | ||||
| 			return 0, errAuxvNoVDSO | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' | ||||
| type elfNoteHeader struct { | ||||
| 	NameSize int32 | ||||
| 	DescSize int32 | ||||
| 	Type     int32 | ||||
| } | ||||
| 
 | ||||
| // vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in | ||||
| // the ELF notes section of the binary provided by the reader. | ||||
| func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { | ||||
| 	hdr, err := NewSafeELFFile(r) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("reading vDSO ELF: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	sections := hdr.SectionsByType(elf.SHT_NOTE) | ||||
| 	if len(sections) == 0 { | ||||
| 		return 0, fmt.Errorf("no note section found in vDSO ELF") | ||||
| 	} | ||||
| 
 | ||||
| 	for _, sec := range sections { | ||||
| 		sr := sec.Open() | ||||
| 		var n elfNoteHeader | ||||
| 
 | ||||
| 		// Read notes until we find one named 'Linux'. | ||||
| 		for { | ||||
| 			if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { | ||||
| 				if errors.Is(err, io.EOF) { | ||||
| 					// We looked at all the notes in this section | ||||
| 					break | ||||
| 				} | ||||
| 				return 0, fmt.Errorf("reading note header: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			// If a note name is defined, it follows the note header. | ||||
| 			var name string | ||||
| 			if n.NameSize > 0 { | ||||
| 				// Read the note name, aligned to 4 bytes. | ||||
| 				buf := make([]byte, Align(int(n.NameSize), 4)) | ||||
| 				if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { | ||||
| 					return 0, fmt.Errorf("reading note name: %w", err) | ||||
| 				} | ||||
| 
 | ||||
| 				// Read nul-terminated string. | ||||
| 				name = unix.ByteSliceToString(buf[:n.NameSize]) | ||||
| 			} | ||||
| 
 | ||||
| 			// If a note descriptor is defined, it follows the name. | ||||
| 			// It is possible for a note to have a descriptor but not a name. | ||||
| 			if n.DescSize > 0 { | ||||
| 				// LINUX_VERSION_CODE is a uint32 value. | ||||
| 				if name == "Linux" && n.DescSize == 4 && n.Type == 0 { | ||||
| 					var version uint32 | ||||
| 					if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { | ||||
| 						return 0, fmt.Errorf("reading note descriptor: %w", err) | ||||
| 					} | ||||
| 					return version, nil | ||||
| 				} | ||||
| 
 | ||||
| 				// Discard the note descriptor if it exists but we're not interested in it. | ||||
| 				if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil { | ||||
| 					return 0, err | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return 0, fmt.Errorf("no Linux note in ELF") | ||||
| } | ||||
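A hedged sketch of the auxv walk that vdsoMemoryAddress above performs, driven by a synthetic little-endian blob instead of /proc/self/auxv. The byte order, the filler entry, and the fake address are assumptions for illustration only.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

const (
	atNull        = 0  // AT_NULL: end of the auxiliary vector
	atSysinfoEHDR = 33 // AT_SYSINFO_EHDR: address of the vDSO mapping
)

// findVDSO mirrors vdsoMemoryAddress above: walk 16-byte tag/value pairs
// until AT_SYSINFO_EHDR or the AT_NULL terminator shows up.
func findVDSO(r io.Reader, order binary.ByteOrder) (uint64, error) {
	aux := struct{ Tag, Val uint64 }{}
	for {
		if err := binary.Read(r, order, &aux); err != nil {
			return 0, fmt.Errorf("reading auxv entry: %w", err)
		}
		switch aux.Tag {
		case atSysinfoEHDR:
			return aux.Val, nil
		case atNull:
			return 0, errors.New("no vdso address found in auxv")
		}
	}
}

func main() {
	// Synthetic blob: one unrelated entry, the vDSO entry, the terminator.
	// Real data comes from /proc/self/auxv in the vendored code.
	var buf bytes.Buffer
	order := binary.LittleEndian
	for _, pair := range [][2]uint64{{6, 4096}, {atSysinfoEHDR, 0x7fff0000}, {atNull, 0}} {
		binary.Write(&buf, order, pair[0])
		binary.Write(&buf, order, pair[1])
	}

	addr, err := findVDSO(&buf, order)
	fmt.Printf("0x%x %v\n", addr, err) // 0x7fff0000 <nil>
}
```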
							
								
								
									
122  vendor/github.com/cilium/ebpf/internal/version.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,122 @@ | |||
| package internal | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	// Version constant used in ELF binaries indicating that the loader needs to | ||||
| 	// substitute the eBPF program's version with the value of the kernel's | ||||
| 	// KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf | ||||
| 	// and RedSift. | ||||
| 	MagicKernelVersion = 0xFFFFFFFE | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	kernelVersion = struct { | ||||
| 		once    sync.Once | ||||
| 		version Version | ||||
| 		err     error | ||||
| 	}{} | ||||
| ) | ||||
| 
 | ||||
| // A Version in the form Major.Minor.Patch. | ||||
| type Version [3]uint16 | ||||
| 
 | ||||
| // NewVersion creates a version from a string like "Major.Minor.Patch". | ||||
| // | ||||
| // Patch is optional. | ||||
| func NewVersion(ver string) (Version, error) { | ||||
| 	var major, minor, patch uint16 | ||||
| 	n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) | ||||
| 	if n < 2 { | ||||
| 		return Version{}, fmt.Errorf("invalid version: %s", ver) | ||||
| 	} | ||||
| 	return Version{major, minor, patch}, nil | ||||
| } | ||||
| 
 | ||||
| // NewVersionFromCode creates a version from a LINUX_VERSION_CODE. | ||||
| func NewVersionFromCode(code uint32) Version { | ||||
| 	return Version{ | ||||
| 		uint16(uint8(code >> 16)), | ||||
| 		uint16(uint8(code >> 8)), | ||||
| 		uint16(uint8(code)), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (v Version) String() string { | ||||
| 	if v[2] == 0 { | ||||
| 		return fmt.Sprintf("v%d.%d", v[0], v[1]) | ||||
| 	} | ||||
| 	return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) | ||||
| } | ||||
| 
 | ||||
| // Less returns true if the version is less than another version. | ||||
| func (v Version) Less(other Version) bool { | ||||
| 	for i, a := range v { | ||||
| 		if a == other[i] { | ||||
| 			continue | ||||
| 		} | ||||
| 		return a < other[i] | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // Unspecified returns true if the version is all zero. | ||||
| func (v Version) Unspecified() bool { | ||||
| 	return v[0] == 0 && v[1] == 0 && v[2] == 0 | ||||
| } | ||||
| 
 | ||||
| // Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. | ||||
| // It represents the kernel version and patch level as a single value. | ||||
| func (v Version) Kernel() uint32 { | ||||
| 
 | ||||
| 	// Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid | ||||
| 	// overflowing into PATCHLEVEL. | ||||
| 	// See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). | ||||
| 	s := v[2] | ||||
| 	if s > 255 { | ||||
| 		s = 255 | ||||
| 	} | ||||
| 
 | ||||
| 	// Truncate members to uint8 to prevent them from spilling over into | ||||
| 	// each other when overflowing 8 bits. | ||||
| 	return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) | ||||
| } | ||||
| 
 | ||||
| // KernelVersion returns the version of the currently running kernel. | ||||
| func KernelVersion() (Version, error) { | ||||
| 	kernelVersion.once.Do(func() { | ||||
| 		kernelVersion.version, kernelVersion.err = detectKernelVersion() | ||||
| 	}) | ||||
| 
 | ||||
| 	if kernelVersion.err != nil { | ||||
| 		return Version{}, kernelVersion.err | ||||
| 	} | ||||
| 	return kernelVersion.version, nil | ||||
| } | ||||
| 
 | ||||
| // detectKernelVersion returns the version of the running kernel. | ||||
| func detectKernelVersion() (Version, error) { | ||||
| 	vc, err := vdsoVersion() | ||||
| 	if err != nil { | ||||
| 		return Version{}, err | ||||
| 	} | ||||
| 	return NewVersionFromCode(vc), nil | ||||
| } | ||||
| 
 | ||||
| // KernelRelease returns the release string of the running kernel. | ||||
| // Its format depends on the Linux distribution and corresponds to directory | ||||
| // names in /lib/modules by convention. Some examples are 5.15.17-1-lts and | ||||
| // 4.19.0-16-amd64. | ||||
| func KernelRelease() (string, error) { | ||||
| 	var uname unix.Utsname | ||||
| 	if err := unix.Uname(&uname); err != nil { | ||||
| 		return "", fmt.Errorf("uname failed: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return unix.ByteSliceToString(uname.Release[:]), nil | ||||
| } | ||||
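Kernel() above mirrors the kernel's KERNEL_VERSION macro, including the SUBLEVEL clamp. A small standalone sketch of the encode/decode roundtrip; the sample version number is arbitrary.

```go
package main

import "fmt"

// version mirrors internal.Version above: Major.Minor.Patch packed the way
// the kernel's KERNEL_VERSION macro packs it.
type version [3]uint16

func (v version) kernel() uint32 {
	s := v[2]
	if s > 255 { // clamp SUBLEVEL, as the vendored code does for 4.4/4.9 kernels
		s = 255
	}
	return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s))
}

func fromCode(code uint32) version {
	return version{
		uint16(uint8(code >> 16)),
		uint16(uint8(code >> 8)),
		uint16(uint8(code)),
	}
}

func main() {
	v := version{5, 15, 301} // patch level above 255 on purpose
	code := v.kernel()
	fmt.Printf("0x%06x\n", code) // 0x050fff: sublevel clamped to 255
	fmt.Println(fromCode(code))  // [5 15 255]
}
```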
							
								
								
									
24  vendor/github.com/cilium/ebpf/link/cgroup.go  (generated, vendored)
|  | @ -56,16 +56,6 @@ func AttachCgroup(opts CgroupOptions) (Link, error) { | |||
| 	return cg, nil | ||||
| } | ||||
| 
 | ||||
| // LoadPinnedCgroup loads a pinned cgroup from a bpffs. | ||||
| func LoadPinnedCgroup(fileName string) (Link, error) { | ||||
| 	link, err := LoadPinnedRawLink(fileName) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &linkCgroup{link}, nil | ||||
| } | ||||
| 
 | ||||
| type progAttachCgroup struct { | ||||
| 	cgroup     *os.File | ||||
| 	current    *ebpf.Program | ||||
|  | @ -147,14 +137,20 @@ func (cg *progAttachCgroup) Pin(string) error { | |||
| 	return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (cg *progAttachCgroup) Unpin() error { | ||||
| 	return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (cg *progAttachCgroup) Info() (*Info, error) { | ||||
| 	return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| type linkCgroup struct { | ||||
| 	*RawLink | ||||
| 	RawLink | ||||
| } | ||||
| 
 | ||||
| var _ Link = (*linkCgroup)(nil) | ||||
| 
 | ||||
| func (cg *linkCgroup) isLink() {} | ||||
| 
 | ||||
| func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { | ||||
| 	link, err := AttachRawLink(RawLinkOptions{ | ||||
| 		Target:  int(cgroup.Fd()), | ||||
|  | @ -165,5 +161,5 @@ func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &linkCgroup{link}, err | ||||
| 	return &linkCgroup{*link}, err | ||||
| } | ||||
|  |  | |||
							
								
								
									
90  vendor/github.com/cilium/ebpf/link/iter.go  (generated, vendored)
|  | @ -3,8 +3,10 @@ package link | |||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| ) | ||||
| 
 | ||||
| type IterOptions struct { | ||||
|  | @ -15,77 +17,69 @@ type IterOptions struct { | |||
| 	// AttachTo requires the kernel to include BTF of itself, | ||||
| 	// and it to be compiled with a recent pahole (>= 1.16). | ||||
| 	Program *ebpf.Program | ||||
| 
 | ||||
| 	// Map specifies the target map for bpf_map_elem and sockmap iterators. | ||||
| 	// It may be nil. | ||||
| 	Map *ebpf.Map | ||||
| } | ||||
| 
 | ||||
| // AttachIter attaches a BPF seq_file iterator. | ||||
| func AttachIter(opts IterOptions) (*Iter, error) { | ||||
| 	link, err := AttachRawLink(RawLinkOptions{ | ||||
| 		Program: opts.Program, | ||||
| 		Attach:  ebpf.AttachTraceIter, | ||||
| 	}) | ||||
| 	if err := haveBPFLink(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	progFd := opts.Program.FD() | ||||
| 	if progFd < 0 { | ||||
| 		return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	var info bpfIterLinkInfoMap | ||||
| 	if opts.Map != nil { | ||||
| 		mapFd := opts.Map.FD() | ||||
| 		if mapFd < 0 { | ||||
| 			return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd) | ||||
| 		} | ||||
| 		info.map_fd = uint32(mapFd) | ||||
| 	} | ||||
| 
 | ||||
| 	attr := sys.LinkCreateIterAttr{ | ||||
| 		ProgFd:      uint32(progFd), | ||||
| 		AttachType:  sys.AttachType(ebpf.AttachTraceIter), | ||||
| 		IterInfo:    sys.NewPointer(unsafe.Pointer(&info)), | ||||
| 		IterInfoLen: uint32(unsafe.Sizeof(info)), | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := sys.LinkCreateIter(&attr) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't link iterator: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &Iter{link}, err | ||||
| } | ||||
| 
 | ||||
| // LoadPinnedIter loads a pinned iterator from a bpffs. | ||||
| func LoadPinnedIter(fileName string) (*Iter, error) { | ||||
| 	link, err := LoadPinnedRawLink(fileName) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &Iter{link}, err | ||||
| 	return &Iter{RawLink{fd, ""}}, err | ||||
| } | ||||
| 
 | ||||
| // Iter represents an attached bpf_iter. | ||||
| type Iter struct { | ||||
| 	link *RawLink | ||||
| } | ||||
| 
 | ||||
| var _ Link = (*Iter)(nil) | ||||
| 
 | ||||
| func (it *Iter) isLink() {} | ||||
| 
 | ||||
| // FD returns the underlying file descriptor. | ||||
| func (it *Iter) FD() int { | ||||
| 	return it.link.FD() | ||||
| } | ||||
| 
 | ||||
| // Close implements Link. | ||||
| func (it *Iter) Close() error { | ||||
| 	return it.link.Close() | ||||
| } | ||||
| 
 | ||||
| // Pin implements Link. | ||||
| func (it *Iter) Pin(fileName string) error { | ||||
| 	return it.link.Pin(fileName) | ||||
| } | ||||
| 
 | ||||
| // Update implements Link. | ||||
| func (it *Iter) Update(new *ebpf.Program) error { | ||||
| 	return it.link.Update(new) | ||||
| 	RawLink | ||||
| } | ||||
| 
 | ||||
| // Open creates a new instance of the iterator. | ||||
| // | ||||
| // Reading from the returned reader triggers the BPF program. | ||||
| func (it *Iter) Open() (io.ReadCloser, error) { | ||||
| 	linkFd, err := it.link.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	attr := &sys.IterCreateAttr{ | ||||
| 		LinkFd: it.fd.Uint(), | ||||
| 	} | ||||
| 
 | ||||
| 	attr := &bpfIterCreateAttr{ | ||||
| 		linkFd: linkFd, | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfIterCreate(attr) | ||||
| 	fd, err := sys.IterCreate(attr) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't create iterator: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return fd.File("bpf_iter"), nil | ||||
| } | ||||
| 
 | ||||
| // union bpf_iter_link_info.map | ||||
| type bpfIterLinkInfoMap struct { | ||||
| 	map_fd uint32 | ||||
| } | ||||
|  |  | |||
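For context, a hedged usage sketch of the iterator API whose plumbing moved to the sys package above. It sticks to the public surface shown in this file (AttachIter, IterOptions, Iter.Open); the object file name and program name are purely hypothetical, and running it requires a bpf_iter-capable kernel.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func main() {
	// Assumption: "iter.o" contains a compiled bpf_iter program named
	// "dump_task"; both names are placeholders.
	spec, err := ebpf.LoadCollectionSpec("iter.o")
	if err != nil {
		log.Fatal(err)
	}
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	prog := coll.Programs["dump_task"]
	if prog == nil {
		log.Fatal("program not found in collection")
	}

	it, err := link.AttachIter(link.IterOptions{Program: prog})
	if err != nil {
		log.Fatal(err)
	}
	defer it.Close()

	// Reading from the iterator triggers the BPF program, as documented
	// on Iter.Open above.
	r, err := it.Open()
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```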
							
								
								
									
568  vendor/github.com/cilium/ebpf/link/kprobe.go  (generated, vendored, Normal file)
|  | @ -0,0 +1,568 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"crypto/rand" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"syscall" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events") | ||||
| 
 | ||||
| 	kprobeRetprobeBit = struct { | ||||
| 		once  sync.Once | ||||
| 		value uint64 | ||||
| 		err   error | ||||
| 	}{} | ||||
| ) | ||||
| 
 | ||||
| type probeType uint8 | ||||
| 
 | ||||
| type probeArgs struct { | ||||
| 	symbol, group, path          string | ||||
| 	offset, refCtrOffset, cookie uint64 | ||||
| 	pid                          int | ||||
| 	ret                          bool | ||||
| } | ||||
| 
 | ||||
| // KprobeOptions defines additional parameters that will be used | ||||
| // when loading Kprobes. | ||||
| type KprobeOptions struct { | ||||
| 	// Arbitrary value that can be fetched from an eBPF program | ||||
| 	// via `bpf_get_attach_cookie()`. | ||||
| 	// | ||||
| 	// Needs kernel 5.15+. | ||||
| 	Cookie uint64 | ||||
| 	// Offset of the kprobe relative to the traced symbol. | ||||
| 	// Can be used to insert kprobes at arbitrary offsets in kernel functions, | ||||
| 	// e.g. in places where functions have been inlined. | ||||
| 	Offset uint64 | ||||
| } | ||||
| 
 | ||||
| const ( | ||||
| 	kprobeType probeType = iota | ||||
| 	uprobeType | ||||
| ) | ||||
| 
 | ||||
| func (pt probeType) String() string { | ||||
| 	if pt == kprobeType { | ||||
| 		return "kprobe" | ||||
| 	} | ||||
| 	return "uprobe" | ||||
| } | ||||
| 
 | ||||
| func (pt probeType) EventsPath() string { | ||||
| 	if pt == kprobeType { | ||||
| 		return kprobeEventsPath | ||||
| 	} | ||||
| 	return uprobeEventsPath | ||||
| } | ||||
| 
 | ||||
| func (pt probeType) PerfEventType(ret bool) perfEventType { | ||||
| 	if pt == kprobeType { | ||||
| 		if ret { | ||||
| 			return kretprobeEvent | ||||
| 		} | ||||
| 		return kprobeEvent | ||||
| 	} | ||||
| 	if ret { | ||||
| 		return uretprobeEvent | ||||
| 	} | ||||
| 	return uprobeEvent | ||||
| } | ||||
| 
 | ||||
| func (pt probeType) RetprobeBit() (uint64, error) { | ||||
| 	if pt == kprobeType { | ||||
| 		return kretprobeBit() | ||||
| 	} | ||||
| 	return uretprobeBit() | ||||
| } | ||||
| 
 | ||||
| // Kprobe attaches the given eBPF program to a perf event that fires when the | ||||
| // given kernel symbol starts executing. See /proc/kallsyms for available | ||||
| // symbols. For example, printk(): | ||||
| // | ||||
| //	kp, err := Kprobe("printk", prog, nil) | ||||
| // | ||||
| // Losing the reference to the resulting Link (kp) will close the Kprobe | ||||
| // and prevent further execution of prog. The Link must be Closed during | ||||
| // program shutdown to avoid leaking system resources. | ||||
| func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { | ||||
| 	k, err := kprobe(symbol, prog, opts, false) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	lnk, err := attachPerfEvent(k, prog) | ||||
| 	if err != nil { | ||||
| 		k.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return lnk, nil | ||||
| } | ||||
| 
 | ||||
| // Kretprobe attaches the given eBPF program to a perf event that fires right | ||||
| // before the given kernel symbol exits, with the function stack left intact. | ||||
| // See /proc/kallsyms for available symbols. For example, printk(): | ||||
| // | ||||
| //	kp, err := Kretprobe("printk", prog, nil) | ||||
| // | ||||
| // Losing the reference to the resulting Link (kp) will close the Kretprobe | ||||
| // and prevent further execution of prog. The Link must be Closed during | ||||
| // program shutdown to avoid leaking system resources. | ||||
| func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { | ||||
| 	k, err := kprobe(symbol, prog, opts, true) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	lnk, err := attachPerfEvent(k, prog) | ||||
| 	if err != nil { | ||||
| 		k.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return lnk, nil | ||||
| } | ||||
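A sketch of pairing an entry probe and a return probe on the same symbol and tearing both down on shutdown; entryProg and retProg are assumed to be already-loaded programs of type ebpf.Kprobe, and the symbol name is supplied by the caller:

package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// traceSymbol attaches entryProg at the entry of symbol and retProg at its
// return, and hands back a cleanup function. Both links must be closed during
// shutdown so the underlying perf events and tracefs entries are released.
func traceSymbol(symbol string, entryProg, retProg *ebpf.Program) (func(), error) {
	entry, err := link.Kprobe(symbol, entryProg, nil)
	if err != nil {
		return nil, err
	}

	ret, err := link.Kretprobe(symbol, retProg, nil)
	if err != nil {
		entry.Close()
		return nil, err
	}

	return func() {
		ret.Close()
		entry.Close()
	}, nil
}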
| 
 | ||||
| // isValidKprobeSymbol implements the equivalent of a regex match | ||||
| // against "^[a-zA-Z_][0-9a-zA-Z_.]*$". | ||||
| func isValidKprobeSymbol(s string) bool { | ||||
| 	if len(s) < 1 { | ||||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	for i, c := range []byte(s) { | ||||
| 		switch { | ||||
| 		case c >= 'a' && c <= 'z': | ||||
| 		case c >= 'A' && c <= 'Z': | ||||
| 		case c == '_': | ||||
| 		case i > 0 && c >= '0' && c <= '9': | ||||
| 
 | ||||
| 		// Allow `.` in symbol names. A GCC-compiled kernel may change a symbol name | ||||
| 		// to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`. | ||||
| 		// See: https://gcc.gnu.org/gcc-10/changes.html | ||||
| 		case i > 0 && c == '.': | ||||
| 
 | ||||
| 		default: | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // kprobe opens a perf event on the given symbol and attaches prog to it. | ||||
| // If ret is true, create a kretprobe. | ||||
| func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { | ||||
| 	if symbol == "" { | ||||
| 		return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if prog == nil { | ||||
| 		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if !isValidKprobeSymbol(symbol) { | ||||
| 		return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) | ||||
| 	} | ||||
| 	if prog.Type() != ebpf.Kprobe { | ||||
| 		return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) | ||||
| 	} | ||||
| 
 | ||||
| 	args := probeArgs{ | ||||
| 		pid:    perfAllThreads, | ||||
| 		symbol: symbol, | ||||
| 		ret:    ret, | ||||
| 	} | ||||
| 
 | ||||
| 	if opts != nil { | ||||
| 		args.cookie = opts.Cookie | ||||
| 		args.offset = opts.Offset | ||||
| 	} | ||||
| 
 | ||||
| 	// Use kprobe PMU if the kernel has it available. | ||||
| 	tp, err := pmuKprobe(args) | ||||
| 	if errors.Is(err, os.ErrNotExist) { | ||||
| 		args.symbol = platformPrefix(symbol) | ||||
| 		tp, err = pmuKprobe(args) | ||||
| 	} | ||||
| 	if err == nil { | ||||
| 		return tp, nil | ||||
| 	} | ||||
| 	if err != nil && !errors.Is(err, ErrNotSupported) { | ||||
| 		return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Use tracefs if kprobe PMU is missing. | ||||
| 	args.symbol = symbol | ||||
| 	tp, err = tracefsKprobe(args) | ||||
| 	if errors.Is(err, os.ErrNotExist) { | ||||
| 		args.symbol = platformPrefix(symbol) | ||||
| 		tp, err = tracefsKprobe(args) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return tp, nil | ||||
| } | ||||
| 
 | ||||
| // pmuKprobe opens a perf event based on the kprobe PMU. | ||||
| // Returns os.ErrNotExist if the given symbol does not exist in the kernel. | ||||
| func pmuKprobe(args probeArgs) (*perfEvent, error) { | ||||
| 	return pmuProbe(kprobeType, args) | ||||
| } | ||||
| 
 | ||||
| // pmuProbe opens a perf event based on a Performance Monitoring Unit. | ||||
| // | ||||
| // Requires at least a 4.17 kernel. | ||||
| // e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU" | ||||
| // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" | ||||
| // | ||||
| // Returns ErrNotSupported if the kernel doesn't support the perf_[k,u]probe PMU. | ||||
| func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { | ||||
| 	// Getting the PMU type will fail if the kernel doesn't support | ||||
| 	// the perf_[k,u]probe PMU. | ||||
| 	et, err := getPMUEventType(typ) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	var config uint64 | ||||
| 	if args.ret { | ||||
| 		bit, err := typ.RetprobeBit() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		config |= 1 << bit | ||||
| 	} | ||||
| 
 | ||||
| 	var ( | ||||
| 		attr unix.PerfEventAttr | ||||
| 		sp   unsafe.Pointer | ||||
| 	) | ||||
| 	switch typ { | ||||
| 	case kprobeType: | ||||
| 		// Create a pointer to a NUL-terminated string for the kernel. | ||||
| 		sp, err = unsafeStringPtr(args.symbol) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		attr = unix.PerfEventAttr{ | ||||
| 			// The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, | ||||
| 			// since it added the config2 (Ext2) field. Use Ext2 as probe_offset. | ||||
| 			Size:   unix.PERF_ATTR_SIZE_VER1, | ||||
| 			Type:   uint32(et),          // PMU event type read from sysfs | ||||
| 			Ext1:   uint64(uintptr(sp)), // Kernel symbol to trace | ||||
| 			Ext2:   args.offset,         // Kernel symbol offset | ||||
| 			Config: config,              // Retprobe flag | ||||
| 		} | ||||
| 	case uprobeType: | ||||
| 		sp, err = unsafeStringPtr(args.path) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		if args.refCtrOffset != 0 { | ||||
| 			config |= args.refCtrOffset << uprobeRefCtrOffsetShift | ||||
| 		} | ||||
| 
 | ||||
| 		attr = unix.PerfEventAttr{ | ||||
| 			// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, | ||||
| 			// since it added the config2 (Ext2) field. The Size field controls the | ||||
| 			// size of the internal buffer the kernel allocates for reading the | ||||
| 			// perf_event_attr argument from userspace. | ||||
| 			Size:   unix.PERF_ATTR_SIZE_VER1, | ||||
| 			Type:   uint32(et),          // PMU event type read from sysfs | ||||
| 			Ext1:   uint64(uintptr(sp)), // Uprobe path | ||||
| 			Ext2:   args.offset,         // Uprobe offset | ||||
| 			Config: config,              // RefCtrOffset, Retprobe flag | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) | ||||
| 
 | ||||
| 	// On some old kernels, the kprobe PMU doesn't allow `.` in symbol names and | ||||
| 	// returns -EINVAL. Return ErrNotSupported to allow falling back to tracefs. | ||||
| 	// https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 | ||||
| 	if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") { | ||||
| 		return nil, fmt.Errorf("symbol '%s+%#x': older kernels don't accept dots: %w", args.symbol, args.offset, ErrNotSupported) | ||||
| 	} | ||||
| 	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL | ||||
| 	// when trying to create a kretprobe for a missing symbol. Make sure ENOENT | ||||
| 	// is returned to the caller. | ||||
| 	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { | ||||
| 		return nil, fmt.Errorf("symbol '%s+%#x' not found: %w", args.symbol, args.offset, os.ErrNotExist) | ||||
| 	} | ||||
| 	// Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved | ||||
| 	// to an invalid insn boundary. | ||||
| 	if errors.Is(err, syscall.EILSEQ) { | ||||
| 		return nil, fmt.Errorf("symbol '%s+%#x' not found (bad insn boundary): %w", args.symbol, args.offset, os.ErrNotExist) | ||||
| 	} | ||||
| 	// Since at least commit cb9a19fe4aa51, ENOTSUPP is returned | ||||
| 	// when attempting to set a uprobe on a trap instruction. | ||||
| 	if errors.Is(err, unix.ENOTSUPP) { | ||||
| 		return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("opening perf event: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Ensure the string pointer is not collected before PerfEventOpen returns. | ||||
| 	runtime.KeepAlive(sp) | ||||
| 
 | ||||
| 	fd, err := sys.NewFD(rawFd) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Kernel has perf_[k,u]probe PMU available, initialize perf event. | ||||
| 	return &perfEvent{ | ||||
| 		typ:    typ.PerfEventType(args.ret), | ||||
| 		name:   args.symbol, | ||||
| 		pmuID:  et, | ||||
| 		cookie: args.cookie, | ||||
| 		fd:     fd, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| // tracefsKprobe creates a Kprobe tracefs entry. | ||||
| func tracefsKprobe(args probeArgs) (*perfEvent, error) { | ||||
| 	return tracefsProbe(kprobeType, args) | ||||
| } | ||||
| 
 | ||||
| // tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events. | ||||
| // A new trace event group name is generated on every call to support creating | ||||
| // multiple trace events for the same kernel or userspace symbol. | ||||
| // Path and offset are only set in the case of uprobe(s) and are used to set | ||||
| // the executable/library path on the filesystem and the offset where the probe is inserted. | ||||
| // A perf event is then opened on the newly-created trace event and returned to the caller. | ||||
| func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) { | ||||
| 	// Generate a random string for each trace event we attempt to create. | ||||
| 	// This value is used as the 'group' token in tracefs to allow creating | ||||
| 	// multiple kprobe trace events with the same name. | ||||
| 	group, err := randomGroup("ebpf") | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("randomizing group name: %w", err) | ||||
| 	} | ||||
| 	args.group = group | ||||
| 
 | ||||
| 	// Before attempting to create a trace event through tracefs, | ||||
| 	// check if an event with the same group and name already exists. | ||||
| 	// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate | ||||
| 	// entry, so we need to rely on reads for detecting uniqueness. | ||||
| 	_, err = getTraceEventID(group, args.symbol) | ||||
| 	if err == nil { | ||||
| 		return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol) | ||||
| 	} | ||||
| 	if err != nil && !errors.Is(err, os.ErrNotExist) { | ||||
| 		return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Create the [k,u]probe trace event using tracefs. | ||||
| 	if err := createTraceFSProbeEvent(typ, args); err != nil { | ||||
| 		return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if err != nil { | ||||
| 			// Make sure we clean up the created tracefs event when we return error. | ||||
| 			// If a livepatch handler is already active on the symbol, the write to | ||||
| 			// tracefs will succeed, a trace event will show up, but creating the | ||||
| 			// perf event will fail with EBUSY. | ||||
| 			_ = closeTraceFSProbeEvent(typ, args.group, args.symbol) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	// Get the newly-created trace event's id. | ||||
| 	tid, err := getTraceEventID(group, args.symbol) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("getting trace event id: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Kprobes are ephemeral tracepoints and share the same perf event type. | ||||
| 	fd, err := openTracepointPerfEvent(tid, args.pid) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &perfEvent{ | ||||
| 		typ:       typ.PerfEventType(args.ret), | ||||
| 		group:     group, | ||||
| 		name:      args.symbol, | ||||
| 		tracefsID: tid, | ||||
| 		cookie:    args.cookie, | ||||
| 		fd:        fd, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| // createTraceFSProbeEvent creates a new ephemeral trace event by writing to | ||||
| // <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid | ||||
| // kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist | ||||
| // if a probe with the same group and symbol already exists. | ||||
| func createTraceFSProbeEvent(typ probeType, args probeArgs) error { | ||||
| 	// Open the kprobe_events file in tracefs. | ||||
| 	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err) | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 
 | ||||
| 	var pe, token string | ||||
| 	switch typ { | ||||
| 	case kprobeType: | ||||
| 		// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): | ||||
| 		// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe | ||||
| 		// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe | ||||
| 		// -:[GRP/]EVENT                                        : Clear a probe | ||||
| 		// | ||||
| 		// Some examples: | ||||
| 		// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy | ||||
| 		// p:ebpf_5678/p_my_kprobe __x64_sys_execve | ||||
| 		// | ||||
| 		// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the | ||||
| 		// kernel default to NR_CPUS. This is desired in most eBPF cases since | ||||
| 		// subsampling or rate limiting logic can be more accurately implemented in | ||||
| 		// the eBPF program itself. | ||||
| 		// See Documentation/kprobes.txt for more details. | ||||
| 		token = kprobeToken(args) | ||||
| 		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token) | ||||
| 	case uprobeType: | ||||
| 		// The uprobe_events syntax is as follows: | ||||
| 		// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe | ||||
| 		// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe | ||||
| 		// -:[GRP/]EVENT                           : Clear a probe | ||||
| 		// | ||||
| 		// Some examples: | ||||
| 		// r:ebpf_1234/readline /bin/bash:0x12345 | ||||
| 		// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) | ||||
| 		// | ||||
| 		// See Documentation/trace/uprobetracer.txt for more details. | ||||
| 		token = uprobeToken(args) | ||||
| 		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token) | ||||
| 	} | ||||
| 	_, err = f.WriteString(pe) | ||||
| 	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL | ||||
| 	// when trying to create a kretprobe for a missing symbol. Make sure ENOENT | ||||
| 	// is returned to the caller. | ||||
| 	// EINVAL is also returned on pre-5.2 kernels when the `SYM[+offs]` token | ||||
| 	// is resolved to an invalid insn boundary. | ||||
| 	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { | ||||
| 		return fmt.Errorf("token %s: %w", token, os.ErrNotExist) | ||||
| 	} | ||||
| 	// Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved | ||||
| 	// to an invalid insn boundary. | ||||
| 	if errors.Is(err, syscall.EILSEQ) { | ||||
| 		return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) | ||||
| 	} | ||||
| 	// ERANGE is returned when the `SYM[+offs]` token is too big and cannot | ||||
| 	// be resolved. | ||||
| 	if errors.Is(err, syscall.ERANGE) { | ||||
| 		return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol | ||||
| // from <tracefs>/[k,u]probe_events. | ||||
| func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { | ||||
| 	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err) | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 
 | ||||
| 	// See [k,u]probe_events syntax above. The probe type does not need to be specified | ||||
| 	// for removals. | ||||
| 	pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol)) | ||||
| 	if _, err = f.WriteString(pe); err != nil { | ||||
| 		return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // randomGroup generates a pseudorandom string for use as a tracefs group name. | ||||
| // Returns an error when the output string would exceed 63 characters (kernel | ||||
| // limitation), when rand.Read() fails or when prefix contains characters not | ||||
| // allowed by isValidTraceID. | ||||
| func randomGroup(prefix string) (string, error) { | ||||
| 	if !isValidTraceID(prefix) { | ||||
| 		return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput) | ||||
| 	} | ||||
| 
 | ||||
| 	b := make([]byte, 8) | ||||
| 	if _, err := rand.Read(b); err != nil { | ||||
| 		return "", fmt.Errorf("reading random bytes: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	group := fmt.Sprintf("%s_%x", prefix, b) | ||||
| 	if len(group) > 63 { | ||||
| 		return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput) | ||||
| 	} | ||||
| 
 | ||||
| 	return group, nil | ||||
| } | ||||
| 
 | ||||
| func probePrefix(ret bool) string { | ||||
| 	if ret { | ||||
| 		return "r" | ||||
| 	} | ||||
| 	return "p" | ||||
| } | ||||
| 
 | ||||
| // determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit | ||||
| // from /sys/bus/event_source/devices/<pmu>/format/retprobe. | ||||
| func determineRetprobeBit(typ probeType) (uint64, error) { | ||||
| 	p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe") | ||||
| 
 | ||||
| 	data, err := os.ReadFile(p) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	var rp uint64 | ||||
| 	n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("parse retprobe bit: %w", err) | ||||
| 	} | ||||
| 	if n != 1 { | ||||
| 		return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n) | ||||
| 	} | ||||
| 
 | ||||
| 	return rp, nil | ||||
| } | ||||
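For reference, the format file usually contains a single token such as "config:0" (the exact bit position is reported by the kernel, so treat the value below as an assumption); the Sscanf call above extracts the bit position like this:

package main

import "fmt"

func main() {
	// Typical contents of /sys/bus/event_source/devices/kprobe/format/retprobe.
	data := "config:0"

	var rp uint64
	if _, err := fmt.Sscanf(data, "config:%d", &rp); err != nil {
		panic(err)
	}

	// Setting bit rp in PerfEventAttr.Config requests a retprobe instead of
	// an entry probe.
	fmt.Println(rp) // 0
}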
| 
 | ||||
| func kretprobeBit() (uint64, error) { | ||||
| 	kprobeRetprobeBit.once.Do(func() { | ||||
| 		kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType) | ||||
| 	}) | ||||
| 	return kprobeRetprobeBit.value, kprobeRetprobeBit.err | ||||
| } | ||||
| 
 | ||||
| // kprobeToken creates the SYM[+offs] token for the tracefs api. | ||||
| func kprobeToken(args probeArgs) string { | ||||
| 	po := args.symbol | ||||
| 
 | ||||
| 	if args.offset != 0 { | ||||
| 		po += fmt.Sprintf("+%#x", args.offset) | ||||
| 	} | ||||
| 
 | ||||
| 	return po | ||||
| } | ||||
							
								
								
									
251	vendor/github.com/cilium/ebpf/link/link.go (generated, vendored)
							|  | @ -1,11 +1,14 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/binary" | ||||
| 	"fmt" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/btf" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| ) | ||||
| 
 | ||||
| var ErrNotSupported = internal.ErrNotSupported | ||||
|  | @ -22,19 +25,65 @@ type Link interface { | |||
| 	// May return an error wrapping ErrNotSupported. | ||||
| 	Pin(string) error | ||||
| 
 | ||||
| 	// Undo a previous call to Pin. | ||||
| 	// | ||||
| 	// May return an error wrapping ErrNotSupported. | ||||
| 	Unpin() error | ||||
| 
 | ||||
| 	// Close frees resources. | ||||
| 	// | ||||
| 	// The link will be broken unless it has been pinned. A link | ||||
| 	// may continue past the lifetime of the process if Close is | ||||
| 	// The link will be broken unless it has been successfully pinned. | ||||
| 	// A link may continue past the lifetime of the process if Close is | ||||
| 	// not called. | ||||
| 	Close() error | ||||
| 
 | ||||
| 	// Info returns metadata on a link. | ||||
| 	// | ||||
| 	// May return an error wrapping ErrNotSupported. | ||||
| 	Info() (*Info, error) | ||||
| 
 | ||||
| 	// Prevent external users from implementing this interface. | ||||
| 	isLink() | ||||
| } | ||||
| 
 | ||||
| // LoadPinnedLink loads a link that was persisted into a bpffs. | ||||
| func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { | ||||
| 	raw, err := loadPinnedRawLink(fileName, opts) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return wrapRawLink(raw) | ||||
| } | ||||
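A sketch of re-opening a link that was pinned earlier with Link.Pin; the bpffs path is a placeholder, and passing nil options simply means no extra open flags:

package main

import (
	"log"

	"github.com/cilium/ebpf/link"
)

func main() {
	// Placeholder pin path on a mounted bpffs.
	l, err := link.LoadPinnedLink("/sys/fs/bpf/my_link", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Removing the pin means the link now only lives as long as this process
	// keeps the file descriptor open.
	if err := l.Unpin(); err != nil {
		log.Fatal(err)
	}
}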
| 
 | ||||
| // wrap a RawLink in a more specific type if possible. | ||||
| // | ||||
| // The function takes ownership of raw and closes it on error. | ||||
| func wrapRawLink(raw *RawLink) (Link, error) { | ||||
| 	info, err := raw.Info() | ||||
| 	if err != nil { | ||||
| 		raw.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	switch info.Type { | ||||
| 	case RawTracepointType: | ||||
| 		return &rawTracepoint{*raw}, nil | ||||
| 	case TracingType: | ||||
| 		return &tracing{*raw}, nil | ||||
| 	case CgroupType: | ||||
| 		return &linkCgroup{*raw}, nil | ||||
| 	case IterType: | ||||
| 		return &Iter{*raw}, nil | ||||
| 	case NetNsType: | ||||
| 		return &NetNsLink{*raw}, nil | ||||
| 	default: | ||||
| 		return raw, nil | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // ID uniquely identifies a BPF link. | ||||
| type ID uint32 | ||||
| type ID = sys.LinkID | ||||
| 
 | ||||
| // RawLinkOptions control the creation of a raw link. | ||||
| type RawLinkOptions struct { | ||||
|  | @ -44,13 +93,55 @@ type RawLinkOptions struct { | |||
| 	Program *ebpf.Program | ||||
| 	// Attach must match the attach type of Program. | ||||
| 	Attach ebpf.AttachType | ||||
| 	// BTF is the BTF of the attachment target. | ||||
| 	BTF btf.TypeID | ||||
| 	// Flags control the attach behaviour. | ||||
| 	Flags uint32 | ||||
| } | ||||
| 
 | ||||
| // RawLinkInfo contains metadata on a link. | ||||
| type RawLinkInfo struct { | ||||
| // Info contains metadata on a link. | ||||
| type Info struct { | ||||
| 	Type    Type | ||||
| 	ID      ID | ||||
| 	Program ebpf.ProgramID | ||||
| 	extra   interface{} | ||||
| } | ||||
| 
 | ||||
| type TracingInfo sys.TracingLinkInfo | ||||
| type CgroupInfo sys.CgroupLinkInfo | ||||
| type NetNsInfo sys.NetNsLinkInfo | ||||
| type XDPInfo sys.XDPLinkInfo | ||||
| 
 | ||||
| // Tracing returns tracing type-specific link info. | ||||
| // | ||||
| // Returns nil if the type-specific link info isn't available. | ||||
| func (r Info) Tracing() *TracingInfo { | ||||
| 	e, _ := r.extra.(*TracingInfo) | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| // Cgroup returns cgroup type-specific link info. | ||||
| // | ||||
| // Returns nil if the type-specific link info isn't available. | ||||
| func (r Info) Cgroup() *CgroupInfo { | ||||
| 	e, _ := r.extra.(*CgroupInfo) | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| // NetNs returns netns type-specific link info. | ||||
| // | ||||
| // Returns nil if the type-specific link info isn't available. | ||||
| func (r Info) NetNs() *NetNsInfo { | ||||
| 	e, _ := r.extra.(*NetNsInfo) | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| // XDP returns XDP type-specific link info. | ||||
| // | ||||
| // Returns nil if the type-specific link info isn't available. | ||||
| func (r Info) XDP() *XDPInfo { | ||||
| 	e, _ := r.extra.(*XDPInfo) | ||||
| 	return e | ||||
| } | ||||
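A sketch of inspecting a link through these accessors; each one returns nil when the kernel did not report type-specific info for that link:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/link"
)

// describeLink reports which kind of type-specific info a link carries.
func describeLink(l link.Link) error {
	info, err := l.Info()
	if err != nil {
		return err
	}

	switch {
	case info.Cgroup() != nil:
		fmt.Println("cgroup link", info.ID)
	case info.NetNs() != nil:
		fmt.Println("netns link", info.ID)
	case info.Tracing() != nil:
		fmt.Println("tracing link", info.ID)
	case info.XDP() != nil:
		fmt.Println("XDP link", info.ID)
	default:
		fmt.Println("link without extra info, program", info.Program)
	}
	return nil
}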
| 
 | ||||
| // RawLink is the low-level API to bpf_link. | ||||
|  | @ -58,7 +149,8 @@ type RawLinkInfo struct { | |||
| // You should consider using the higher level interfaces in this | ||||
| // package instead. | ||||
| type RawLink struct { | ||||
| 	fd *internal.FD | ||||
| 	fd         *sys.FD | ||||
| 	pinnedPath string | ||||
| } | ||||
| 
 | ||||
| // AttachRawLink creates a raw link. | ||||
|  | @ -68,66 +160,46 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { | |||
| 	} | ||||
| 
 | ||||
| 	if opts.Target < 0 { | ||||
| 		return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd) | ||||
| 		return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	progFd := opts.Program.FD() | ||||
| 	if progFd < 0 { | ||||
| 		return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) | ||||
| 		return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfLinkCreateAttr{ | ||||
| 		targetFd:   uint32(opts.Target), | ||||
| 		progFd:     uint32(progFd), | ||||
| 		attachType: opts.Attach, | ||||
| 	attr := sys.LinkCreateAttr{ | ||||
| 		TargetFd:    uint32(opts.Target), | ||||
| 		ProgFd:      uint32(progFd), | ||||
| 		AttachType:  sys.AttachType(opts.Attach), | ||||
| 		TargetBtfId: uint32(opts.BTF), | ||||
| 		Flags:       opts.Flags, | ||||
| 	} | ||||
| 	fd, err := bpfLinkCreate(&attr) | ||||
| 	fd, err := sys.LinkCreate(&attr) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't create link: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &RawLink{fd}, nil | ||||
| 	return &RawLink{fd, ""}, nil | ||||
| } | ||||
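A sketch of driving the raw API directly for a cgroup attachment; the cgroup path is a placeholder and prog is assumed to be a loaded CGroupSKB program. The higher-level helper in this package (AttachCgroup) is usually preferable.

package main

import (
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachCgroupIngress attaches prog to the ingress hook of a cgroup v2
// directory via the low-level raw link API.
func attachCgroupIngress(cgroupPath string, prog *ebpf.Program) (*link.RawLink, error) {
	cg, err := os.Open(cgroupPath)
	if err != nil {
		return nil, err
	}
	// The kernel keeps its own reference to the cgroup once the link exists,
	// so the directory fd can be closed right after AttachRawLink.
	defer cg.Close()

	return link.AttachRawLink(link.RawLinkOptions{
		Target:  int(cg.Fd()),
		Program: prog,
		Attach:  ebpf.AttachCGroupInetIngress,
	})
}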
| 
 | ||||
| // LoadPinnedRawLink loads a persisted link from a bpffs. | ||||
| func LoadPinnedRawLink(fileName string) (*RawLink, error) { | ||||
| 	return loadPinnedRawLink(fileName, UnspecifiedType) | ||||
| } | ||||
| 
 | ||||
| func loadPinnedRawLink(fileName string, typ Type) (*RawLink, error) { | ||||
| 	fd, err := internal.BPFObjGet(fileName) | ||||
| func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { | ||||
| 	fd, err := sys.ObjGet(&sys.ObjGetAttr{ | ||||
| 		Pathname:  sys.NewStringPointer(fileName), | ||||
| 		FileFlags: opts.Marshal(), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("load pinned link: %s", err) | ||||
| 		return nil, fmt.Errorf("load pinned link: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	link := &RawLink{fd} | ||||
| 	if typ == UnspecifiedType { | ||||
| 		return link, nil | ||||
| 	} | ||||
| 
 | ||||
| 	info, err := link.Info() | ||||
| 	if err != nil { | ||||
| 		link.Close() | ||||
| 		return nil, fmt.Errorf("get pinned link info: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if info.Type != typ { | ||||
| 		link.Close() | ||||
| 		return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, typ) | ||||
| 	} | ||||
| 
 | ||||
| 	return link, nil | ||||
| 	return &RawLink{fd, fileName}, nil | ||||
| } | ||||
| 
 | ||||
| func (l *RawLink) isLink() {} | ||||
| 
 | ||||
| // FD returns the raw file descriptor. | ||||
| func (l *RawLink) FD() int { | ||||
| 	fd, err := l.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return -1 | ||||
| 	} | ||||
| 	return int(fd) | ||||
| 	return l.fd.Int() | ||||
| } | ||||
| 
 | ||||
| // Close breaks the link. | ||||
|  | @ -142,13 +214,23 @@ func (l *RawLink) Close() error { | |||
| // Calling Close on a pinned Link will not break the link | ||||
| // until the pin is removed. | ||||
| func (l *RawLink) Pin(fileName string) error { | ||||
| 	if err := internal.BPFObjPin(fileName, l.fd); err != nil { | ||||
| 		return fmt.Errorf("can't pin link: %s", err) | ||||
| 	if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	l.pinnedPath = fileName | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Update implements Link. | ||||
| // Unpin implements the Link interface. | ||||
| func (l *RawLink) Unpin() error { | ||||
| 	if err := internal.Unpin(l.pinnedPath); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	l.pinnedPath = "" | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Update implements the Link interface. | ||||
| func (l *RawLink) Update(new *ebpf.Program) error { | ||||
| 	return l.UpdateArgs(RawLinkUpdateOptions{ | ||||
| 		New: new, | ||||
|  | @ -166,49 +248,66 @@ type RawLinkUpdateOptions struct { | |||
| func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { | ||||
| 	newFd := opts.New.FD() | ||||
| 	if newFd < 0 { | ||||
| 		return fmt.Errorf("invalid program: %s", internal.ErrClosedFd) | ||||
| 		return fmt.Errorf("invalid program: %s", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	var oldFd int | ||||
| 	if opts.Old != nil { | ||||
| 		oldFd = opts.Old.FD() | ||||
| 		if oldFd < 0 { | ||||
| 			return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd) | ||||
| 			return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	linkFd, err := l.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("can't update link: %s", err) | ||||
| 	attr := sys.LinkUpdateAttr{ | ||||
| 		LinkFd:    l.fd.Uint(), | ||||
| 		NewProgFd: uint32(newFd), | ||||
| 		OldProgFd: uint32(oldFd), | ||||
| 		Flags:     opts.Flags, | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfLinkUpdateAttr{ | ||||
| 		linkFd:    linkFd, | ||||
| 		newProgFd: uint32(newFd), | ||||
| 		oldProgFd: uint32(oldFd), | ||||
| 		flags:     opts.Flags, | ||||
| 	} | ||||
| 	return bpfLinkUpdate(&attr) | ||||
| } | ||||
| 
 | ||||
| // struct bpf_link_info | ||||
| type bpfLinkInfo struct { | ||||
| 	typ     uint32 | ||||
| 	id      uint32 | ||||
| 	prog_id uint32 | ||||
| 	return sys.LinkUpdate(&attr) | ||||
| } | ||||
| 
 | ||||
| // Info returns metadata about the link. | ||||
| func (l *RawLink) Info() (*RawLinkInfo, error) { | ||||
| 	var info bpfLinkInfo | ||||
| 	err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) | ||||
| 	if err != nil { | ||||
| func (l *RawLink) Info() (*Info, error) { | ||||
| 	var info sys.LinkInfo | ||||
| 
 | ||||
| 	if err := sys.ObjInfo(l.fd, &info); err != nil { | ||||
| 		return nil, fmt.Errorf("link info: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &RawLinkInfo{ | ||||
| 		Type(info.typ), | ||||
| 		ID(info.id), | ||||
| 		ebpf.ProgramID(info.prog_id), | ||||
| 	var extra interface{} | ||||
| 	switch info.Type { | ||||
| 	case CgroupType: | ||||
| 		extra = &CgroupInfo{} | ||||
| 	case IterType: | ||||
| 		// not supported | ||||
| 	case NetNsType: | ||||
| 		extra = &NetNsInfo{} | ||||
| 	case RawTracepointType: | ||||
| 		// not supported | ||||
| 	case TracingType: | ||||
| 		extra = &TracingInfo{} | ||||
| 	case XDPType: | ||||
| 		extra = &XDPInfo{} | ||||
| 	case PerfEventType: | ||||
| 		// no extra | ||||
| 	default: | ||||
| 		return nil, fmt.Errorf("unknown link info type: %d", info.Type) | ||||
| 	} | ||||
| 
 | ||||
| 	if info.Type != RawTracepointType && info.Type != IterType && info.Type != PerfEventType { | ||||
| 		buf := bytes.NewReader(info.Extra[:]) | ||||
| 		err := binary.Read(buf, internal.NativeEndian, extra) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("can not read extra link info: %w", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return &Info{ | ||||
| 		info.Type, | ||||
| 		info.Id, | ||||
| 		ebpf.ProgramID(info.ProgId), | ||||
| 		extra, | ||||
| 	}, nil | ||||
| } | ||||
|  |  | |||
							
								
								
									
28	vendor/github.com/cilium/ebpf/link/netns.go (generated, vendored)
							|  | @ -6,14 +6,9 @@ import ( | |||
| 	"github.com/cilium/ebpf" | ||||
| ) | ||||
| 
 | ||||
| // NetNsInfo contains metadata about a network namespace link. | ||||
| type NetNsInfo struct { | ||||
| 	RawLinkInfo | ||||
| } | ||||
| 
 | ||||
| // NetNsLink is a program attached to a network namespace. | ||||
| type NetNsLink struct { | ||||
| 	*RawLink | ||||
| 	RawLink | ||||
| } | ||||
| 
 | ||||
| // AttachNetNs attaches a program to a network namespace. | ||||
|  | @ -37,24 +32,5 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &NetNsLink{link}, nil | ||||
| } | ||||
| 
 | ||||
| // LoadPinnedNetNs loads a network namespace link from bpffs. | ||||
| func LoadPinnedNetNs(fileName string) (*NetNsLink, error) { | ||||
| 	link, err := loadPinnedRawLink(fileName, NetNsType) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &NetNsLink{link}, nil | ||||
| } | ||||
| 
 | ||||
| // Info returns information about the link. | ||||
| func (nns *NetNsLink) Info() (*NetNsInfo, error) { | ||||
| 	info, err := nns.RawLink.Info() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &NetNsInfo{*info}, nil | ||||
| 	return &NetNsLink{*link}, nil | ||||
| } | ||||
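A sketch of attaching to the caller's own network namespace; prog is assumed to be a loaded program whose attach type accepts a netns target (for example a flow dissector), and the /proc path is the usual way to obtain the namespace fd:

package main

import (
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachToCurrentNetNs attaches prog to the network namespace of the calling
// process.
func attachToCurrentNetNs(prog *ebpf.Program) (*link.NetNsLink, error) {
	ns, err := os.Open("/proc/self/ns/net")
	if err != nil {
		return nil, err
	}
	// The namespace fd is only needed while the link is being created.
	defer ns.Close()

	return link.AttachNetNs(int(ns.Fd()), prog)
}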
|  |  | |||
							
								
								
									
394	vendor/github.com/cilium/ebpf/link/perf_event.go (generated, vendored, new file)
							|  | @ -0,0 +1,394 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // Getting the terminology right is usually the hardest part. For posterity and | ||||
| // for staying sane during implementation: | ||||
| // | ||||
| // - trace event: Representation of a kernel runtime hook. Filesystem entries | ||||
| //   under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes. | ||||
| //   Can be instantiated into perf events (see below). | ||||
| // - tracepoint: A predetermined hook point in the kernel. Exposed as trace | ||||
| //   events in (sub)directories under <tracefs>/events. Cannot be closed or | ||||
| //   removed; they are static. | ||||
| // - k(ret)probe: Ephemeral trace events based on entry or exit points of | ||||
| //   exported kernel symbols. kprobe-based (tracefs) trace events can be | ||||
| //   created system-wide by writing to the <tracefs>/kprobe_events file, or | ||||
| //   they can be scoped to the current process by creating PMU perf events. | ||||
| // - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries | ||||
| //   and offsets. uprobe-based (tracefs) trace events can be | ||||
| //   created system-wide by writing to the <tracefs>/uprobe_events file, or | ||||
| //   they can be scoped to the current process by creating PMU perf events. | ||||
| // - perf event: An object instantiated based on an existing trace event or | ||||
| //   kernel symbol. Referred to by fd in userspace. | ||||
| //   Exactly one eBPF program can be attached to a perf event. Multiple perf | ||||
| //   events can be created from a single trace event. Closing a perf event | ||||
| //   stops any further invocations of the attached eBPF program. | ||||
| 
 | ||||
| var ( | ||||
| 	tracefsPath = "/sys/kernel/debug/tracing" | ||||
| 
 | ||||
| 	errInvalidInput = errors.New("invalid input") | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	perfAllThreads = -1 | ||||
| ) | ||||
| 
 | ||||
| type perfEventType uint8 | ||||
| 
 | ||||
| const ( | ||||
| 	tracepointEvent perfEventType = iota | ||||
| 	kprobeEvent | ||||
| 	kretprobeEvent | ||||
| 	uprobeEvent | ||||
| 	uretprobeEvent | ||||
| ) | ||||
| 
 | ||||
| // A perfEvent represents a perf event kernel object. Exactly one eBPF program | ||||
| // can be attached to it. It is created based on a tracefs trace event or a | ||||
| // Performance Monitoring Unit (PMU). | ||||
| type perfEvent struct { | ||||
| 	// The event type determines the types of programs that can be attached. | ||||
| 	typ perfEventType | ||||
| 
 | ||||
| 	// Group and name of the tracepoint/kprobe/uprobe. | ||||
| 	group string | ||||
| 	name  string | ||||
| 
 | ||||
| 	// PMU event ID read from sysfs. Valid IDs are non-zero. | ||||
| 	pmuID uint64 | ||||
| 	// ID of the trace event read from tracefs. Valid IDs are non-zero. | ||||
| 	tracefsID uint64 | ||||
| 
 | ||||
| 	// User-provided arbitrary value. | ||||
| 	cookie uint64 | ||||
| 
 | ||||
| 	// This is the perf event FD. | ||||
| 	fd *sys.FD | ||||
| } | ||||
| 
 | ||||
| func (pe *perfEvent) Close() error { | ||||
| 	if err := pe.fd.Close(); err != nil { | ||||
| 		return fmt.Errorf("closing perf event fd: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	switch pe.typ { | ||||
| 	case kprobeEvent, kretprobeEvent: | ||||
| 		// Clean up kprobe tracefs entry. | ||||
| 		if pe.tracefsID != 0 { | ||||
| 			return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name) | ||||
| 		} | ||||
| 	case uprobeEvent, uretprobeEvent: | ||||
| 		// Clean up uprobe tracefs entry. | ||||
| 		if pe.tracefsID != 0 { | ||||
| 			return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name) | ||||
| 		} | ||||
| 	case tracepointEvent: | ||||
| 		// Tracepoint trace events don't hold any extra resources. | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // perfEventLink represents a bpf perf link. | ||||
| type perfEventLink struct { | ||||
| 	RawLink | ||||
| 	pe *perfEvent | ||||
| } | ||||
| 
 | ||||
| func (pl *perfEventLink) isLink() {} | ||||
| 
 | ||||
| // Pinning requires the underlying perf event FD to stay open. | ||||
| // | ||||
| // | PerfEvent FD | BpfLink FD | Works | | ||||
| // |--------------|------------|-------| | ||||
| // | Open         | Open       | Yes   | | ||||
| // | Closed       | Open       | No    | | ||||
| // | Open         | Closed     | No (Pin() -> EINVAL) | | ||||
| // | Closed       | Closed     | No (Pin() -> EINVAL) | | ||||
| // | ||||
| // There is currently no pretty way to recover the perf event FD | ||||
| // when loading a pinned link, so leave as not supported for now. | ||||
| func (pl *perfEventLink) Pin(string) error { | ||||
| 	return fmt.Errorf("perf event link pin: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (pl *perfEventLink) Unpin() error { | ||||
| 	return fmt.Errorf("perf event link unpin: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (pl *perfEventLink) Close() error { | ||||
| 	if err := pl.pe.Close(); err != nil { | ||||
| 		return fmt.Errorf("perf event link close: %w", err) | ||||
| 	} | ||||
| 	return pl.fd.Close() | ||||
| } | ||||
| 
 | ||||
| func (pl *perfEventLink) Update(prog *ebpf.Program) error { | ||||
| 	return fmt.Errorf("perf event link update: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| // perfEventIoctl implements Link and handles the perf event lifecycle | ||||
| // via ioctl(). | ||||
| type perfEventIoctl struct { | ||||
| 	*perfEvent | ||||
| } | ||||
| 
 | ||||
| func (pi *perfEventIoctl) isLink() {} | ||||
| 
 | ||||
| // Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), | ||||
| // calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array | ||||
| // owned by the perf event, which means multiple programs can be attached | ||||
| // simultaneously. | ||||
| // | ||||
| // Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event | ||||
| // returns EEXIST. | ||||
| // | ||||
| // Detaching a program from a perf event is currently not possible, so a | ||||
| // program replacement mechanism cannot be implemented for perf events. | ||||
| func (pi *perfEventIoctl) Update(prog *ebpf.Program) error { | ||||
| 	return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (pi *perfEventIoctl) Pin(string) error { | ||||
| 	return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (pi *perfEventIoctl) Unpin() error { | ||||
| 	return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (pi *perfEventIoctl) Info() (*Info, error) { | ||||
| 	return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| // attach the given eBPF prog to the perf event stored in pe. | ||||
| // pe must contain a valid perf event fd. | ||||
| // prog's type must match the program type stored in pe. | ||||
| func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) { | ||||
| 	if prog == nil { | ||||
| 		return nil, errors.New("cannot attach a nil program") | ||||
| 	} | ||||
| 	if prog.FD() < 0 { | ||||
| 		return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	switch pe.typ { | ||||
| 	case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: | ||||
| 		if t := prog.Type(); t != ebpf.Kprobe { | ||||
| 			return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) | ||||
| 		} | ||||
| 	case tracepointEvent: | ||||
| 		if t := prog.Type(); t != ebpf.TracePoint { | ||||
| 			return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) | ||||
| 		} | ||||
| 	default: | ||||
| 		return nil, fmt.Errorf("unknown perf event type: %d", pe.typ) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := haveBPFLinkPerfEvent(); err == nil { | ||||
| 		return attachPerfEventLink(pe, prog) | ||||
| 	} | ||||
| 	return attachPerfEventIoctl(pe, prog) | ||||
| } | ||||
| 
 | ||||
| func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { | ||||
| 	if pe.cookie != 0 { | ||||
| 		return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) | ||||
| 	} | ||||
| 
 | ||||
| 	// Assign the eBPF program to the perf event. | ||||
| 	err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("setting perf event bpf program: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. | ||||
| 	if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { | ||||
| 		return nil, fmt.Errorf("enable perf event: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	pi := &perfEventIoctl{pe} | ||||
| 
 | ||||
| 	// Close the perf event when its reference is lost to avoid leaking system resources. | ||||
| 	runtime.SetFinalizer(pi, (*perfEventIoctl).Close) | ||||
| 	return pi, nil | ||||
| } | ||||
| 
 | ||||
| // Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). | ||||
| // | ||||
| // https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e | ||||
| func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) { | ||||
| 	fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ | ||||
| 		ProgFd:     uint32(prog.FD()), | ||||
| 		TargetFd:   pe.fd.Uint(), | ||||
| 		AttachType: sys.BPF_PERF_EVENT, | ||||
| 		BpfCookie:  pe.cookie, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("cannot create bpf perf link: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	pl := &perfEventLink{RawLink{fd: fd}, pe} | ||||
| 
 | ||||
| 	// Close the perf event when its reference is lost to avoid leaking system resources. | ||||
| 	runtime.SetFinalizer(pl, (*perfEventLink).Close) | ||||
| 	return pl, nil | ||||
| } | ||||
| 
 | ||||
| // unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. | ||||
| func unsafeStringPtr(str string) (unsafe.Pointer, error) { | ||||
| 	p, err := unix.BytePtrFromString(str) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return unsafe.Pointer(p), nil | ||||
| } | ||||
| 
 | ||||
| // getTraceEventID reads a trace event's ID from tracefs given its group and name. | ||||
| // The kernel requires group and name to be alphanumeric or underscore. | ||||
| // | ||||
| // name automatically has its invalid symbols converted to underscores so the caller | ||||
| // can pass a raw symbol name, e.g. a kernel symbol containing dots. | ||||
| func getTraceEventID(group, name string) (uint64, error) { | ||||
| 	name = sanitizeSymbol(name) | ||||
| 	tid, err := uint64FromFile(tracefsPath, "events", group, name, "id") | ||||
| 	if errors.Is(err, os.ErrNotExist) { | ||||
| 		return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return tid, nil | ||||
| } | ||||
| 
 | ||||
| // getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier) | ||||
| // from /sys/bus/event_source/devices/<pmu>/type. | ||||
| // | ||||
| // Returns ErrNotSupported if the pmu type is not supported. | ||||
| func getPMUEventType(typ probeType) (uint64, error) { | ||||
| 	et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type") | ||||
| 	if errors.Is(err, os.ErrNotExist) { | ||||
| 		return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("reading pmu type %s: %w", typ, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return et, nil | ||||
| } | ||||
| 
 | ||||
| // openTracepointPerfEvent opens a tracepoint-type perf event. System-wide | ||||
| // [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints | ||||
| // behind the scenes, and can be attached to using these perf events. | ||||
| func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { | ||||
| 	attr := unix.PerfEventAttr{ | ||||
| 		Type:        unix.PERF_TYPE_TRACEPOINT, | ||||
| 		Config:      tid, | ||||
| 		Sample_type: unix.PERF_SAMPLE_RAW, | ||||
| 		Sample:      1, | ||||
| 		Wakeup:      1, | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("opening tracepoint perf event: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return sys.NewFD(fd) | ||||
| } | ||||
| 
 | ||||
| // uint64FromFile reads a uint64 from a file. All elements of path are sanitized | ||||
| // and joined onto base. Returns an error if base no longer prefixes the path after | ||||
| // joining all components. | ||||
| func uint64FromFile(base string, path ...string) (uint64, error) { | ||||
| 	l := filepath.Join(path...) | ||||
| 	p := filepath.Join(base, l) | ||||
| 	if !strings.HasPrefix(p, base) { | ||||
| 		return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput) | ||||
| 	} | ||||
| 
 | ||||
| 	data, err := os.ReadFile(p) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("reading file %s: %w", p, err) | ||||
| 	} | ||||
| 
 | ||||
| 	et := bytes.TrimSpace(data) | ||||
| 	return strconv.ParseUint(string(et), 10, 64) | ||||
| } | ||||
| 
 | ||||
| // Probe BPF perf link. | ||||
| // | ||||
| // https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 | ||||
| // https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e | ||||
| var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error { | ||||
| 	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ | ||||
| 		Name: "probe_bpf_perf_link", | ||||
| 		Type: ebpf.Kprobe, | ||||
| 		Instructions: asm.Instructions{ | ||||
| 			asm.Mov.Imm(asm.R0, 0), | ||||
| 			asm.Return(), | ||||
| 		}, | ||||
| 		License: "MIT", | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer prog.Close() | ||||
| 
 | ||||
| 	_, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ | ||||
| 		ProgFd:     uint32(prog.FD()), | ||||
| 		AttachType: sys.BPF_PERF_EVENT, | ||||
| 	}) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if errors.Is(err, unix.EBADF) { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return err | ||||
| }) | ||||
| 
 | ||||
| // isValidTraceID implements the equivalent of a regex match | ||||
| // against "^[a-zA-Z_][0-9a-zA-Z_]*$". | ||||
| // | ||||
| // Trace event groups, names and kernel symbols must adhere to this set | ||||
| // of characters. Non-empty, first character must not be a number, all | ||||
| // characters must be alphanumeric or underscore. | ||||
| func isValidTraceID(s string) bool { | ||||
| 	if len(s) < 1 { | ||||
| 		return false | ||||
| 	} | ||||
| 	for i, c := range []byte(s) { | ||||
| 		switch { | ||||
| 		case c >= 'a' && c <= 'z': | ||||
| 		case c >= 'A' && c <= 'Z': | ||||
| 		case c == '_': | ||||
| 		case i > 0 && c >= '0' && c <= '9': | ||||
| 
 | ||||
| 		default: | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return true | ||||
| } | ||||
							
								
								
									
25	vendor/github.com/cilium/ebpf/link/platform.go (generated, vendored, new file)
							|  | @ -0,0 +1,25 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"runtime" | ||||
| ) | ||||
| 
 | ||||
| func platformPrefix(symbol string) string { | ||||
| 
 | ||||
| 	prefix := runtime.GOARCH | ||||
| 
 | ||||
| 	// per https://github.com/golang/go/blob/master/src/go/build/syslist.go | ||||
| 	switch prefix { | ||||
| 	case "386": | ||||
| 		prefix = "ia32" | ||||
| 	case "amd64", "amd64p32": | ||||
| 		prefix = "x64" | ||||
| 	case "arm64", "arm64be": | ||||
| 		prefix = "arm64" | ||||
| 	default: | ||||
| 		return symbol | ||||
| 	} | ||||
| 
 | ||||
| 	return fmt.Sprintf("__%s_%s", prefix, symbol) | ||||
| } | ||||
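An illustrative, in-package test sketch of what the prefixing does; the expected string assumes an amd64 build, other architectures differ, and unrecognized ones fall through to the unprefixed symbol. kprobe() only falls back to this prefixed form when the plain symbol cannot be found.

package link

import "testing"

// TestPlatformPrefixSketch is illustrative only; the concrete output depends
// on runtime.GOARCH.
func TestPlatformPrefixSketch(t *testing.T) {
	// On amd64 this yields "__x64_sys_execve"; on an unrecognized
	// architecture the input is returned unchanged.
	t.Log(platformPrefix("sys_execve"))
}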
							
								
								
									
14	vendor/github.com/cilium/ebpf/link/program.go (generated, vendored)
							|  | @ -4,7 +4,7 @@ import ( | |||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| ) | ||||
| 
 | ||||
| type RawAttachProgramOptions struct { | ||||
|  | @ -34,7 +34,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { | |||
| 		replaceFd = uint32(opts.Replace.FD()) | ||||
| 	} | ||||
| 
 | ||||
| 	attr := internal.BPFProgAttachAttr{ | ||||
| 	attr := sys.ProgAttachAttr{ | ||||
| 		TargetFd:     uint32(opts.Target), | ||||
| 		AttachBpfFd:  uint32(opts.Program.FD()), | ||||
| 		ReplaceBpfFd: replaceFd, | ||||
|  | @ -42,8 +42,8 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { | |||
| 		AttachFlags:  uint32(opts.Flags), | ||||
| 	} | ||||
| 
 | ||||
| 	if err := internal.BPFProgAttach(&attr); err != nil { | ||||
| 		return fmt.Errorf("can't attach program: %s", err) | ||||
| 	if err := sys.ProgAttach(&attr); err != nil { | ||||
| 		return fmt.Errorf("can't attach program: %w", err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | @ -63,13 +63,13 @@ func RawDetachProgram(opts RawDetachProgramOptions) error { | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := internal.BPFProgDetachAttr{ | ||||
| 	attr := sys.ProgDetachAttr{ | ||||
| 		TargetFd:    uint32(opts.Target), | ||||
| 		AttachBpfFd: uint32(opts.Program.FD()), | ||||
| 		AttachType:  uint32(opts.Attach), | ||||
| 	} | ||||
| 	if err := internal.BPFProgDetach(&attr); err != nil { | ||||
| 		return fmt.Errorf("can't detach program: %s", err) | ||||
| 	if err := sys.ProgDetach(&attr); err != nil { | ||||
| 		return fmt.Errorf("can't detach program: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
|  |  | |||
							
								
								
									
62	vendor/github.com/cilium/ebpf/link/raw_tracepoint.go (generated, vendored)
							|  | @ -1,10 +1,11 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| ) | ||||
| 
 | ||||
| type RawTracepointOptions struct { | ||||
|  | @ -22,36 +23,65 @@ func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { | |||
| 		return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) | ||||
| 	} | ||||
| 	if opts.Program.FD() < 0 { | ||||
| 		return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd) | ||||
| 		return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{ | ||||
| 		name: internal.NewStringPointer(opts.Name), | ||||
| 		fd:   uint32(opts.Program.FD()), | ||||
| 	fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ | ||||
| 		Name:   sys.NewStringPointer(opts.Name), | ||||
| 		ProgFd: uint32(opts.Program.FD()), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &progAttachRawTracepoint{fd: fd}, nil | ||||
| 	err = haveBPFLink() | ||||
| 	if errors.Is(err, ErrNotSupported) { | ||||
| 		// Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") | ||||
| 		// raw_tracepoints are just a plain fd. | ||||
| 		return &simpleRawTracepoint{fd}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &rawTracepoint{RawLink{fd: fd}}, nil | ||||
| } | ||||
| 
 | ||||
| type progAttachRawTracepoint struct { | ||||
| 	fd *internal.FD | ||||
| type simpleRawTracepoint struct { | ||||
| 	fd *sys.FD | ||||
| } | ||||
| 
 | ||||
| var _ Link = (*progAttachRawTracepoint)(nil) | ||||
| var _ Link = (*simpleRawTracepoint)(nil) | ||||
| 
 | ||||
| func (rt *progAttachRawTracepoint) isLink() {} | ||||
| func (frt *simpleRawTracepoint) isLink() {} | ||||
| 
 | ||||
| func (rt *progAttachRawTracepoint) Close() error { | ||||
| 	return rt.fd.Close() | ||||
| func (frt *simpleRawTracepoint) Close() error { | ||||
| 	return frt.fd.Close() | ||||
| } | ||||
| 
 | ||||
| func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error { | ||||
| 	return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported) | ||||
| func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { | ||||
| 	return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (rt *progAttachRawTracepoint) Pin(_ string) error { | ||||
| 	return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported) | ||||
| func (frt *simpleRawTracepoint) Pin(string) error { | ||||
| 	return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (frt *simpleRawTracepoint) Unpin() error { | ||||
| 	return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| func (frt *simpleRawTracepoint) Info() (*Info, error) { | ||||
| 	return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| type rawTracepoint struct { | ||||
| 	RawLink | ||||
| } | ||||
| 
 | ||||
| var _ Link = (*rawTracepoint)(nil) | ||||
| 
 | ||||
| func (rt *rawTracepoint) Update(_ *ebpf.Program) error { | ||||
| 	return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) | ||||
| } | ||||
|  |  | |||
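For context, a caller of the reworked raw tracepoint API might look like the sketch below. It is illustrative only and not part of the commit: prog stands in for a RawTracepoint program loaded elsewhere, and the tracepoint name is arbitrary.

    package example

    import (
    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/link"
    )

    // attachSchedSwitch attaches prog to the sched_switch raw tracepoint.
    // On kernels without bpf_link, the package falls back to the plain-fd
    // representation (simpleRawTracepoint) introduced in this hunk.
    func attachSchedSwitch(prog *ebpf.Program) (link.Link, error) {
    	return link.AttachRawTracepoint(link.RawTracepointOptions{
    		Name:    "sched_switch",
    		Program: prog,
    	})
    }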
							
								
								
									
40  vendor/github.com/cilium/ebpf/link/socket_filter.go (generated, vendored, Normal file)
									
								
							|  | @ -0,0 +1,40 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"syscall" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // AttachSocketFilter attaches a SocketFilter BPF program to a socket. | ||||
| func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { | ||||
| 	rawConn, err := conn.SyscallConn() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	var ssoErr error | ||||
| 	err = rawConn.Control(func(fd uintptr) { | ||||
| 		ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) | ||||
| 	}) | ||||
| 	if ssoErr != nil { | ||||
| 		return ssoErr | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| // DetachSocketFilter detaches a SocketFilter BPF program from a socket. | ||||
| func DetachSocketFilter(conn syscall.Conn) error { | ||||
| 	rawConn, err := conn.SyscallConn() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	var ssoErr error | ||||
| 	err = rawConn.Control(func(fd uintptr) { | ||||
| 		ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) | ||||
| 	}) | ||||
| 	if ssoErr != nil { | ||||
| 		return ssoErr | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
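A minimal usage sketch for the new AttachSocketFilter/DetachSocketFilter helpers, assuming prog is a SocketFilter program loaded elsewhere; the UDP listener is only one example of a type that implements syscall.Conn.

    package example

    import (
    	"net"

    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/link"
    )

    // filterUDP attaches prog to a UDP socket and returns a detach function.
    func filterUDP(prog *ebpf.Program) (func() error, error) {
    	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 0})
    	if err != nil {
    		return nil, err
    	}

    	// *net.UDPConn implements syscall.Conn, which is all the helper needs.
    	if err := link.AttachSocketFilter(conn, prog); err != nil {
    		conn.Close()
    		return nil, err
    	}

    	return func() error {
    		defer conn.Close()
    		return link.DetachSocketFilter(conn)
    	}, nil
    }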
							
								
								
									
108  vendor/github.com/cilium/ebpf/link/syscalls.go (generated, vendored)
									
									
								
							|  | @ -2,35 +2,33 @@ package link | |||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // Type is the kind of link. | ||||
| type Type uint32 | ||||
| type Type = sys.LinkType | ||||
| 
 | ||||
| // Valid link types. | ||||
| // | ||||
| // Equivalent to enum bpf_link_type. | ||||
| const ( | ||||
| 	UnspecifiedType Type = iota | ||||
| 	RawTracepointType | ||||
| 	TracingType | ||||
| 	CgroupType | ||||
| 	IterType | ||||
| 	NetNsType | ||||
| 	XDPType | ||||
| 	UnspecifiedType   = sys.BPF_LINK_TYPE_UNSPEC | ||||
| 	RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT | ||||
| 	TracingType       = sys.BPF_LINK_TYPE_TRACING | ||||
| 	CgroupType        = sys.BPF_LINK_TYPE_CGROUP | ||||
| 	IterType          = sys.BPF_LINK_TYPE_ITER | ||||
| 	NetNsType         = sys.BPF_LINK_TYPE_NETNS | ||||
| 	XDPType           = sys.BPF_LINK_TYPE_XDP | ||||
| 	PerfEventType     = sys.BPF_LINK_TYPE_PERF_EVENT | ||||
| ) | ||||
| 
 | ||||
| var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { | ||||
| 	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ | ||||
| 		Type:       ebpf.CGroupSKB, | ||||
| 		AttachType: ebpf.AttachCGroupInetIngress, | ||||
| 		License:    "MIT", | ||||
| 		Type:    ebpf.CGroupSKB, | ||||
| 		License: "MIT", | ||||
| 		Instructions: asm.Instructions{ | ||||
| 			asm.Mov.Imm(asm.R0, 0), | ||||
| 			asm.Return(), | ||||
|  | @ -69,7 +67,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace | |||
| 	// We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. | ||||
| 	// If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't | ||||
| 	// present. | ||||
| 	attr := internal.BPFProgAttachAttr{ | ||||
| 	attr := sys.ProgAttachAttr{ | ||||
| 		// We rely on this being checked after attachFlags. | ||||
| 		TargetFd:    ^uint32(0), | ||||
| 		AttachBpfFd: uint32(prog.FD()), | ||||
|  | @ -77,7 +75,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace | |||
| 		AttachFlags: uint32(flagReplace), | ||||
| 	} | ||||
| 
 | ||||
| 	err = internal.BPFProgAttach(&attr) | ||||
| 	err = sys.ProgAttach(&attr) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
|  | @ -87,55 +85,14 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace | |||
| 	return err | ||||
| }) | ||||
| 
 | ||||
| type bpfLinkCreateAttr struct { | ||||
| 	progFd     uint32 | ||||
| 	targetFd   uint32 | ||||
| 	attachType ebpf.AttachType | ||||
| 	flags      uint32 | ||||
| } | ||||
| 
 | ||||
| func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { | ||||
| 	ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return internal.NewFD(uint32(ptr)), nil | ||||
| } | ||||
| 
 | ||||
| type bpfLinkUpdateAttr struct { | ||||
| 	linkFd    uint32 | ||||
| 	newProgFd uint32 | ||||
| 	flags     uint32 | ||||
| 	oldProgFd uint32 | ||||
| } | ||||
| 
 | ||||
| func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error { | ||||
| 	_, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { | ||||
| 	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ | ||||
| 		Type:       ebpf.CGroupSKB, | ||||
| 		AttachType: ebpf.AttachCGroupInetIngress, | ||||
| 		License:    "MIT", | ||||
| 		Instructions: asm.Instructions{ | ||||
| 			asm.Mov.Imm(asm.R0, 0), | ||||
| 			asm.Return(), | ||||
| 		}, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	defer prog.Close() | ||||
| 
 | ||||
| 	attr := bpfLinkCreateAttr{ | ||||
| 	attr := sys.LinkCreateAttr{ | ||||
| 		// This is a hopefully invalid file descriptor, which triggers EBADF. | ||||
| 		targetFd:   ^uint32(0), | ||||
| 		progFd:     uint32(prog.FD()), | ||||
| 		attachType: ebpf.AttachCGroupInetIngress, | ||||
| 		TargetFd:   ^uint32(0), | ||||
| 		ProgFd:     ^uint32(0), | ||||
| 		AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), | ||||
| 	} | ||||
| 	_, err = bpfLinkCreate(&attr) | ||||
| 	_, err := sys.LinkCreate(&attr) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
|  | @ -144,30 +101,3 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { | |||
| 	} | ||||
| 	return err | ||||
| }) | ||||
| 
 | ||||
| type bpfIterCreateAttr struct { | ||||
| 	linkFd uint32 | ||||
| 	flags  uint32 | ||||
| } | ||||
| 
 | ||||
| func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) { | ||||
| 	ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	if err == nil { | ||||
| 		return internal.NewFD(uint32(ptr)), nil | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
| 
 | ||||
| type bpfRawTracepointOpenAttr struct { | ||||
| 	name internal.Pointer | ||||
| 	fd   uint32 | ||||
| 	_    uint32 | ||||
| } | ||||
| 
 | ||||
| func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) { | ||||
| 	ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	if err == nil { | ||||
| 		return internal.NewFD(uint32(ptr)), nil | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  |  | |||
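The feature probes above surface missing kernel support to callers as ErrNotSupported. Below is a hedged sketch of the usual handling pattern, not library code; err is assumed to come from one of this package's Attach* helpers.

    package example

    import (
    	"errors"
    	"fmt"

    	"github.com/cilium/ebpf"
    )

    // handleAttachError degrades gracefully when the running kernel lacks a
    // probed feature (for example, bpf_link requires 5.7+).
    func handleAttachError(err error) error {
    	if err == nil {
    		return nil
    	}
    	if errors.Is(err, ebpf.ErrNotSupported) {
    		// Optional functionality: skip it rather than fail hard.
    		return nil
    	}
    	return fmt.Errorf("attach: %w", err)
    }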
							
								
								
									
77  vendor/github.com/cilium/ebpf/link/tracepoint.go (generated, vendored, Normal file)
									
								
							|  | @ -0,0 +1,77 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| ) | ||||
| 
 | ||||
| // TracepointOptions defines additional parameters that will be used | ||||
| // when loading Tracepoints. | ||||
| type TracepointOptions struct { | ||||
| 	// Arbitrary value that can be fetched from an eBPF program | ||||
| 	// via `bpf_get_attach_cookie()`. | ||||
| 	// | ||||
| 	// Needs kernel 5.15+. | ||||
| 	Cookie uint64 | ||||
| } | ||||
| 
 | ||||
| // Tracepoint attaches the given eBPF program to the tracepoint with the given | ||||
| // group and name. See /sys/kernel/debug/tracing/events to find available | ||||
| // tracepoints. The top-level directory is the group, the event's subdirectory | ||||
| // is the name. Example: | ||||
| // | ||||
| //	tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) | ||||
| // | ||||
| // Losing the reference to the resulting Link (tp) will close the Tracepoint | ||||
| // and prevent further execution of prog. The Link must be Closed during | ||||
| // program shutdown to avoid leaking system resources. | ||||
| // | ||||
| // Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is | ||||
| // only possible as of kernel 4.14 (commit cf5f5ce). | ||||
| func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { | ||||
| 	if group == "" || name == "" { | ||||
| 		return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if prog == nil { | ||||
| 		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if !isValidTraceID(group) || !isValidTraceID(name) { | ||||
| 		return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput) | ||||
| 	} | ||||
| 	if prog.Type() != ebpf.TracePoint { | ||||
| 		return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) | ||||
| 	} | ||||
| 
 | ||||
| 	tid, err := getTraceEventID(group, name) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := openTracepointPerfEvent(tid, perfAllThreads) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	var cookie uint64 | ||||
| 	if opts != nil { | ||||
| 		cookie = opts.Cookie | ||||
| 	} | ||||
| 
 | ||||
| 	pe := &perfEvent{ | ||||
| 		typ:       tracepointEvent, | ||||
| 		group:     group, | ||||
| 		name:      name, | ||||
| 		tracefsID: tid, | ||||
| 		cookie:    cookie, | ||||
| 		fd:        fd, | ||||
| 	} | ||||
| 
 | ||||
| 	lnk, err := attachPerfEvent(pe, prog) | ||||
| 	if err != nil { | ||||
| 		pe.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return lnk, nil | ||||
| } | ||||
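A usage sketch matching the doc comment above; prog is assumed to be a loaded TracePoint program, and the group/name pair is only an example.

    package example

    import (
    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/link"
    )

    // traceForks attaches prog to the sys_enter_fork tracepoint. The returned
    // Link should be Closed during shutdown to avoid leaking resources.
    func traceForks(prog *ebpf.Program) (link.Link, error) {
    	return link.Tracepoint("syscalls", "sys_enter_fork", prog, nil)
    }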
							
								
								
									
141  vendor/github.com/cilium/ebpf/link/tracing.go (generated, vendored, Normal file)
									
								
							|  | @ -0,0 +1,141 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/btf" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| ) | ||||
| 
 | ||||
| type tracing struct { | ||||
| 	RawLink | ||||
| } | ||||
| 
 | ||||
| func (f *tracing) Update(new *ebpf.Program) error { | ||||
| 	return fmt.Errorf("tracing update: %w", ErrNotSupported) | ||||
| } | ||||
| 
 | ||||
| // AttachFreplace attaches the given eBPF program to the function it replaces. | ||||
| // | ||||
| // The program and name can either be provided at link time, or can be provided | ||||
| // at program load time. If they were provided at load time, they should be nil | ||||
| // and empty respectively here, as they will be ignored by the kernel. | ||||
| // Examples: | ||||
| // | ||||
| //	AttachFreplace(dispatcher, "function", replacement) | ||||
| //	AttachFreplace(nil, "", replacement) | ||||
| func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { | ||||
| 	if (name == "") != (targetProg == nil) { | ||||
| 		return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if prog == nil { | ||||
| 		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if prog.Type() != ebpf.Extension { | ||||
| 		return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) | ||||
| 	} | ||||
| 
 | ||||
| 	var ( | ||||
| 		target int | ||||
| 		typeID btf.TypeID | ||||
| 	) | ||||
| 	if targetProg != nil { | ||||
| 		btfHandle, err := targetProg.Handle() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		defer btfHandle.Close() | ||||
| 
 | ||||
| 		spec, err := btfHandle.Spec(nil) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		var function *btf.Func | ||||
| 		if err := spec.TypeByName(name, &function); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		target = targetProg.FD() | ||||
| 		typeID, err = spec.TypeID(function) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	link, err := AttachRawLink(RawLinkOptions{ | ||||
| 		Target:  target, | ||||
| 		Program: prog, | ||||
| 		Attach:  ebpf.AttachNone, | ||||
| 		BTF:     typeID, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &tracing{*link}, nil | ||||
| } | ||||
| 
 | ||||
| type TracingOptions struct { | ||||
| 	// Program must be of type Tracing with attach type | ||||
| 	// AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or | ||||
| 	// AttachTraceRawTp. | ||||
| 	Program *ebpf.Program | ||||
| } | ||||
| 
 | ||||
| type LSMOptions struct { | ||||
| 	// Program must be of type LSM with attach type | ||||
| 	// AttachLSMMac. | ||||
| 	Program *ebpf.Program | ||||
| } | ||||
| 
 | ||||
| // attachBTFID attaches BPF programs that target a BTF ID (Tracing/LSM programs). | ||||
| func attachBTFID(program *ebpf.Program) (Link, error) { | ||||
| 	if program.FD() < 0 { | ||||
| 		return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ | ||||
| 		ProgFd: uint32(program.FD()), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	raw := RawLink{fd: fd} | ||||
| 	info, err := raw.Info() | ||||
| 	if err != nil { | ||||
| 		raw.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if info.Type == RawTracepointType { | ||||
| 		// Sadness upon sadness: a Tracing program with AttachRawTp returns | ||||
| 		// a raw_tracepoint link. Other types return a tracing link. | ||||
| 		return &rawTracepoint{raw}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	return &tracing{RawLink: RawLink{fd: fd}}, nil | ||||
| } | ||||
| 
 | ||||
| // AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or | ||||
| // a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined | ||||
| // in kernel modules. | ||||
| func AttachTracing(opts TracingOptions) (Link, error) { | ||||
| 	if t := opts.Program.Type(); t != ebpf.Tracing { | ||||
| 		return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) | ||||
| 	} | ||||
| 
 | ||||
| 	return attachBTFID(opts.Program) | ||||
| } | ||||
| 
 | ||||
| // AttachLSM links a Linux security module (LSM) BPF Program to a BPF | ||||
| // hook defined in kernel modules. | ||||
| func AttachLSM(opts LSMOptions) (Link, error) { | ||||
| 	if t := opts.Program.Type(); t != ebpf.LSM { | ||||
| 		return nil, fmt.Errorf("invalid program type %s, expected LSM", t) | ||||
| 	} | ||||
| 
 | ||||
| 	return attachBTFID(opts.Program) | ||||
| } | ||||
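A sketch of the AttachTracing entry point defined above; prog is assumed to be a Tracing program (for example an fentry/fexit program) loaded elsewhere.

    package example

    import (
    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/link"
    )

    // attachFentry links a fentry/fexit/fmod_ret program to its target.
    // The attach target is specified when the program is loaded, so only
    // the program itself is passed here.
    func attachFentry(prog *ebpf.Program) (link.Link, error) {
    	return link.AttachTracing(link.TracingOptions{Program: prog})
    }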
							
								
								
									
373  vendor/github.com/cilium/ebpf/link/uprobe.go (generated, vendored, Normal file)
									
								
							|  | @ -0,0 +1,373 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"debug/elf" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events") | ||||
| 
 | ||||
| 	uprobeRetprobeBit = struct { | ||||
| 		once  sync.Once | ||||
| 		value uint64 | ||||
| 		err   error | ||||
| 	}{} | ||||
| 
 | ||||
| 	uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" | ||||
| 	// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 | ||||
| 	uprobeRefCtrOffsetShift = 32 | ||||
| 	haveRefCtrOffsetPMU     = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error { | ||||
| 		_, err := os.Stat(uprobeRefCtrOffsetPMUPath) | ||||
| 		if err != nil { | ||||
| 			return internal.ErrNotSupported | ||||
| 		} | ||||
| 		return nil | ||||
| 	}) | ||||
| 
 | ||||
| 	// ErrNoSymbol indicates that the given symbol was not found | ||||
| 	// in the ELF symbols table. | ||||
| 	ErrNoSymbol = errors.New("not found") | ||||
| ) | ||||
| 
 | ||||
| // Executable defines an executable program on the filesystem. | ||||
| type Executable struct { | ||||
| 	// Path of the executable on the filesystem. | ||||
| 	path string | ||||
| 	// Parsed ELF and dynamic symbols' addresses. | ||||
| 	addresses map[string]uint64 | ||||
| } | ||||
| 
 | ||||
| // UprobeOptions defines additional parameters that will be used | ||||
| // when loading Uprobes. | ||||
| type UprobeOptions struct { | ||||
| 	// Symbol address. Must be provided in case of external symbols (shared libs). | ||||
| 	// If set, overrides the address eventually parsed from the executable. | ||||
| 	Address uint64 | ||||
| 	// The offset relative to given symbol. Useful when tracing an arbitrary point | ||||
| 	// inside the frame of given symbol. | ||||
| 	// | ||||
| 	// Note: this field changed from being an absolute offset to being relative | ||||
| 	// to Address. | ||||
| 	Offset uint64 | ||||
| 	// Only set the uprobe on the given process ID. Useful when tracing | ||||
| 	// shared library calls or programs that have many running instances. | ||||
| 	PID int | ||||
| 	// Automatically manage SDT reference counts (semaphores). | ||||
| 	// | ||||
| 	// If this field is set, the Kernel will increment/decrement the | ||||
| 	// semaphore located in the process memory at the provided address on | ||||
| 	// probe attach/detach. | ||||
| 	// | ||||
| 	// See also: | ||||
| 	// sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling) | ||||
| 	// github.com/torvalds/linux/commit/1cc33161a83d | ||||
| 	// github.com/torvalds/linux/commit/a6ca88b241d5 | ||||
| 	RefCtrOffset uint64 | ||||
| 	// Arbitrary value that can be fetched from an eBPF program | ||||
| 	// via `bpf_get_attach_cookie()`. | ||||
| 	// | ||||
| 	// Needs kernel 5.15+. | ||||
| 	Cookie uint64 | ||||
| } | ||||
| 
 | ||||
| // To open a new Executable, use: | ||||
| // | ||||
| //  OpenExecutable("/bin/bash") | ||||
| // | ||||
| // The returned value can then be used to open Uprobe(s). | ||||
| func OpenExecutable(path string) (*Executable, error) { | ||||
| 	if path == "" { | ||||
| 		return nil, fmt.Errorf("path cannot be empty") | ||||
| 	} | ||||
| 
 | ||||
| 	f, err := os.Open(path) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("open file '%s': %w", path, err) | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 
 | ||||
| 	se, err := internal.NewSafeELFFile(f) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("parse ELF file: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN { | ||||
| 		// ELF is not an executable or a shared object. | ||||
| 		return nil, errors.New("the given file is not an executable or a shared object") | ||||
| 	} | ||||
| 
 | ||||
| 	ex := Executable{ | ||||
| 		path:      path, | ||||
| 		addresses: make(map[string]uint64), | ||||
| 	} | ||||
| 
 | ||||
| 	if err := ex.load(se); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &ex, nil | ||||
| } | ||||
| 
 | ||||
| func (ex *Executable) load(f *internal.SafeELFFile) error { | ||||
| 	syms, err := f.Symbols() | ||||
| 	if err != nil && !errors.Is(err, elf.ErrNoSymbols) { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	dynsyms, err := f.DynamicSymbols() | ||||
| 	if err != nil && !errors.Is(err, elf.ErrNoSymbols) { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	syms = append(syms, dynsyms...) | ||||
| 
 | ||||
| 	for _, s := range syms { | ||||
| 		if elf.ST_TYPE(s.Info) != elf.STT_FUNC { | ||||
| 			// Symbol not associated with a function or other executable code. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		address := s.Value | ||||
| 
 | ||||
| 		// Loop over ELF segments. | ||||
| 		for _, prog := range f.Progs { | ||||
| 			// Skip uninteresting segments. | ||||
| 			if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) { | ||||
| 				// If the symbol value is contained in the segment, calculate | ||||
| 				// the symbol offset. | ||||
| 				// | ||||
| 				// fn symbol offset = fn symbol VA - .text VA + .text offset | ||||
| 				// | ||||
| 				// stackoverflow.com/a/40249502 | ||||
| 				address = s.Value - prog.Vaddr + prog.Off | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		ex.addresses[s.Name] = address | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // address calculates the address of a symbol in the executable. | ||||
| // | ||||
| // opts must not be nil. | ||||
| func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) { | ||||
| 	if opts.Address > 0 { | ||||
| 		return opts.Address + opts.Offset, nil | ||||
| 	} | ||||
| 
 | ||||
| 	address, ok := ex.addresses[symbol] | ||||
| 	if !ok { | ||||
| 		return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) | ||||
| 	} | ||||
| 
 | ||||
| 	// Symbols with location 0 from section undef are shared library calls and | ||||
| 	// are relocated before the binary is executed. Dynamic linking is not | ||||
| 	// implemented by the library, so mark this as unsupported for now. | ||||
| 	// | ||||
| 	// Since only offset values are stored and not elf.Symbol, if the value is 0, | ||||
| 	// assume it's an external symbol. | ||||
| 	if address == 0 { | ||||
| 		return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ | ||||
| 			"(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) | ||||
| 	} | ||||
| 
 | ||||
| 	return address + opts.Offset, nil | ||||
| } | ||||
| 
 | ||||
| // Uprobe attaches the given eBPF program to a perf event that fires when the | ||||
| // given symbol starts executing in the given Executable. | ||||
| // For example, /bin/bash::main(): | ||||
| // | ||||
| //  ex, _ = OpenExecutable("/bin/bash") | ||||
| //  ex.Uprobe("main", prog, nil) | ||||
| // | ||||
| // When using symbols which belong to shared libraries, | ||||
| // an offset must be provided via options: | ||||
| // | ||||
| //  up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) | ||||
| // | ||||
| // Note: Setting the Offset field in the options supersedes the symbol's offset. | ||||
| // | ||||
| // Losing the reference to the resulting Link (up) will close the Uprobe | ||||
| // and prevent further execution of prog. The Link must be Closed during | ||||
| // program shutdown to avoid leaking system resources. | ||||
| // | ||||
| // Functions provided by shared libraries cannot currently be traced and | ||||
| // will result in an ErrNotSupported. | ||||
| func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { | ||||
| 	u, err := ex.uprobe(symbol, prog, opts, false) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	lnk, err := attachPerfEvent(u, prog) | ||||
| 	if err != nil { | ||||
| 		u.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return lnk, nil | ||||
| } | ||||
| 
 | ||||
| // Uretprobe attaches the given eBPF program to a perf event that fires right | ||||
| // before the given symbol exits. For example, /bin/bash::main(): | ||||
| // | ||||
| //  ex, _ = OpenExecutable("/bin/bash") | ||||
| //  ex.Uretprobe("main", prog, nil) | ||||
| // | ||||
| // When using symbols which belong to shared libraries, | ||||
| // an offset must be provided via options: | ||||
| // | ||||
| //  up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) | ||||
| // | ||||
| // Note: Setting the Offset field in the options supersedes the symbol's offset. | ||||
| // | ||||
| // Losing the reference to the resulting Link (up) will close the Uprobe | ||||
| // and prevent further execution of prog. The Link must be Closed during | ||||
| // program shutdown to avoid leaking system resources. | ||||
| // | ||||
| // Functions provided by shared libraries cannot currently be traced and | ||||
| // will result in an ErrNotSupported. | ||||
| func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { | ||||
| 	u, err := ex.uprobe(symbol, prog, opts, true) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	lnk, err := attachPerfEvent(u, prog) | ||||
| 	if err != nil { | ||||
| 		u.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return lnk, nil | ||||
| } | ||||
| 
 | ||||
| // uprobe opens a perf event for the given binary/symbol and attaches prog to it. | ||||
| // If ret is true, create a uretprobe. | ||||
| func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) { | ||||
| 	if prog == nil { | ||||
| 		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) | ||||
| 	} | ||||
| 	if prog.Type() != ebpf.Kprobe { | ||||
| 		return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) | ||||
| 	} | ||||
| 	if opts == nil { | ||||
| 		opts = &UprobeOptions{} | ||||
| 	} | ||||
| 
 | ||||
| 	offset, err := ex.address(symbol, opts) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	pid := opts.PID | ||||
| 	if pid == 0 { | ||||
| 		pid = perfAllThreads | ||||
| 	} | ||||
| 
 | ||||
| 	if opts.RefCtrOffset != 0 { | ||||
| 		if err := haveRefCtrOffsetPMU(); err != nil { | ||||
| 			return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	args := probeArgs{ | ||||
| 		symbol:       symbol, | ||||
| 		path:         ex.path, | ||||
| 		offset:       offset, | ||||
| 		pid:          pid, | ||||
| 		refCtrOffset: opts.RefCtrOffset, | ||||
| 		ret:          ret, | ||||
| 		cookie:       opts.Cookie, | ||||
| 	} | ||||
| 
 | ||||
| 	// Use uprobe PMU if the kernel has it available. | ||||
| 	tp, err := pmuUprobe(args) | ||||
| 	if err == nil { | ||||
| 		return tp, nil | ||||
| 	} | ||||
| 	if err != nil && !errors.Is(err, ErrNotSupported) { | ||||
| 		return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Use tracefs if uprobe PMU is missing. | ||||
| 	args.symbol = sanitizeSymbol(symbol) | ||||
| 	tp, err = tracefsUprobe(args) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return tp, nil | ||||
| } | ||||
| 
 | ||||
| // pmuUprobe opens a perf event based on the uprobe PMU. | ||||
| func pmuUprobe(args probeArgs) (*perfEvent, error) { | ||||
| 	return pmuProbe(uprobeType, args) | ||||
| } | ||||
| 
 | ||||
| // tracefsUprobe creates a Uprobe tracefs entry. | ||||
| func tracefsUprobe(args probeArgs) (*perfEvent, error) { | ||||
| 	return tracefsProbe(uprobeType, args) | ||||
| } | ||||
| 
 | ||||
| // sanitizeSymbol replaces every character invalid for the tracefs api with an underscore. | ||||
| // It is equivalent to regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(s, "_"). | ||||
| func sanitizeSymbol(s string) string { | ||||
| 	var b strings.Builder | ||||
| 	b.Grow(len(s)) | ||||
| 	var skip bool | ||||
| 	for _, c := range []byte(s) { | ||||
| 		switch { | ||||
| 		case c >= 'a' && c <= 'z', | ||||
| 			c >= 'A' && c <= 'Z', | ||||
| 			c >= '0' && c <= '9': | ||||
| 			skip = false | ||||
| 			b.WriteByte(c) | ||||
| 
 | ||||
| 		default: | ||||
| 			if !skip { | ||||
| 				b.WriteByte('_') | ||||
| 				skip = true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return b.String() | ||||
| } | ||||
| 
 | ||||
| // uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. | ||||
| func uprobeToken(args probeArgs) string { | ||||
| 	po := fmt.Sprintf("%s:%#x", args.path, args.offset) | ||||
| 
 | ||||
| 	if args.refCtrOffset != 0 { | ||||
| 		// This is not documented in Documentation/trace/uprobetracer.txt. | ||||
| 		// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 | ||||
| 		po += fmt.Sprintf("(%#x)", args.refCtrOffset) | ||||
| 	} | ||||
| 
 | ||||
| 	return po | ||||
| } | ||||
| 
 | ||||
| func uretprobeBit() (uint64, error) { | ||||
| 	uprobeRetprobeBit.once.Do(func() { | ||||
| 		uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType) | ||||
| 	}) | ||||
| 	return uprobeRetprobeBit.value, uprobeRetprobeBit.err | ||||
| } | ||||
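A sketch of the uprobe API in this file; the binary path and symbol are arbitrary examples, and prog is assumed to be a loaded Kprobe-type program.

    package example

    import (
    	"fmt"

    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/link"
    )

    // uprobeReadline fires prog whenever bash executes its readline symbol.
    func uprobeReadline(prog *ebpf.Program) (link.Link, error) {
    	ex, err := link.OpenExecutable("/bin/bash")
    	if err != nil {
    		return nil, fmt.Errorf("open executable: %w", err)
    	}

    	// A non-nil UprobeOptions could supply Address, Offset, PID or Cookie.
    	return ex.Uprobe("readline", prog, nil)
    }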
							
								
								
									
54  vendor/github.com/cilium/ebpf/link/xdp.go (generated, vendored, Normal file)
									
								
							|  | @ -0,0 +1,54 @@ | |||
| package link | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf" | ||||
| ) | ||||
| 
 | ||||
| // XDPAttachFlags represents how an XDP program will be attached to an interface. | ||||
| type XDPAttachFlags uint32 | ||||
| 
 | ||||
| const ( | ||||
| 	// XDPGenericMode (SKB) links XDP BPF program for drivers which do | ||||
| 	// not yet support native XDP. | ||||
| 	XDPGenericMode XDPAttachFlags = 1 << (iota + 1) | ||||
| 	// XDPDriverMode links XDP BPF program into the driver’s receive path. | ||||
| 	XDPDriverMode | ||||
| 	// XDPOffloadMode offloads the entire XDP BPF program into hardware. | ||||
| 	XDPOffloadMode | ||||
| ) | ||||
| 
 | ||||
| type XDPOptions struct { | ||||
| 	// Program must be an XDP BPF program. | ||||
| 	Program *ebpf.Program | ||||
| 
 | ||||
| 	// Interface is the interface index to attach program to. | ||||
| 	Interface int | ||||
| 
 | ||||
| 	// Flags is one of XDPAttachFlags (optional). | ||||
| 	// | ||||
| 	// Only one XDP mode should be set; without a flag, attachment defaults | ||||
| 	// to driver/generic mode (best effort). | ||||
| 	Flags XDPAttachFlags | ||||
| } | ||||
| 
 | ||||
| // AttachXDP links an XDP BPF program to an XDP hook. | ||||
| func AttachXDP(opts XDPOptions) (Link, error) { | ||||
| 	if t := opts.Program.Type(); t != ebpf.XDP { | ||||
| 		return nil, fmt.Errorf("invalid program type %s, expected XDP", t) | ||||
| 	} | ||||
| 
 | ||||
| 	if opts.Interface < 1 { | ||||
| 		return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) | ||||
| 	} | ||||
| 
 | ||||
| 	rawLink, err := AttachRawLink(RawLinkOptions{ | ||||
| 		Program: opts.Program, | ||||
| 		Attach:  ebpf.AttachXDP, | ||||
| 		Target:  opts.Interface, | ||||
| 		Flags:   uint32(opts.Flags), | ||||
| 	}) | ||||
| 
 | ||||
| 	return rawLink, err | ||||
| } | ||||
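A sketch of AttachXDP; prog is assumed to be a loaded XDP program, and the interface name is supplied by the caller as an example.

    package example

    import (
    	"fmt"
    	"net"

    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/link"
    )

    // attachXDP attaches prog to the named interface in generic (SKB) mode,
    // which works even when the driver lacks native XDP support.
    func attachXDP(prog *ebpf.Program, ifname string) (link.Link, error) {
    	iface, err := net.InterfaceByName(ifname)
    	if err != nil {
    		return nil, fmt.Errorf("lookup %s: %w", ifname, err)
    	}

    	return link.AttachXDP(link.XDPOptions{
    		Program:   prog,
    		Interface: iface.Index,
    		Flags:     link.XDPGenericMode,
    	})
    }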
							
								
								
									
299  vendor/github.com/cilium/ebpf/linker.go (generated, vendored)
									
									
								
							|  | @ -1,133 +1,238 @@ | |||
| package ebpf | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal/btf" | ||||
| 	"github.com/cilium/ebpf/btf" | ||||
| ) | ||||
| 
 | ||||
| // link resolves bpf-to-bpf calls. | ||||
| // splitSymbols splits insns into subsections delimited by Symbol Instructions. | ||||
| // insns cannot be empty and must start with a Symbol Instruction. | ||||
| // | ||||
| // Each library may contain multiple functions / labels, and is only linked | ||||
| // if prog references one of these functions. | ||||
| // The resulting map is indexed by Symbol name. | ||||
| func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { | ||||
| 	if len(insns) == 0 { | ||||
| 		return nil, errors.New("insns is empty") | ||||
| 	} | ||||
| 
 | ||||
| 	if insns[0].Symbol() == "" { | ||||
| 		return nil, errors.New("insns must start with a Symbol") | ||||
| 	} | ||||
| 
 | ||||
| 	var name string | ||||
| 	progs := make(map[string]asm.Instructions) | ||||
| 	for _, ins := range insns { | ||||
| 		if sym := ins.Symbol(); sym != "" { | ||||
| 			if progs[sym] != nil { | ||||
| 				return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) | ||||
| 			} | ||||
| 			name = sym | ||||
| 		} | ||||
| 
 | ||||
| 		progs[name] = append(progs[name], ins) | ||||
| 	} | ||||
| 
 | ||||
| 	return progs, nil | ||||
| } | ||||
| 
 | ||||
| // The linker is responsible for resolving bpf-to-bpf calls between programs | ||||
| // within an ELF. Each BPF program must be a self-contained binary blob, | ||||
| // so when an instruction in one ELF program section wants to jump to | ||||
| // a function in another, the linker needs to pull in the bytecode | ||||
| // (and BTF info) of the target function and concatenate the instruction | ||||
| // streams. | ||||
| // | ||||
| // Libraries also linked. | ||||
| func link(prog *ProgramSpec, libs []*ProgramSpec) error { | ||||
| 	var ( | ||||
| 		linked  = make(map[*ProgramSpec]bool) | ||||
| 		pending = []asm.Instructions{prog.Instructions} | ||||
| 		insns   asm.Instructions | ||||
| 	) | ||||
| 	for len(pending) > 0 { | ||||
| 		insns, pending = pending[0], pending[1:] | ||||
| 		for _, lib := range libs { | ||||
| 			if linked[lib] { | ||||
| 				continue | ||||
| 			} | ||||
| // Later on in the pipeline, all call sites are fixed up with relative jumps | ||||
| // within this newly-created instruction stream before it is finally handed | ||||
| // off to the kernel with BPF_PROG_LOAD. | ||||
| // | ||||
| // Each function is denoted by an ELF symbol and the compiler takes care of | ||||
| // register setup before each jump instruction. | ||||
| 
 | ||||
| 			needed, err := needSection(insns, lib.Instructions) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("linking %s: %w", lib.Name, err) | ||||
| 			} | ||||
| // hasFunctionReferences returns true if insns contains one or more bpf2bpf | ||||
| // function references. | ||||
| func hasFunctionReferences(insns asm.Instructions) bool { | ||||
| 	for _, i := range insns { | ||||
| 		if i.IsFunctionReference() { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| 			if !needed { | ||||
| 				continue | ||||
| 			} | ||||
| // applyRelocations collects and applies any CO-RE relocations in insns. | ||||
| // | ||||
| // Passing a nil target will relocate against the running kernel. insns are | ||||
| // modified in place. | ||||
| func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error { | ||||
| 	var relos []*btf.CORERelocation | ||||
| 	var reloInsns []*asm.Instruction | ||||
| 	iter := insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { | ||||
| 			relos = append(relos, relo) | ||||
| 			reloInsns = append(reloInsns, iter.Ins) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 			linked[lib] = true | ||||
| 			prog.Instructions = append(prog.Instructions, lib.Instructions...) | ||||
| 			pending = append(pending, lib.Instructions) | ||||
| 	if len(relos) == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 			if prog.BTF != nil && lib.BTF != nil { | ||||
| 				if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil { | ||||
| 					return fmt.Errorf("linking BTF of %s: %w", lib.Name, err) | ||||
| 				} | ||||
| 			} | ||||
| 	target, err := maybeLoadKernelBTF(target) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	fixups, err := btf.CORERelocate(local, target, relos) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	for i, fixup := range fixups { | ||||
| 		if err := fixup.Apply(reloInsns[i]); err != nil { | ||||
| 			return fmt.Errorf("apply fixup %s: %w", &fixup, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func needSection(insns, section asm.Instructions) (bool, error) { | ||||
| 	// A map of symbols to the libraries which contain them. | ||||
| 	symbols, err := section.SymbolOffsets() | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| // flattenPrograms resolves bpf-to-bpf calls for a set of programs. | ||||
| // | ||||
| // Links all programs in names by modifying their ProgramSpec in progs. | ||||
| func flattenPrograms(progs map[string]*ProgramSpec, names []string) { | ||||
| 	// Pre-calculate all function references. | ||||
| 	refs := make(map[*ProgramSpec][]string) | ||||
| 	for _, prog := range progs { | ||||
| 		refs[prog] = prog.Instructions.FunctionReferences() | ||||
| 	} | ||||
| 
 | ||||
| 	for _, ins := range insns { | ||||
| 		if ins.Reference == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if ins.Constant != -1 { | ||||
| 			// This is already a valid call, no need to link again. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if _, ok := symbols[ins.Reference]; !ok { | ||||
| 			// Symbol isn't available in this section | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// At this point we know that at least one function in the | ||||
| 		// library is called from insns, so we have to link it. | ||||
| 		return true, nil | ||||
| 	// Create a flattened instruction stream, but don't modify progs yet to | ||||
| 	// avoid linking multiple times. | ||||
| 	flattened := make([]asm.Instructions, 0, len(names)) | ||||
| 	for _, name := range names { | ||||
| 		flattened = append(flattened, flattenInstructions(name, progs, refs)) | ||||
| 	} | ||||
| 
 | ||||
| 	// None of the functions in the section are called. | ||||
| 	return false, nil | ||||
| 	// Finally, assign the flattened instructions. | ||||
| 	for i, name := range names { | ||||
| 		progs[name].Instructions = flattened[i] | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func fixupJumpsAndCalls(insns asm.Instructions) error { | ||||
| 	symbolOffsets := make(map[string]asm.RawInstructionOffset) | ||||
| // flattenInstructions resolves bpf-to-bpf calls for a single program. | ||||
| // | ||||
| // Flattens the instructions of prog by concatenating the instructions of all | ||||
| // direct and indirect dependencies. | ||||
| // | ||||
| // progs contains all referenceable programs, while refs contains the direct | ||||
| // dependencies of each program. | ||||
| func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { | ||||
| 	prog := progs[name] | ||||
| 
 | ||||
| 	insns := make(asm.Instructions, len(prog.Instructions)) | ||||
| 	copy(insns, prog.Instructions) | ||||
| 
 | ||||
| 	// Add all direct references of prog to the list of to be linked programs. | ||||
| 	pending := make([]string, len(refs[prog])) | ||||
| 	copy(pending, refs[prog]) | ||||
| 
 | ||||
| 	// All references for which we've appended instructions. | ||||
| 	linked := make(map[string]bool) | ||||
| 
 | ||||
| 	// Iterate all pending references. We can't use a range since pending is | ||||
| 	// modified in the body below. | ||||
| 	for len(pending) > 0 { | ||||
| 		var ref string | ||||
| 		ref, pending = pending[0], pending[1:] | ||||
| 
 | ||||
| 		if linked[ref] { | ||||
| 			// We've already linked this ref, don't append instructions again. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		progRef := progs[ref] | ||||
| 		if progRef == nil { | ||||
| 			// We don't have instructions that go with this reference. This | ||||
| 			// happens when calling extern functions. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		insns = append(insns, progRef.Instructions...) | ||||
| 		linked[ref] = true | ||||
| 
 | ||||
| 		// Make sure we link indirect references. | ||||
| 		pending = append(pending, refs[progRef]...) | ||||
| 	} | ||||
| 
 | ||||
| 	return insns | ||||
| } | ||||
| 
 | ||||
| // fixupAndValidate is called by the ELF reader right before marshaling the | ||||
| // instruction stream. It performs last-minute adjustments to the program and | ||||
| // runs some sanity checks before sending it off to the kernel. | ||||
| func fixupAndValidate(insns asm.Instructions) error { | ||||
| 	iter := insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		ins := iter.Ins | ||||
| 
 | ||||
| 		if ins.Symbol == "" { | ||||
| 			continue | ||||
| 		// Map load was tagged with a Reference, but does not contain a Map pointer. | ||||
| 		if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil { | ||||
| 			return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference) | ||||
| 		} | ||||
| 
 | ||||
| 		if _, ok := symbolOffsets[ins.Symbol]; ok { | ||||
| 			return fmt.Errorf("duplicate symbol %s", ins.Symbol) | ||||
| 		} | ||||
| 
 | ||||
| 		symbolOffsets[ins.Symbol] = iter.Offset | ||||
| 	} | ||||
| 
 | ||||
| 	iter = insns.Iterate() | ||||
| 	for iter.Next() { | ||||
| 		i := iter.Index | ||||
| 		offset := iter.Offset | ||||
| 		ins := iter.Ins | ||||
| 
 | ||||
| 		switch { | ||||
| 		case ins.IsFunctionCall() && ins.Constant == -1: | ||||
| 			// Rewrite bpf to bpf call | ||||
| 			callOffset, ok := symbolOffsets[ins.Reference] | ||||
| 			if !ok { | ||||
| 				return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference) | ||||
| 			} | ||||
| 
 | ||||
| 			ins.Constant = int64(callOffset - offset - 1) | ||||
| 
 | ||||
| 		case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1: | ||||
| 			// Rewrite jump to label | ||||
| 			jumpOffset, ok := symbolOffsets[ins.Reference] | ||||
| 			if !ok { | ||||
| 				return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference) | ||||
| 			} | ||||
| 
 | ||||
| 			ins.Offset = int16(jumpOffset - offset - 1) | ||||
| 		} | ||||
| 		fixupProbeReadKernel(ins) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) | ||||
| // with bpf_probe_read(_str) on kernels that don't support it yet. | ||||
| func fixupProbeReadKernel(ins *asm.Instruction) { | ||||
| 	if !ins.IsBuiltinCall() { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	// Kernel supports bpf_probe_read_kernel, nothing to do. | ||||
| 	if haveProbeReadKernel() == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	switch asm.BuiltinFunc(ins.Constant) { | ||||
| 	case asm.FnProbeReadKernel, asm.FnProbeReadUser: | ||||
| 		ins.Constant = int64(asm.FnProbeRead) | ||||
| 	case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: | ||||
| 		ins.Constant = int64(asm.FnProbeReadStr) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var kernelBTF struct { | ||||
| 	sync.Mutex | ||||
| 	spec *btf.Spec | ||||
| } | ||||
| 
 | ||||
| // maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise | ||||
| // it returns spec unchanged. | ||||
| // | ||||
| // The kernel BTF is cached for the lifetime of the process. | ||||
| func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) { | ||||
| 	if spec != nil { | ||||
| 		return spec, nil | ||||
| 	} | ||||
| 
 | ||||
| 	kernelBTF.Lock() | ||||
| 	defer kernelBTF.Unlock() | ||||
| 
 | ||||
| 	if kernelBTF.spec != nil { | ||||
| 		return kernelBTF.spec, nil | ||||
| 	} | ||||
| 
 | ||||
| 	var err error | ||||
| 	kernelBTF.spec, err = btf.LoadKernelSpec() | ||||
| 	return kernelBTF.spec, err | ||||
| } | ||||
|  |  | |||
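The flattening logic above is internal, but the worklist idea is easy to see on plain strings. The toy below mirrors the approach of flattenInstructions under that simplification; it is a sketch, not library code.

    package main

    import "fmt"

    // flatten appends the body of every direct and indirect reference of root
    // exactly once, mirroring the worklist used by flattenInstructions.
    func flatten(root string, body map[string]string, refs map[string][]string) string {
    	out := body[root]
    	pending := append([]string(nil), refs[root]...)
    	linked := map[string]bool{}

    	for len(pending) > 0 {
    		var ref string
    		ref, pending = pending[0], pending[1:]
    		if linked[ref] {
    			continue // already appended once
    		}
    		b, ok := body[ref]
    		if !ok {
    			continue // extern reference, nothing to pull in
    		}
    		out += b
    		linked[ref] = true
    		pending = append(pending, refs[ref]...) // follow indirect refs
    	}
    	return out
    }

    func main() {
    	body := map[string]string{"main": "M", "helper": "H", "leaf": "L"}
    	refs := map[string][]string{"main": {"helper"}, "helper": {"leaf"}}
    	fmt.Println(flatten("main", body, refs)) // prints MHL
    }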
							
								
								
									
712  vendor/github.com/cilium/ebpf/map.go (generated, vendored; file diff suppressed because it is too large)
											
										
									
								
							
							
								
								
									
83  vendor/github.com/cilium/ebpf/marshalers.go (generated, vendored)
									
									
								
							|  | @ -8,9 +8,11 @@ import ( | |||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"runtime" | ||||
| 	"sync" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| ) | ||||
| 
 | ||||
| // marshalPtr converts an arbitrary value into a pointer suitable | ||||
|  | @ -18,17 +20,17 @@ import ( | |||
| // | ||||
| // As an optimization, it returns the original value if it is an | ||||
| // unsafe.Pointer. | ||||
| func marshalPtr(data interface{}, length int) (internal.Pointer, error) { | ||||
| func marshalPtr(data interface{}, length int) (sys.Pointer, error) { | ||||
| 	if ptr, ok := data.(unsafe.Pointer); ok { | ||||
| 		return internal.NewPointer(ptr), nil | ||||
| 		return sys.NewPointer(ptr), nil | ||||
| 	} | ||||
| 
 | ||||
| 	buf, err := marshalBytes(data, length) | ||||
| 	if err != nil { | ||||
| 		return internal.Pointer{}, err | ||||
| 		return sys.Pointer{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	return internal.NewSlicePointer(buf), nil | ||||
| 	return sys.NewSlicePointer(buf), nil | ||||
| } | ||||
| 
 | ||||
| // marshalBytes converts an arbitrary value into a byte buffer. | ||||
|  | @ -39,6 +41,10 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) { | |||
| // Returns an error if the given value isn't representable in exactly | ||||
| // length bytes. | ||||
| func marshalBytes(data interface{}, length int) (buf []byte, err error) { | ||||
| 	if data == nil { | ||||
| 		return nil, errors.New("can't marshal a nil value") | ||||
| 	} | ||||
| 
 | ||||
| 	switch value := data.(type) { | ||||
| 	case encoding.BinaryMarshaler: | ||||
| 		buf, err = value.MarshalBinary() | ||||
|  | @ -68,29 +74,32 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) { | |||
| 	return buf, nil | ||||
| } | ||||
| 
 | ||||
| func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) { | ||||
| func makeBuffer(dst interface{}, length int) (sys.Pointer, []byte) { | ||||
| 	if ptr, ok := dst.(unsafe.Pointer); ok { | ||||
| 		return internal.NewPointer(ptr), nil | ||||
| 		return sys.NewPointer(ptr), nil | ||||
| 	} | ||||
| 
 | ||||
| 	buf := make([]byte, length) | ||||
| 	return internal.NewSlicePointer(buf), buf | ||||
| 	return sys.NewSlicePointer(buf), buf | ||||
| } | ||||
| 
 | ||||
| var bytesReaderPool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		return new(bytes.Reader) | ||||
| 	}, | ||||
| } | ||||
| 
 | ||||
| // unmarshalBytes converts a byte buffer into an arbitrary value. | ||||
| // | ||||
| // Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since | ||||
| // those have special cases that allow more types to be encoded. | ||||
| // | ||||
| // The common int32 and int64 types are handled directly to avoid the | ||||
| // unnecessary heap allocations that the generic default case incurs. | ||||
| func unmarshalBytes(data interface{}, buf []byte) error { | ||||
| 	switch value := data.(type) { | ||||
| 	case unsafe.Pointer: | ||||
| 		sh := &reflect.SliceHeader{ | ||||
| 			Data: uintptr(value), | ||||
| 			Len:  len(buf), | ||||
| 			Cap:  len(buf), | ||||
| 		} | ||||
| 
 | ||||
| 		dst := *(*[]byte)(unsafe.Pointer(sh)) | ||||
| 		dst := unsafe.Slice((*byte)(value), len(buf)) | ||||
| 		copy(dst, buf) | ||||
| 		runtime.KeepAlive(value) | ||||
| 		return nil | ||||
|  | @ -104,12 +113,38 @@ func unmarshalBytes(data interface{}, buf []byte) error { | |||
| 	case *[]byte: | ||||
| 		*value = buf | ||||
| 		return nil | ||||
| 	case *int32: | ||||
| 		if len(buf) < 4 { | ||||
| 			return errors.New("int32 requires 4 bytes") | ||||
| 		} | ||||
| 		*value = int32(internal.NativeEndian.Uint32(buf)) | ||||
| 		return nil | ||||
| 	case *uint32: | ||||
| 		if len(buf) < 4 { | ||||
| 			return errors.New("uint32 requires 4 bytes") | ||||
| 		} | ||||
| 		*value = internal.NativeEndian.Uint32(buf) | ||||
| 		return nil | ||||
| 	case *int64: | ||||
| 		if len(buf) < 8 { | ||||
| 			return errors.New("int64 requires 8 bytes") | ||||
| 		} | ||||
| 		*value = int64(internal.NativeEndian.Uint64(buf)) | ||||
| 		return nil | ||||
| 	case *uint64: | ||||
| 		if len(buf) < 8 { | ||||
| 			return errors.New("uint64 requires 8 bytes") | ||||
| 		} | ||||
| 		*value = internal.NativeEndian.Uint64(buf) | ||||
| 		return nil | ||||
| 	case string: | ||||
| 		return errors.New("require pointer to string") | ||||
| 	case []byte: | ||||
| 		return errors.New("require pointer to []byte") | ||||
| 	default: | ||||
| 		rd := bytes.NewReader(buf) | ||||
| 		rd := bytesReaderPool.Get().(*bytes.Reader) | ||||
| 		rd.Reset(buf) | ||||
| 		defer bytesReaderPool.Put(rd) | ||||
| 		if err := binary.Read(rd, internal.NativeEndian, value); err != nil { | ||||
| 			return fmt.Errorf("decoding %T: %v", value, err) | ||||
| 		} | ||||
|  | @ -123,38 +158,38 @@ func unmarshalBytes(data interface{}, buf []byte) error { | |||
| // Values are initialized to zero if the slice has less elements than CPUs. | ||||
| // | ||||
| // slice must have a type like []elementType. | ||||
| func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) { | ||||
| func marshalPerCPUValue(slice interface{}, elemLength int) (sys.Pointer, error) { | ||||
| 	sliceType := reflect.TypeOf(slice) | ||||
| 	if sliceType.Kind() != reflect.Slice { | ||||
| 		return internal.Pointer{}, errors.New("per-CPU value requires slice") | ||||
| 		return sys.Pointer{}, errors.New("per-CPU value requires slice") | ||||
| 	} | ||||
| 
 | ||||
| 	possibleCPUs, err := internal.PossibleCPUs() | ||||
| 	if err != nil { | ||||
| 		return internal.Pointer{}, err | ||||
| 		return sys.Pointer{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	sliceValue := reflect.ValueOf(slice) | ||||
| 	sliceLen := sliceValue.Len() | ||||
| 	if sliceLen > possibleCPUs { | ||||
| 		return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") | ||||
| 		return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") | ||||
| 	} | ||||
| 
 | ||||
| 	alignedElemLength := align(elemLength, 8) | ||||
| 	alignedElemLength := internal.Align(elemLength, 8) | ||||
| 	buf := make([]byte, alignedElemLength*possibleCPUs) | ||||
| 
 | ||||
| 	for i := 0; i < sliceLen; i++ { | ||||
| 		elem := sliceValue.Index(i).Interface() | ||||
| 		elemBytes, err := marshalBytes(elem, elemLength) | ||||
| 		if err != nil { | ||||
| 			return internal.Pointer{}, err | ||||
| 			return sys.Pointer{}, err | ||||
| 		} | ||||
| 
 | ||||
| 		offset := i * alignedElemLength | ||||
| 		copy(buf[offset:offset+elemLength], elemBytes) | ||||
| 	} | ||||
| 
 | ||||
| 	return internal.NewSlicePointer(buf), nil | ||||
| 	return sys.NewSlicePointer(buf), nil | ||||
| } | ||||
| 
 | ||||
| // unmarshalPerCPUValue decodes a buffer into a slice containing one value per | ||||
|  | @ -210,7 +245,3 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro | |||
| 	reflect.ValueOf(slicePtr).Elem().Set(slice) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func align(n, alignment int) int { | ||||
| 	return (int(n) + alignment - 1) / alignment * alignment | ||||
| } | ||||
|  |  | |||
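Per-CPU values are padded to 8 bytes per element before being handed to the kernel (internal.Align above). The toy below only makes that buffer sizing concrete; it is a sketch, not library code.

    package main

    import "fmt"

    // align rounds n up to the next multiple of alignment.
    func align(n, alignment int) int {
    	return (n + alignment - 1) / alignment * alignment
    }

    func main() {
    	// A 12-byte per-CPU element occupies 16 bytes per possible CPU in the
    	// marshalled buffer, so 4 CPUs need a 64-byte buffer.
    	elem, cpus := 12, 4
    	fmt.Println(align(elem, 8) * cpus) // prints 64
    }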
							
								
								
									
42  vendor/github.com/cilium/ebpf/pinning.go (generated, vendored)
									
									
								
							|  | @ -1,42 +0,0 @@ | |||
| package ebpf | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| ) | ||||
| 
 | ||||
| func pin(currentPath, newPath string, fd *internal.FD) error { | ||||
| 	if newPath == "" { | ||||
| 		return errors.New("given pinning path cannot be empty") | ||||
| 	} | ||||
| 	if currentPath == "" { | ||||
| 		return internal.BPFObjPin(newPath, fd) | ||||
| 	} | ||||
| 	if currentPath == newPath { | ||||
| 		return nil | ||||
| 	} | ||||
| 	var err error | ||||
| 	// Object is now moved to the new pinning path. | ||||
| 	if err = os.Rename(currentPath, newPath); err == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if !os.IsNotExist(err) { | ||||
| 		return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) | ||||
| 	} | ||||
| 	// Internal state not in sync with the file system so let's fix it. | ||||
| 	return internal.BPFObjPin(newPath, fd) | ||||
| } | ||||
| 
 | ||||
| func unpin(pinnedPath string) error { | ||||
| 	if pinnedPath == "" { | ||||
| 		return nil | ||||
| 	} | ||||
| 	err := os.Remove(pinnedPath) | ||||
| 	if err == nil || os.IsNotExist(err) { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
							
								
								
									
										693
									
								
								vendor/github.com/cilium/ebpf/prog.go
									
										
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										693
									
								
								vendor/github.com/cilium/ebpf/prog.go
									
										
									
										generated
									
									
										vendored
									
									
								
							|  | @ -7,12 +7,14 @@ import ( | |||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/btf" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/btf" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
|  | @ -41,6 +43,13 @@ type ProgramOptions struct { | |||
| 	// Controls the output buffer size for the verifier. Defaults to | ||||
| 	// DefaultVerifierLogSize. | ||||
| 	LogSize int | ||||
| 	// Type information used for CO-RE relocations and when attaching to | ||||
| 	// kernel functions. | ||||
| 	// | ||||
| 	// This is useful in environments where the kernel BTF is not available | ||||
| 	// (containers) or where it is in a non-standard location. Defaults to | ||||
| 	// use the kernel BTF from a well-known location if nil. | ||||
| 	KernelTypes *btf.Spec | ||||
| } | ||||
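In practice, KernelTypes is usually fed from an externally shipped BTF blob. A hedged sketch follows; the file path is a made-up example, and btf.LoadSpec is assumed to be available in this version of the package:

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// loadWithExternalBTF loads type information from an external BTF file and
// hands it to the loader for CO-RE relocations and attach-target resolution.
func loadWithExternalBTF(progSpec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	kernelTypes, err := btf.LoadSpec("/var/lib/btf/vmlinux") // hypothetical path
	if err != nil {
		log.Printf("no external BTF, falling back to kernel BTF: %v", err)
		kernelTypes = nil // nil: use the well-known kernel locations
	}
	return ebpf.NewProgramWithOptions(progSpec, ebpf.ProgramOptions{
		KernelTypes: kernelTypes,
	})
}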
| 
 | ||||
| // ProgramSpec defines a Program. | ||||
|  | @ -48,29 +57,48 @@ type ProgramSpec struct { | |||
| 	// Name is passed to the kernel as a debug aid. Must only contain | ||||
| 	// alpha numeric and '_' characters. | ||||
| 	Name string | ||||
| 
 | ||||
| 	// Type determines at which hook in the kernel a program will run. | ||||
| 	Type       ProgramType | ||||
| 	Type ProgramType | ||||
| 
 | ||||
| 	// AttachType of the program, needed to differentiate allowed context | ||||
| 	// accesses in some newer program types like CGroupSockAddr. | ||||
| 	// | ||||
| 	// Available on kernels 4.17 and later. | ||||
| 	AttachType AttachType | ||||
| 	// Name of a kernel data structure to attach to. It's interpretation | ||||
| 	// depends on Type and AttachType. | ||||
| 	AttachTo     string | ||||
| 
 | ||||
| 	// Name of a kernel data structure or function to attach to. Its | ||||
| 	// interpretation depends on Type and AttachType. | ||||
| 	AttachTo string | ||||
| 
 | ||||
| 	// The program to attach to. Must be provided manually. | ||||
| 	AttachTarget *Program | ||||
| 
 | ||||
| 	// The name of the ELF section this program originated from. | ||||
| 	SectionName string | ||||
| 
 | ||||
| 	Instructions asm.Instructions | ||||
| 
 | ||||
| 	// Flags is passed to the kernel and specifies additional program | ||||
| 	// load attributes. | ||||
| 	Flags uint32 | ||||
| 
 | ||||
| 	// License of the program. Some helpers are only available if | ||||
| 	// the license is deemed compatible with the GPL. | ||||
| 	// | ||||
| 	// See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1 | ||||
| 	License string | ||||
| 
 | ||||
| 	// Version used by tracing programs. | ||||
| 	// Version used by Kprobe programs. | ||||
| 	// | ||||
| 	// Deprecated: superseded by BTF. | ||||
| 	// Deprecated on kernels 5.0 and later. Leave empty to let the library | ||||
| 	// detect this value automatically. | ||||
| 	KernelVersion uint32 | ||||
| 
 | ||||
| 	// The BTF associated with this program. Changing Instructions | ||||
| 	// will most likely invalidate the contained data, and may | ||||
| 	// result in errors when attempting to load it into the kernel. | ||||
| 	BTF *btf.Program | ||||
| 	BTF *btf.Spec | ||||
| 
 | ||||
| 	// The byte order this program was compiled for, may be nil. | ||||
| 	ByteOrder binary.ByteOrder | ||||
|  | @ -95,6 +123,8 @@ func (ps *ProgramSpec) Tag() (string, error) { | |||
| 	return ps.Instructions.Tag(internal.NativeEndian) | ||||
| } | ||||
| 
 | ||||
| type VerifierError = internal.VerifierError | ||||
| 
 | ||||
| // Program represents BPF program loaded into the kernel. | ||||
| // | ||||
| // It is not safe to close a Program which is used by other goroutines. | ||||
|  | @ -103,7 +133,7 @@ type Program struct { | |||
| 	// otherwise it is empty. | ||||
| 	VerifierLog string | ||||
| 
 | ||||
| 	fd         *internal.FD | ||||
| 	fd         *sys.FD | ||||
| 	name       string | ||||
| 	pinnedPath string | ||||
| 	typ        ProgramType | ||||
|  | @ -111,8 +141,7 @@ type Program struct { | |||
| 
 | ||||
| // NewProgram creates a new Program. | ||||
| // | ||||
| // Loading a program for the first time will perform | ||||
| // feature detection by loading small, temporary programs. | ||||
| // See NewProgramWithOptions for details. | ||||
| func NewProgram(spec *ProgramSpec) (*Program, error) { | ||||
| 	return NewProgramWithOptions(spec, ProgramOptions{}) | ||||
| } | ||||
|  | @ -121,97 +150,129 @@ func NewProgram(spec *ProgramSpec) (*Program, error) { | |||
| // | ||||
| // Loading a program for the first time will perform | ||||
| // feature detection by loading small, temporary programs. | ||||
| // | ||||
| // Returns an error wrapping VerifierError if the program or its BTF is rejected | ||||
| // by the kernel. | ||||
| func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { | ||||
| 	btfs := make(btfHandleCache) | ||||
| 	defer btfs.close() | ||||
| 
 | ||||
| 	return newProgramWithOptions(spec, opts, btfs) | ||||
| } | ||||
| 
 | ||||
| func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandleCache) (*Program, error) { | ||||
| 	if len(spec.Instructions) == 0 { | ||||
| 		return nil, errors.New("Instructions cannot be empty") | ||||
| 	if spec == nil { | ||||
| 		return nil, errors.New("can't load a program from a nil spec") | ||||
| 	} | ||||
| 
 | ||||
| 	if len(spec.License) == 0 { | ||||
| 		return nil, errors.New("License cannot be empty") | ||||
| 	handles := newHandleCache() | ||||
| 	defer handles.close() | ||||
| 
 | ||||
| 	prog, err := newProgramWithOptions(spec, opts, handles) | ||||
| 	if errors.Is(err, asm.ErrUnsatisfiedMapReference) { | ||||
| 		return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) | ||||
| 	} | ||||
| 	return prog, err | ||||
| } | ||||
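A small usage sketch of the VerifierError alias introduced above (illustrative, not part of the upstream change): inspecting the full verifier log when loading fails.

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
)

// loadOrExplain loads a ProgramSpec and, on rejection, prints the multi-line
// verifier log via the VerifierError wrapped into the returned error.
func loadOrExplain(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{})
	if err != nil {
		var verr *ebpf.VerifierError
		if errors.As(err, &verr) {
			fmt.Printf("verifier rejected program:\n%+v\n", verr)
		}
		return nil, err
	}
	return prog, nil
}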
| 
 | ||||
| func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) { | ||||
| 	if len(spec.Instructions) == 0 { | ||||
| 		return nil, errors.New("instructions cannot be empty") | ||||
| 	} | ||||
| 
 | ||||
| 	if spec.Type == UnspecifiedProgram { | ||||
| 		return nil, errors.New("can't load program of unspecified type") | ||||
| 	} | ||||
| 
 | ||||
| 	if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { | ||||
| 		return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) | ||||
| 	} | ||||
| 
 | ||||
| 	insns := make(asm.Instructions, len(spec.Instructions)) | ||||
| 	copy(insns, spec.Instructions) | ||||
| 
 | ||||
| 	if err := fixupJumpsAndCalls(insns); err != nil { | ||||
| 		return nil, err | ||||
| 	// Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load") | ||||
| 	// require the version field to be set to the value of the KERNEL_VERSION | ||||
| 	// macro for kprobe-type programs. | ||||
| 	// Overwrite Kprobe program version if set to zero or the magic version constant. | ||||
| 	kv := spec.KernelVersion | ||||
| 	if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { | ||||
| 		v, err := internal.KernelVersion() | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("detecting kernel version: %w", err) | ||||
| 		} | ||||
| 		kv = v.Kernel() | ||||
| 	} | ||||
| 
 | ||||
| 	buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) | ||||
| 	err := insns.Marshal(buf, internal.NativeEndian) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	bytecode := buf.Bytes() | ||||
| 	insCount := uint32(len(bytecode) / asm.InstructionSize) | ||||
| 	attr := &bpfProgLoadAttr{ | ||||
| 		progType:           spec.Type, | ||||
| 		expectedAttachType: spec.AttachType, | ||||
| 		insCount:           insCount, | ||||
| 		instructions:       internal.NewSlicePointer(bytecode), | ||||
| 		license:            internal.NewStringPointer(spec.License), | ||||
| 		kernelVersion:      spec.KernelVersion, | ||||
| 	attr := &sys.ProgLoadAttr{ | ||||
| 		ProgType:           sys.ProgType(spec.Type), | ||||
| 		ProgFlags:          spec.Flags, | ||||
| 		ExpectedAttachType: sys.AttachType(spec.AttachType), | ||||
| 		License:            sys.NewStringPointer(spec.License), | ||||
| 		KernVersion:        kv, | ||||
| 	} | ||||
| 
 | ||||
| 	if haveObjName() == nil { | ||||
| 		attr.progName = newBPFObjName(spec.Name) | ||||
| 		attr.ProgName = sys.NewObjName(spec.Name) | ||||
| 	} | ||||
| 
 | ||||
| 	kernelTypes := opts.KernelTypes | ||||
| 
 | ||||
| 	insns := make(asm.Instructions, len(spec.Instructions)) | ||||
| 	copy(insns, spec.Instructions) | ||||
| 
 | ||||
| 	var btfDisabled bool | ||||
| 	if spec.BTF != nil { | ||||
| 		if relos, err := btf.ProgramRelocations(spec.BTF, nil); err != nil { | ||||
| 			return nil, fmt.Errorf("CO-RE relocations: %s", err) | ||||
| 		} else if len(relos) > 0 { | ||||
| 			return nil, fmt.Errorf("applying CO-RE relocations: %w", ErrNotSupported) | ||||
| 		if err := applyRelocations(insns, spec.BTF, kernelTypes); err != nil { | ||||
| 			return nil, fmt.Errorf("apply CO-RE relocations: %w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		handle, err := btfs.load(btf.ProgramSpec(spec.BTF)) | ||||
| 		handle, err := handles.btfHandle(spec.BTF) | ||||
| 		btfDisabled = errors.Is(err, btf.ErrNotSupported) | ||||
| 		if err != nil && !btfDisabled { | ||||
| 			return nil, fmt.Errorf("load BTF: %w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if handle != nil { | ||||
| 			attr.progBTFFd = uint32(handle.FD()) | ||||
| 			attr.ProgBtfFd = uint32(handle.FD()) | ||||
| 
 | ||||
| 			recSize, bytes, err := btf.ProgramLineInfos(spec.BTF) | ||||
| 			fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("get BTF line infos: %w", err) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			attr.lineInfoRecSize = recSize | ||||
| 			attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) | ||||
| 			attr.lineInfo = internal.NewSlicePointer(bytes) | ||||
| 
 | ||||
| 			recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("get BTF function infos: %w", err) | ||||
| 			} | ||||
| 			attr.funcInfoRecSize = recSize | ||||
| 			attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) | ||||
| 			attr.funcInfo = internal.NewSlicePointer(bytes) | ||||
| 			attr.FuncInfoRecSize = btf.FuncInfoSize | ||||
| 			attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize | ||||
| 			attr.FuncInfo = sys.NewSlicePointer(fib) | ||||
| 
 | ||||
| 			attr.LineInfoRecSize = btf.LineInfoSize | ||||
| 			attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize | ||||
| 			attr.LineInfo = sys.NewSlicePointer(lib) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if spec.AttachTo != "" { | ||||
| 		target, err := resolveBTFType(spec.AttachTo, spec.Type, spec.AttachType) | ||||
| 	if err := fixupAndValidate(insns); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) | ||||
| 	err := insns.Marshal(buf, internal.NativeEndian) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	bytecode := buf.Bytes() | ||||
| 	attr.Insns = sys.NewSlicePointer(bytecode) | ||||
| 	attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) | ||||
| 
 | ||||
| 	if spec.AttachTarget != nil { | ||||
| 		targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 			return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) | ||||
| 		} | ||||
| 		if target != nil { | ||||
| 			attr.attachBTFID = target.ID() | ||||
| 
 | ||||
| 		attr.AttachBtfId = uint32(targetID) | ||||
| 		attr.AttachProgFd = uint32(spec.AttachTarget.FD()) | ||||
| 		defer runtime.KeepAlive(spec.AttachTarget) | ||||
| 	} else if spec.AttachTo != "" { | ||||
| 		targetID, err := findTargetInKernel(kernelTypes, spec.AttachTo, spec.Type, spec.AttachType) | ||||
| 		if err != nil && !errors.Is(err, errUnrecognizedAttachType) { | ||||
| 			// We ignore errUnrecognizedAttachType since AttachTo may be non-empty | ||||
| 			// for programs that don't attach anywhere. | ||||
| 			return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) | ||||
| 		} | ||||
| 
 | ||||
| 		attr.AttachBtfId = uint32(targetID) | ||||
| 	} | ||||
| 
 | ||||
| 	logSize := DefaultVerifierLogSize | ||||
|  | @ -222,36 +283,46 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl | |||
| 	var logBuf []byte | ||||
| 	if opts.LogLevel > 0 { | ||||
| 		logBuf = make([]byte, logSize) | ||||
| 		attr.logLevel = opts.LogLevel | ||||
| 		attr.logSize = uint32(len(logBuf)) | ||||
| 		attr.logBuf = internal.NewSlicePointer(logBuf) | ||||
| 		attr.LogLevel = opts.LogLevel | ||||
| 		attr.LogSize = uint32(len(logBuf)) | ||||
| 		attr.LogBuf = sys.NewSlicePointer(logBuf) | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfProgLoad(attr) | ||||
| 	fd, err := sys.ProgLoad(attr) | ||||
| 	if err == nil { | ||||
| 		return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil | ||||
| 		return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	logErr := err | ||||
| 	if opts.LogLevel == 0 { | ||||
| 	if opts.LogLevel == 0 && opts.LogSize >= 0 { | ||||
| 		// Re-run with the verifier enabled to get better error messages. | ||||
| 		logBuf = make([]byte, logSize) | ||||
| 		attr.logLevel = 1 | ||||
| 		attr.logSize = uint32(len(logBuf)) | ||||
| 		attr.logBuf = internal.NewSlicePointer(logBuf) | ||||
| 
 | ||||
| 		_, logErr = bpfProgLoad(attr) | ||||
| 		attr.LogLevel = 1 | ||||
| 		attr.LogSize = uint32(len(logBuf)) | ||||
| 		attr.LogBuf = sys.NewSlicePointer(logBuf) | ||||
| 		_, _ = sys.ProgLoad(attr) | ||||
| 	} | ||||
| 
 | ||||
| 	if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 { | ||||
| 		// EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can | ||||
| 		// check that the log is empty to reduce false positives. | ||||
| 		return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr) | ||||
| 	switch { | ||||
| 	case errors.Is(err, unix.EPERM): | ||||
| 		if len(logBuf) > 0 && logBuf[0] == 0 { | ||||
| 			// EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can | ||||
| 			// check that the log is empty to reduce false positives. | ||||
| 			return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) | ||||
| 		} | ||||
| 
 | ||||
| 		fallthrough | ||||
| 
 | ||||
| 	case errors.Is(err, unix.EINVAL): | ||||
| 		if hasFunctionReferences(spec.Instructions) { | ||||
| 			if err := haveBPFToBPFCalls(); err != nil { | ||||
| 				return nil, fmt.Errorf("load program: %w", err) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = internal.ErrorWithLog(err, logBuf, logErr) | ||||
| 	err = internal.ErrorWithLog(err, logBuf) | ||||
| 	if btfDisabled { | ||||
| 		return nil, fmt.Errorf("load program without BTF: %w", err) | ||||
| 		return nil, fmt.Errorf("load program: %w (BTF disabled)", err) | ||||
| 	} | ||||
| 	return nil, fmt.Errorf("load program: %w", err) | ||||
| } | ||||
|  | @ -262,18 +333,21 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl | |||
| // | ||||
| // Requires at least Linux 4.10. | ||||
| func NewProgramFromFD(fd int) (*Program, error) { | ||||
| 	if fd < 0 { | ||||
| 		return nil, errors.New("invalid fd") | ||||
| 	f, err := sys.NewFD(fd) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return newProgramFromFD(internal.NewFD(uint32(fd))) | ||||
| 	return newProgramFromFD(f) | ||||
| } | ||||
| 
 | ||||
| // NewProgramFromID returns the program for a given id. | ||||
| // | ||||
| // Returns ErrNotExist, if there is no eBPF program with the given id. | ||||
| func NewProgramFromID(id ProgramID) (*Program, error) { | ||||
| 	fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) | ||||
| 	fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ | ||||
| 		Id: uint32(id), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("get program by id: %w", err) | ||||
| 	} | ||||
|  | @ -281,7 +355,7 @@ func NewProgramFromID(id ProgramID) (*Program, error) { | |||
| 	return newProgramFromFD(fd) | ||||
| } | ||||
| 
 | ||||
| func newProgramFromFD(fd *internal.FD) (*Program, error) { | ||||
| func newProgramFromFD(fd *sys.FD) (*Program, error) { | ||||
| 	info, err := newProgramInfoFromFd(fd) | ||||
| 	if err != nil { | ||||
| 		fd.Close() | ||||
|  | @ -310,18 +384,29 @@ func (p *Program) Info() (*ProgramInfo, error) { | |||
| 	return newProgramInfoFromFd(p.fd) | ||||
| } | ||||
| 
 | ||||
| // Handle returns a reference to the program's type information in the kernel. | ||||
| // | ||||
| // Returns ErrNotSupported if the kernel has no BTF support, or if there is no | ||||
| // BTF associated with the program. | ||||
| func (p *Program) Handle() (*btf.Handle, error) { | ||||
| 	info, err := p.Info() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	id, ok := info.BTFID() | ||||
| 	if !ok { | ||||
| 		return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) | ||||
| 	} | ||||
| 
 | ||||
| 	return btf.NewHandleFromID(id) | ||||
| } | ||||
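Illustrative use of Handle (the type name looked up below is hypothetical): turning a loaded program's BTF back into a Spec for type lookups, mirroring what findTargetInProgram does further down.

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// inspectProgramBTF fetches the BTF handle of a loaded program and queries
// its Spec for a function type by name.
func inspectProgramBTF(prog *ebpf.Program) {
	h, err := prog.Handle()
	if err != nil {
		log.Fatalf("program BTF: %v", err)
	}
	defer h.Close()

	spec, err := h.Spec(nil) // nil base: program BTF is self-contained
	if err != nil {
		log.Fatal(err)
	}

	var fn *btf.Func
	if err := spec.TypeByName("my_prog_func", &fn); err != nil { // hypothetical name
		log.Printf("type not found: %v", err)
	}
}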
| 
 | ||||
| // FD gets the file descriptor of the Program. | ||||
| // | ||||
| // It is invalid to call this function after Close has been called. | ||||
| func (p *Program) FD() int { | ||||
| 	fd, err := p.fd.Value() | ||||
| 	if err != nil { | ||||
| 		// Best effort: -1 is the number most likely to be an | ||||
| 		// invalid file descriptor. | ||||
| 		return -1 | ||||
| 	} | ||||
| 
 | ||||
| 	return int(fd) | ||||
| 	return p.fd.Int() | ||||
| } | ||||
| 
 | ||||
| // Clone creates a duplicate of the Program. | ||||
|  | @ -345,9 +430,12 @@ func (p *Program) Clone() (*Program, error) { | |||
| // Pin persists the Program on the BPF virtual file system past the lifetime of | ||||
| // the process that created it | ||||
| // | ||||
| // Calling Pin on a previously pinned program will overwrite the path, except when | ||||
| // the new path already exists. Re-pinning across filesystems is not supported. | ||||
| // | ||||
| // This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs | ||||
| func (p *Program) Pin(fileName string) error { | ||||
| 	if err := pin(p.pinnedPath, fileName, p.fd); err != nil { | ||||
| 	if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	p.pinnedPath = fileName | ||||
|  | @ -360,7 +448,7 @@ func (p *Program) Pin(fileName string) error { | |||
| // | ||||
| // Unpinning an unpinned Program returns nil. | ||||
| func (p *Program) Unpin() error { | ||||
| 	if err := unpin(p.pinnedPath); err != nil { | ||||
| 	if err := internal.Unpin(p.pinnedPath); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	p.pinnedPath = "" | ||||
|  | @ -369,13 +457,12 @@ func (p *Program) Unpin() error { | |||
| 
 | ||||
| // IsPinned returns true if the Program has a non-empty pinned path. | ||||
| func (p *Program) IsPinned() bool { | ||||
| 	if p.pinnedPath == "" { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| 	return p.pinnedPath != "" | ||||
| } | ||||
| 
 | ||||
| // Close unloads the program from the kernel. | ||||
| // Close the Program's underlying file descriptor, which could unload | ||||
| // the program from the kernel if it is not pinned or attached to a | ||||
| // kernel hook. | ||||
| func (p *Program) Close() error { | ||||
| 	if p == nil { | ||||
| 		return nil | ||||
|  | @ -384,6 +471,28 @@ func (p *Program) Close() error { | |||
| 	return p.fd.Close() | ||||
| } | ||||
| 
 | ||||
| // Various options for Run'ing a Program | ||||
| type RunOptions struct { | ||||
| 	// Program's data input. Required field. | ||||
| 	Data []byte | ||||
| 	// Program's data after Program has run. Caller must allocate. Optional field. | ||||
| 	DataOut []byte | ||||
| 	// Program's context input. Optional field. | ||||
| 	Context interface{} | ||||
| 	// Program's context after Program has run. Must be a pointer or slice. Optional field. | ||||
| 	ContextOut interface{} | ||||
| 	// Number of times to run Program. Optional field. Defaults to 1. | ||||
| 	Repeat uint32 | ||||
| 	// Optional flags. | ||||
| 	Flags uint32 | ||||
| 	// CPU to run Program on. Optional field. | ||||
| 	// Note not all program types support this field. | ||||
| 	CPU uint32 | ||||
| 	// Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer | ||||
| 	// or similar. Typically used during benchmarking. Optional field. | ||||
| 	Reset func() | ||||
| } | ||||
| 
 | ||||
| // Test runs the Program in the kernel with the given input and returns the | ||||
| // value returned by the eBPF program. outLen may be zero. | ||||
| // | ||||
|  | @ -392,11 +501,38 @@ func (p *Program) Close() error { | |||
| // | ||||
| // This function requires at least Linux 4.12. | ||||
| func (p *Program) Test(in []byte) (uint32, []byte, error) { | ||||
| 	ret, out, _, err := p.testRun(in, 1, nil) | ||||
| 	// Older kernels ignore the dataSizeOut argument when copying to user space. | ||||
| 	// Combined with things like bpf_xdp_adjust_head() we don't really know what the final | ||||
| 	// size will be. Hence we allocate an output buffer which we hope will always be large | ||||
| 	// enough, and panic if the kernel wrote past the end of the allocation. | ||||
| 	// See https://patchwork.ozlabs.org/cover/1006822/ | ||||
| 	var out []byte | ||||
| 	if len(in) > 0 { | ||||
| 		out = make([]byte, len(in)+outputPad) | ||||
| 	} | ||||
| 
 | ||||
| 	opts := RunOptions{ | ||||
| 		Data:    in, | ||||
| 		DataOut: out, | ||||
| 		Repeat:  1, | ||||
| 	} | ||||
| 
 | ||||
| 	ret, _, err := p.testRun(&opts) | ||||
| 	if err != nil { | ||||
| 		return ret, nil, fmt.Errorf("can't test program: %w", err) | ||||
| 	} | ||||
| 	return ret, out, nil | ||||
| 	return ret, opts.DataOut, nil | ||||
| } | ||||
| 
 | ||||
| // Run runs the Program in kernel with given RunOptions. | ||||
| // | ||||
| // Note: the same restrictions from Test apply. | ||||
| func (p *Program) Run(opts *RunOptions) (uint32, error) { | ||||
| 	ret, _, err := p.testRun(opts) | ||||
| 	if err != nil { | ||||
| 		return ret, fmt.Errorf("can't test program: %w", err) | ||||
| 	} | ||||
| 	return ret, nil | ||||
| } | ||||
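Taken together, RunOptions and Run replace most direct uses of Test when the caller needs control over output buffers, context, flags, or the target CPU. A minimal sketch, assuming an already-loaded program:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

// runOnce runs a loaded program once against a dummy 14-byte input (the
// minimum PROG_TEST_RUN accepts) and reads back the possibly-modified data.
func runOnce(prog *ebpf.Program) {
	opts := ebpf.RunOptions{
		Data:    make([]byte, 14),
		DataOut: make([]byte, 14+256), // caller-allocated, trimmed to the kernel's DataSizeOut
		Repeat:  1,
	}
	ret, err := prog.Run(&opts)
	if err != nil {
		log.Fatalf("run program: %v", err)
	}
	fmt.Printf("retval=%d, output=%d bytes\n", ret, len(opts.DataOut))
}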
| 
 | ||||
| // Benchmark runs the Program with the given input for a number of times | ||||
|  | @ -411,7 +547,17 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) { | |||
| // | ||||
| // This function requires at least Linux 4.12. | ||||
| func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { | ||||
| 	ret, _, total, err := p.testRun(in, repeat, reset) | ||||
| 	if uint(repeat) > math.MaxUint32 { | ||||
| 		return 0, 0, fmt.Errorf("repeat is too high") | ||||
| 	} | ||||
| 
 | ||||
| 	opts := RunOptions{ | ||||
| 		Data:   in, | ||||
| 		Repeat: uint32(repeat), | ||||
| 		Reset:  reset, | ||||
| 	} | ||||
| 
 | ||||
| 	ret, total, err := p.testRun(&opts) | ||||
| 	if err != nil { | ||||
| 		return ret, total, fmt.Errorf("can't benchmark program: %w", err) | ||||
| 	} | ||||
|  | @ -420,6 +566,7 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D | |||
| 
 | ||||
| var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error { | ||||
| 	prog, err := NewProgram(&ProgramSpec{ | ||||
| 		// SocketFilter does not require privileges on newer kernels. | ||||
| 		Type: SocketFilter, | ||||
| 		Instructions: asm.Instructions{ | ||||
| 			asm.LoadImm(asm.R0, 0, asm.DWord), | ||||
|  | @ -435,88 +582,109 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e | |||
| 
 | ||||
| 	// Programs require at least 14 bytes input | ||||
| 	in := make([]byte, 14) | ||||
| 	attr := bpfProgTestRunAttr{ | ||||
| 		fd:         uint32(prog.FD()), | ||||
| 		dataSizeIn: uint32(len(in)), | ||||
| 		dataIn:     internal.NewSlicePointer(in), | ||||
| 	attr := sys.ProgRunAttr{ | ||||
| 		ProgFd:     uint32(prog.FD()), | ||||
| 		DataSizeIn: uint32(len(in)), | ||||
| 		DataIn:     sys.NewSlicePointer(in), | ||||
| 	} | ||||
| 
 | ||||
| 	err = bpfProgTestRun(&attr) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 	err = sys.ProgRun(&attr) | ||||
| 	switch { | ||||
| 	case errors.Is(err, unix.EINVAL): | ||||
| 		// Check for EINVAL specifically, rather than err != nil since we | ||||
| 		// otherwise misdetect due to insufficient permissions. | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if errors.Is(err, unix.EINTR) { | ||||
| 
 | ||||
| 	case errors.Is(err, unix.EINTR): | ||||
| 		// We know that PROG_TEST_RUN is supported if we get EINTR. | ||||
| 		return nil | ||||
| 
 | ||||
| 	case errors.Is(err, unix.ENOTSUPP): | ||||
| 		// The first PROG_TEST_RUN patches shipped in 4.12 didn't include | ||||
| 		// a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is | ||||
| 		// supported, but not for the program type used in the probe. | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	return err | ||||
| }) | ||||
| 
 | ||||
| func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) { | ||||
| 	if uint(repeat) > math.MaxUint32 { | ||||
| 		return 0, nil, 0, fmt.Errorf("repeat is too high") | ||||
| 	} | ||||
| 
 | ||||
| 	if len(in) == 0 { | ||||
| 		return 0, nil, 0, fmt.Errorf("missing input") | ||||
| 	} | ||||
| 
 | ||||
| 	if uint(len(in)) > math.MaxUint32 { | ||||
| 		return 0, nil, 0, fmt.Errorf("input is too long") | ||||
| func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) { | ||||
| 	if uint(len(opts.Data)) > math.MaxUint32 { | ||||
| 		return 0, 0, fmt.Errorf("input is too long") | ||||
| 	} | ||||
| 
 | ||||
| 	if err := haveProgTestRun(); err != nil { | ||||
| 		return 0, nil, 0, err | ||||
| 		return 0, 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Older kernels ignore the dataSizeOut argument when copying to user space. | ||||
| 	// Combined with things like bpf_xdp_adjust_head() we don't really know what the final | ||||
| 	// size will be. Hence we allocate an output buffer which we hope will always be large | ||||
| 	// enough, and panic if the kernel wrote past the end of the allocation. | ||||
| 	// See https://patchwork.ozlabs.org/cover/1006822/ | ||||
| 	out := make([]byte, len(in)+outputPad) | ||||
| 
 | ||||
| 	fd, err := p.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return 0, nil, 0, err | ||||
| 	var ctxBytes []byte | ||||
| 	if opts.Context != nil { | ||||
| 		ctx := new(bytes.Buffer) | ||||
| 		if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { | ||||
| 			return 0, 0, fmt.Errorf("cannot serialize context: %v", err) | ||||
| 		} | ||||
| 		ctxBytes = ctx.Bytes() | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfProgTestRunAttr{ | ||||
| 		fd:          fd, | ||||
| 		dataSizeIn:  uint32(len(in)), | ||||
| 		dataSizeOut: uint32(len(out)), | ||||
| 		dataIn:      internal.NewSlicePointer(in), | ||||
| 		dataOut:     internal.NewSlicePointer(out), | ||||
| 		repeat:      uint32(repeat), | ||||
| 	var ctxOut []byte | ||||
| 	if opts.ContextOut != nil { | ||||
| 		ctxOut = make([]byte, binary.Size(opts.ContextOut)) | ||||
| 	} | ||||
| 
 | ||||
| 	attr := sys.ProgRunAttr{ | ||||
| 		ProgFd:      p.fd.Uint(), | ||||
| 		DataSizeIn:  uint32(len(opts.Data)), | ||||
| 		DataSizeOut: uint32(len(opts.DataOut)), | ||||
| 		DataIn:      sys.NewSlicePointer(opts.Data), | ||||
| 		DataOut:     sys.NewSlicePointer(opts.DataOut), | ||||
| 		Repeat:      uint32(opts.Repeat), | ||||
| 		CtxSizeIn:   uint32(len(ctxBytes)), | ||||
| 		CtxSizeOut:  uint32(len(ctxOut)), | ||||
| 		CtxIn:       sys.NewSlicePointer(ctxBytes), | ||||
| 		CtxOut:      sys.NewSlicePointer(ctxOut), | ||||
| 		Flags:       opts.Flags, | ||||
| 		Cpu:         opts.CPU, | ||||
| 	} | ||||
| 
 | ||||
| 	for { | ||||
| 		err = bpfProgTestRun(&attr) | ||||
| 		err := sys.ProgRun(&attr) | ||||
| 		if err == nil { | ||||
| 			break | ||||
| 		} | ||||
| 
 | ||||
| 		if errors.Is(err, unix.EINTR) { | ||||
| 			if reset != nil { | ||||
| 				reset() | ||||
| 			if opts.Reset != nil { | ||||
| 				opts.Reset() | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		return 0, nil, 0, fmt.Errorf("can't run test: %w", err) | ||||
| 		if errors.Is(err, unix.ENOTSUPP) { | ||||
| 			return 0, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported) | ||||
| 		} | ||||
| 
 | ||||
| 		return 0, 0, fmt.Errorf("can't run test: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if int(attr.dataSizeOut) > cap(out) { | ||||
| 		// Houston, we have a problem. The program created more data than we allocated, | ||||
| 		// and the kernel wrote past the end of our buffer. | ||||
| 		panic("kernel wrote past end of output buffer") | ||||
| 	if opts.DataOut != nil { | ||||
| 		if int(attr.DataSizeOut) > cap(opts.DataOut) { | ||||
| 			// Houston, we have a problem. The program created more data than we allocated, | ||||
| 			// and the kernel wrote past the end of our buffer. | ||||
| 			panic("kernel wrote past end of output buffer") | ||||
| 		} | ||||
| 		opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] | ||||
| 	} | ||||
| 	out = out[:int(attr.dataSizeOut)] | ||||
| 
 | ||||
| 	total := time.Duration(attr.duration) * time.Nanosecond | ||||
| 	return attr.retval, out, total, nil | ||||
| 	if len(ctxOut) != 0 { | ||||
| 		b := bytes.NewReader(ctxOut) | ||||
| 		if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { | ||||
| 			return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	total := time.Duration(attr.Duration) * time.Nanosecond | ||||
| 	return attr.Retval, total, nil | ||||
| } | ||||
| 
 | ||||
| func unmarshalProgram(buf []byte) (*Program, error) { | ||||
|  | @ -535,70 +703,19 @@ func marshalProgram(p *Program, length int) ([]byte, error) { | |||
| 		return nil, fmt.Errorf("can't marshal program to %d bytes", length) | ||||
| 	} | ||||
| 
 | ||||
| 	value, err := p.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	buf := make([]byte, 4) | ||||
| 	internal.NativeEndian.PutUint32(buf, value) | ||||
| 	internal.NativeEndian.PutUint32(buf, p.fd.Uint()) | ||||
| 	return buf, nil | ||||
| } | ||||
| 
 | ||||
| // Attach a Program. | ||||
| // | ||||
| // Deprecated: use link.RawAttachProgram instead. | ||||
| func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { | ||||
| 	if fd < 0 { | ||||
| 		return errors.New("invalid fd") | ||||
| 	} | ||||
| 
 | ||||
| 	pfd, err := p.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := internal.BPFProgAttachAttr{ | ||||
| 		TargetFd:    uint32(fd), | ||||
| 		AttachBpfFd: pfd, | ||||
| 		AttachType:  uint32(typ), | ||||
| 		AttachFlags: uint32(flags), | ||||
| 	} | ||||
| 
 | ||||
| 	return internal.BPFProgAttach(&attr) | ||||
| } | ||||
| 
 | ||||
| // Detach a Program. | ||||
| // | ||||
| // Deprecated: use link.RawDetachProgram instead. | ||||
| func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { | ||||
| 	if fd < 0 { | ||||
| 		return errors.New("invalid fd") | ||||
| 	} | ||||
| 
 | ||||
| 	if flags != 0 { | ||||
| 		return errors.New("flags must be zero") | ||||
| 	} | ||||
| 
 | ||||
| 	pfd, err := p.fd.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := internal.BPFProgDetachAttr{ | ||||
| 		TargetFd:    uint32(fd), | ||||
| 		AttachBpfFd: pfd, | ||||
| 		AttachType:  uint32(typ), | ||||
| 	} | ||||
| 
 | ||||
| 	return internal.BPFProgDetach(&attr) | ||||
| } | ||||
| 
 | ||||
| // LoadPinnedProgram loads a Program from a BPF file. | ||||
| // | ||||
| // Requires at least Linux 4.11. | ||||
| func LoadPinnedProgram(fileName string) (*Program, error) { | ||||
| 	fd, err := internal.BPFObjGet(fileName) | ||||
| func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { | ||||
| 	fd, err := sys.ObjGet(&sys.ObjGetAttr{ | ||||
| 		Pathname:  sys.NewStringPointer(fileName), | ||||
| 		FileFlags: opts.Marshal(), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -609,7 +726,7 @@ func LoadPinnedProgram(fileName string) (*Program, error) { | |||
| 		return nil, fmt.Errorf("info for %s: %w", fileName, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &Program{"", fd, filepath.Base(fileName), "", info.Type}, nil | ||||
| 	return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil | ||||
| } | ||||
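Illustrative usage of the new signature (the pin path is a made-up example; a nil *LoadPinOptions is assumed to select the defaults):

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

// loadPinned re-opens a program that was previously pinned to bpffs.
func loadPinned() *ebpf.Program {
	prog, err := ebpf.LoadPinnedProgram("/sys/fs/bpf/my_prog", nil)
	if err != nil {
		log.Fatal(err)
	}
	return prog
}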
| 
 | ||||
| // SanitizeName replaces all invalid characters in name with replacement. | ||||
|  | @ -632,67 +749,127 @@ func SanitizeName(name string, replacement rune) string { | |||
| // | ||||
| // Returns ErrNotExist, if there is no next eBPF program. | ||||
| func ProgramGetNextID(startID ProgramID) (ProgramID, error) { | ||||
| 	id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID)) | ||||
| 	return ProgramID(id), err | ||||
| 	attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} | ||||
| 	return ProgramID(attr.NextId), sys.ProgGetNextId(attr) | ||||
| } | ||||
| 
 | ||||
| // ID returns the systemwide unique ID of the program. | ||||
| // BindMap binds a map to the program; the map is only released once the program is released. | ||||
| // | ||||
| // Deprecated: use ProgramInfo.ID() instead. | ||||
| func (p *Program) ID() (ProgramID, error) { | ||||
| 	info, err := bpfGetProgInfoByFD(p.fd) | ||||
| 	if err != nil { | ||||
| 		return ProgramID(0), err | ||||
| 	} | ||||
| 	return ProgramID(info.id), nil | ||||
| } | ||||
| 
 | ||||
| func findKernelType(name string, typ btf.Type) error { | ||||
| 	kernel, err := btf.LoadKernelSpec() | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("can't load kernel spec: %w", err) | ||||
| // This may be used in cases where metadata should be associated with the program | ||||
| // which otherwise does not contain any references to the map. | ||||
| func (p *Program) BindMap(m *Map) error { | ||||
| 	attr := &sys.ProgBindMapAttr{ | ||||
| 		ProgFd: uint32(p.FD()), | ||||
| 		MapFd:  uint32(m.FD()), | ||||
| 	} | ||||
| 
 | ||||
| 	return kernel.FindType(name, typ) | ||||
| 	return sys.ProgBindMap(attr) | ||||
| } | ||||
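A short sketch of when BindMap helps (illustrative): keeping a standalone metadata map alive for as long as the program, even though no instruction references it.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

// bindMetadata ties a metadata map to a program so the map's lifetime
// follows the program's, despite the bytecode never referencing it.
func bindMetadata(prog *ebpf.Program, meta *ebpf.Map) {
	if err := prog.BindMap(meta); err != nil {
		log.Fatalf("bind map: %v", err)
	}
}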
| 
 | ||||
| func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) { | ||||
| var errUnrecognizedAttachType = errors.New("unrecognized attach type") | ||||
| 
 | ||||
| // find an attach target type in the kernel. | ||||
| // | ||||
| // spec may be nil and defaults to the canonical kernel BTF. name together with | ||||
| // progType and attachType determine which type we need to attach to. | ||||
| // | ||||
| // Returns errUnrecognizedAttachType. | ||||
| func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { | ||||
| 	type match struct { | ||||
| 		p ProgramType | ||||
| 		a AttachType | ||||
| 	} | ||||
| 
 | ||||
| 	target := match{progType, attachType} | ||||
| 	switch target { | ||||
| 	var ( | ||||
| 		typeName, featureName string | ||||
| 		isBTFTypeFunc         = true | ||||
| 	) | ||||
| 
 | ||||
| 	switch (match{progType, attachType}) { | ||||
| 	case match{LSM, AttachLSMMac}: | ||||
| 		var target btf.Func | ||||
| 		err := findKernelType("bpf_lsm_"+name, &target) | ||||
| 		if errors.Is(err, btf.ErrNotFound) { | ||||
| 			return nil, &internal.UnsupportedFeatureError{ | ||||
| 				Name: name + " LSM hook", | ||||
| 			} | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("resolve BTF for LSM hook %s: %w", name, err) | ||||
| 		} | ||||
| 
 | ||||
| 		return &target, nil | ||||
| 
 | ||||
| 		typeName = "bpf_lsm_" + name | ||||
| 		featureName = name + " LSM hook" | ||||
| 	case match{Tracing, AttachTraceIter}: | ||||
| 		var target btf.Func | ||||
| 		err := findKernelType("bpf_iter_"+name, &target) | ||||
| 		typeName = "bpf_iter_" + name | ||||
| 		featureName = name + " iterator" | ||||
| 	case match{Tracing, AttachTraceFEntry}: | ||||
| 		typeName = name | ||||
| 		featureName = fmt.Sprintf("fentry %s", name) | ||||
| 	case match{Tracing, AttachTraceFExit}: | ||||
| 		typeName = name | ||||
| 		featureName = fmt.Sprintf("fexit %s", name) | ||||
| 	case match{Tracing, AttachModifyReturn}: | ||||
| 		typeName = name | ||||
| 		featureName = fmt.Sprintf("fmod_ret %s", name) | ||||
| 	case match{Tracing, AttachTraceRawTp}: | ||||
| 		typeName = fmt.Sprintf("btf_trace_%s", name) | ||||
| 		featureName = fmt.Sprintf("raw_tp %s", name) | ||||
| 		isBTFTypeFunc = false | ||||
| 	default: | ||||
| 		return 0, errUnrecognizedAttachType | ||||
| 	} | ||||
| 
 | ||||
| 	spec, err := maybeLoadKernelBTF(spec) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("load kernel spec: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	var target btf.Type | ||||
| 	if isBTFTypeFunc { | ||||
| 		var targetFunc *btf.Func | ||||
| 		err = spec.TypeByName(typeName, &targetFunc) | ||||
| 		target = targetFunc | ||||
| 	} else { | ||||
| 		var targetTypedef *btf.Typedef | ||||
| 		err = spec.TypeByName(typeName, &targetTypedef) | ||||
| 		target = targetTypedef | ||||
| 	} | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		if errors.Is(err, btf.ErrNotFound) { | ||||
| 			return nil, &internal.UnsupportedFeatureError{ | ||||
| 				Name: name + " iterator", | ||||
| 			return 0, &internal.UnsupportedFeatureError{ | ||||
| 				Name: featureName, | ||||
| 			} | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("resolve BTF for iterator %s: %w", name, err) | ||||
| 		} | ||||
| 
 | ||||
| 		return &target, nil | ||||
| 
 | ||||
| 	default: | ||||
| 		return nil, nil | ||||
| 		return 0, fmt.Errorf("find target for %s: %w", featureName, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return spec.TypeID(target) | ||||
| } | ||||
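To illustrate which ProgramSpec fields drive the switch above (a field-mapping sketch only; the kernel function name is an example, and actually loading an fentry program additionally needs BTF function info from a compiled object):

package main

import "github.com/cilium/ebpf"

// fentrySpecSketch shows the fields that findTargetInKernel consults for an
// fentry program: Type/AttachType select the match arm, and AttachTo is the
// kernel symbol resolved through kernel BTF.
func fentrySpecSketch() *ebpf.ProgramSpec {
	return &ebpf.ProgramSpec{
		Name:       "fentry_tcp_connect",
		Type:       ebpf.Tracing,
		AttachType: ebpf.AttachTraceFEntry,
		AttachTo:   "tcp_connect", // example kernel function
		License:    "GPL",
	}
}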
| 
 | ||||
| // find an attach target type in a program. | ||||
| // | ||||
| // Returns errUnrecognizedAttachType. | ||||
| func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { | ||||
| 	type match struct { | ||||
| 		p ProgramType | ||||
| 		a AttachType | ||||
| 	} | ||||
| 
 | ||||
| 	var typeName string | ||||
| 	switch (match{progType, attachType}) { | ||||
| 	case match{Extension, AttachNone}: | ||||
| 		typeName = name | ||||
| 	default: | ||||
| 		return 0, errUnrecognizedAttachType | ||||
| 	} | ||||
| 
 | ||||
| 	btfHandle, err := prog.Handle() | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("load target BTF: %w", err) | ||||
| 	} | ||||
| 	defer btfHandle.Close() | ||||
| 
 | ||||
| 	spec, err := btfHandle.Spec(nil) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	var targetFunc *btf.Func | ||||
| 	err = spec.TypeByName(typeName, &targetFunc) | ||||
| 	if err != nil { | ||||
| 		return 0, fmt.Errorf("find target %s: %w", typeName, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return spec.TypeID(targetFunc) | ||||
| } | ||||
156 vendor/github.com/cilium/ebpf/run-tests.sh (generated, vendored)
| #!/bin/bash | ||||
| #!/usr/bin/env bash | ||||
| # Test the current package under a different kernel. | ||||
| # Requires virtme and qemu to be installed. | ||||
| # Examples: | ||||
| #     Run all tests on a 5.4 kernel | ||||
| #     $ ./run-tests.sh 5.4 | ||||
| #     Run a subset of tests: | ||||
| #     $ ./run-tests.sh 5.4 ./link | ||||
| 
 | ||||
| set -eu | ||||
| set -o pipefail | ||||
| set -euo pipefail | ||||
| 
 | ||||
| if [[ "${1:-}" = "--in-vm" ]]; then | ||||
| script="$(realpath "$0")" | ||||
| readonly script | ||||
| 
 | ||||
| # This script is a bit like a Matryoshka doll since it keeps re-executing itself | ||||
| # in various different contexts: | ||||
| # | ||||
| #   1. invoked by the user like run-tests.sh 5.4 | ||||
| #   2. invoked by go test like run-tests.sh --exec-vm | ||||
| #   3. invoked by init in the vm like run-tests.sh --exec-test | ||||
| # | ||||
| # This allows us to use all available CPU on the host machine to compile our | ||||
| # code, and then only use the VM to execute the test. This is because the VM | ||||
| # is usually slower at compiling than the host. | ||||
| if [[ "${1:-}" = "--exec-vm" ]]; then | ||||
|   shift | ||||
| 
 | ||||
|   input="$1" | ||||
|   shift | ||||
| 
 | ||||
|   # Use sudo if /dev/kvm isn't accessible by the current user. | ||||
|   sudo="" | ||||
|   if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then | ||||
|     sudo="sudo" | ||||
|   fi | ||||
|   readonly sudo | ||||
| 
 | ||||
|   testdir="$(dirname "$1")" | ||||
|   output="$(mktemp -d)" | ||||
|   printf -v cmd "%q " "$@" | ||||
| 
 | ||||
|   if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then | ||||
|     # stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a | ||||
|     # blocking substitute. | ||||
|     mkfifo "${output}/fake-stdin" | ||||
|     # Open for reading and writing to avoid blocking. | ||||
|     exec 0<> "${output}/fake-stdin" | ||||
|     rm "${output}/fake-stdin" | ||||
|   fi | ||||
| 
 | ||||
|   for ((i = 0; i < 3; i++)); do | ||||
|     if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ | ||||
|       --rwdir="${testdir}=${testdir}" \ | ||||
|       --rodir=/run/input="${input}" \ | ||||
|       --rwdir=/run/output="${output}" \ | ||||
|       --script-sh "PATH=\"$PATH\" CI_MAX_KERNEL_VERSION="${CI_MAX_KERNEL_VERSION:-}" \"$script\" --exec-test $cmd" \ | ||||
|       --kopt possible_cpus=2; then # need at least two CPUs for some tests | ||||
|       exit 23 | ||||
|     fi | ||||
| 
 | ||||
|     if [[ -e "${output}/status" ]]; then | ||||
|       break | ||||
|     fi | ||||
| 
 | ||||
|     if [[ -v CI ]]; then | ||||
|       echo "Retrying test run due to qemu crash" | ||||
|       continue | ||||
|     fi | ||||
| 
 | ||||
|     exit 42 | ||||
|   done | ||||
| 
 | ||||
|   rc=$(<"${output}/status") | ||||
|   $sudo rm -r "$output" | ||||
|   exit $rc | ||||
| elif [[ "${1:-}" = "--exec-test" ]]; then | ||||
|   shift | ||||
| 
 | ||||
|   mount -t bpf bpf /sys/fs/bpf | ||||
|   export CGO_ENABLED=0 | ||||
|   export GOFLAGS=-mod=readonly | ||||
|   export GOPATH=/run/go-path | ||||
|   export GOPROXY=file:///run/go-path/pkg/mod/cache/download | ||||
|   export GOSUMDB=off | ||||
|   export GOCACHE=/run/go-cache | ||||
|   mount -t tracefs tracefs /sys/kernel/debug/tracing | ||||
| 
 | ||||
|   if [[ -d "/run/input/bpf" ]]; then | ||||
|     export KERNEL_SELFTESTS="/run/input/bpf" | ||||
|   fi | ||||
| 
 | ||||
|   readonly output="${1}" | ||||
|   shift | ||||
|   if [[ -f "/run/input/bpf/bpf_testmod/bpf_testmod.ko" ]]; then | ||||
|     insmod "/run/input/bpf/bpf_testmod/bpf_testmod.ko" | ||||
|   fi | ||||
| 
 | ||||
|   echo Running tests... | ||||
|   go test -v -coverpkg=./... -coverprofile="$output/coverage.txt" -count 1 ./... | ||||
|   touch "$output/success" | ||||
|   exit 0 | ||||
|   dmesg --clear | ||||
|   rc=0 | ||||
|   "$@" || rc=$? | ||||
|   dmesg | ||||
|   echo $rc > "/run/output/status" | ||||
|   exit $rc # this return code is "swallowed" by qemu | ||||
| fi | ||||
| 
 | ||||
| # Pull all dependencies, so that we can run tests without the | ||||
| # vm having network access. | ||||
| go mod download | ||||
| 
 | ||||
| # Use sudo if /dev/kvm isn't accessible by the current user. | ||||
| sudo="" | ||||
| if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then | ||||
|   sudo="sudo" | ||||
| fi | ||||
| readonly sudo | ||||
| 
 | ||||
| readonly kernel_version="${1:-}" | ||||
| if [[ -z "${kernel_version}" ]]; then | ||||
|   echo "Expecting kernel version as first argument" | ||||
|   exit 1 | ||||
| fi | ||||
| shift | ||||
| 
 | ||||
| readonly kernel="linux-${kernel_version}.bz" | ||||
| readonly selftests="linux-${kernel_version}-selftests-bpf.bz" | ||||
| readonly selftests="linux-${kernel_version}-selftests-bpf.tgz" | ||||
| readonly input="$(mktemp -d)" | ||||
| readonly output="$(mktemp -d)" | ||||
| readonly tmp_dir="${TMPDIR:-/tmp}" | ||||
| readonly branch="${BRANCH:-master}" | ||||
| 
 | ||||
| fetch() { | ||||
|     echo Fetching "${1}" | ||||
|     wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" | ||||
|     pushd "${tmp_dir}" > /dev/null | ||||
|     curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" | ||||
|     local ret=$? | ||||
|     popd > /dev/null | ||||
|     return $ret | ||||
| } | ||||
| 
 | ||||
| fetch "${kernel}" | ||||
| cp "${tmp_dir}/${kernel}" "${input}/bzImage" | ||||
| 
 | ||||
| if fetch "${selftests}"; then | ||||
|   echo "Decompressing selftests" | ||||
|   mkdir "${input}/bpf" | ||||
|   tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf" | ||||
|   tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf" | ||||
| else | ||||
|   echo "No selftests found, disabling" | ||||
| fi | ||||
| 
 | ||||
| echo Testing on "${kernel_version}" | ||||
| $sudo virtme-run --kimg "${tmp_dir}/${kernel}" --memory 512M --pwd \ | ||||
|   --rw \ | ||||
|   --rwdir=/run/input="${input}" \ | ||||
|   --rwdir=/run/output="${output}" \ | ||||
|   --rodir=/run/go-path="$(go env GOPATH)" \ | ||||
|   --rwdir=/run/go-cache="$(go env GOCACHE)" \ | ||||
|   --script-sh "PATH=\"$PATH\" $(realpath "$0") --in-vm /run/output" \ | ||||
|   --qemu-opts -smp 2 # need at least two CPUs for some tests | ||||
| 
 | ||||
| if [[ ! -e "${output}/success" ]]; then | ||||
|   echo "Test failed on ${kernel_version}" | ||||
|   exit 1 | ||||
| else | ||||
|   echo "Test successful on ${kernel_version}" | ||||
|   if [[ -v COVERALLS_TOKEN ]]; then | ||||
|     goveralls -coverprofile="${output}/coverage.txt" -service=semaphore -repotoken "$COVERALLS_TOKEN" | ||||
|   fi | ||||
| args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) | ||||
| if (( $# > 0 )); then | ||||
|   args=("$@") | ||||
| fi | ||||
| 
 | ||||
| $sudo rm -r "${input}" | ||||
| $sudo rm -r "${output}" | ||||
| export GOFLAGS=-mod=readonly | ||||
| export CGO_ENABLED=0 | ||||
| # LINUX_VERSION_CODE test compares this to discovered value. | ||||
| export KERNEL_VERSION="${kernel_version}" | ||||
| 
 | ||||
| echo Testing on "${kernel_version}" | ||||
| go test -exec "$script --exec-vm $input" "${args[@]}" | ||||
| echo "Test successful on ${kernel_version}" | ||||
| 
 | ||||
| rm -r "${input}" | ||||
491 vendor/github.com/cilium/ebpf/syscalls.go (generated, vendored)
							|  | @ -1,29 +1,16 @@ | |||
| package ebpf | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	"github.com/cilium/ebpf/asm" | ||||
| 	"github.com/cilium/ebpf/internal" | ||||
| 	"github.com/cilium/ebpf/internal/btf" | ||||
| 	"github.com/cilium/ebpf/internal/sys" | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| // Generic errors returned by BPF syscalls. | ||||
| var ErrNotExist = errors.New("requested object does not exist") | ||||
| 
 | ||||
| // bpfObjName is a null-terminated string made up of | ||||
| // 'A-Za-z0-9_' characters. | ||||
| type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte | ||||
| 
 | ||||
| // newBPFObjName truncates the result if it is too long. | ||||
| func newBPFObjName(name string) bpfObjName { | ||||
| 	var result bpfObjName | ||||
| 	copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) | ||||
| 	return result | ||||
| } | ||||
| 
 | ||||
| // invalidBPFObjNameChar returns true if char may not appear in | ||||
| // a BPF object name. | ||||
| func invalidBPFObjNameChar(char rune) bool { | ||||
|  | @ -45,183 +32,29 @@ func invalidBPFObjNameChar(char rune) bool { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| type bpfMapCreateAttr struct { | ||||
| 	mapType        MapType | ||||
| 	keySize        uint32 | ||||
| 	valueSize      uint32 | ||||
| 	maxEntries     uint32 | ||||
| 	flags          uint32 | ||||
| 	innerMapFd     uint32     // since 4.12 56f668dfe00d | ||||
| 	numaNode       uint32     // since 4.14 96eabe7a40aa | ||||
| 	mapName        bpfObjName // since 4.15 ad5b177bd73f | ||||
| 	mapIfIndex     uint32 | ||||
| 	btfFd          uint32 | ||||
| 	btfKeyTypeID   btf.TypeID | ||||
| 	btfValueTypeID btf.TypeID | ||||
| } | ||||
| 
 | ||||
| type bpfMapOpAttr struct { | ||||
| 	mapFd   uint32 | ||||
| 	padding uint32 | ||||
| 	key     internal.Pointer | ||||
| 	value   internal.Pointer | ||||
| 	flags   uint64 | ||||
| } | ||||
| 
 | ||||
| type bpfBatchMapOpAttr struct { | ||||
| 	inBatch   internal.Pointer | ||||
| 	outBatch  internal.Pointer | ||||
| 	keys      internal.Pointer | ||||
| 	values    internal.Pointer | ||||
| 	count     uint32 | ||||
| 	mapFd     uint32 | ||||
| 	elemFlags uint64 | ||||
| 	flags     uint64 | ||||
| } | ||||
| 
 | ||||
| type bpfMapInfo struct { | ||||
| 	map_type                  uint32 // since 4.12 1e2709769086 | ||||
| 	id                        uint32 | ||||
| 	key_size                  uint32 | ||||
| 	value_size                uint32 | ||||
| 	max_entries               uint32 | ||||
| 	map_flags                 uint32 | ||||
| 	name                      bpfObjName // since 4.15 ad5b177bd73f | ||||
| 	ifindex                   uint32     // since 4.16 52775b33bb50 | ||||
| 	btf_vmlinux_value_type_id uint32     // since 5.6  85d33df357b6 | ||||
| 	netns_dev                 uint64     // since 4.16 52775b33bb50 | ||||
| 	netns_ino                 uint64 | ||||
| 	btf_id                    uint32 // since 4.18 78958fca7ead | ||||
| 	btf_key_type_id           uint32 // since 4.18 9b2cf328b2ec | ||||
| 	btf_value_type_id         uint32 | ||||
| } | ||||
| 
 | ||||
| type bpfProgLoadAttr struct { | ||||
| 	progType           ProgramType | ||||
| 	insCount           uint32 | ||||
| 	instructions       internal.Pointer | ||||
| 	license            internal.Pointer | ||||
| 	logLevel           uint32 | ||||
| 	logSize            uint32 | ||||
| 	logBuf             internal.Pointer | ||||
| 	kernelVersion      uint32     // since 4.1  2541517c32be | ||||
| 	progFlags          uint32     // since 4.11 e07b98d9bffe | ||||
| 	progName           bpfObjName // since 4.15 067cae47771c | ||||
| 	progIfIndex        uint32     // since 4.15 1f6f4cb7ba21 | ||||
| 	expectedAttachType AttachType // since 4.17 5e43f899b03a | ||||
| 	progBTFFd          uint32 | ||||
| 	funcInfoRecSize    uint32 | ||||
| 	funcInfo           internal.Pointer | ||||
| 	funcInfoCnt        uint32 | ||||
| 	lineInfoRecSize    uint32 | ||||
| 	lineInfo           internal.Pointer | ||||
| 	lineInfoCnt        uint32 | ||||
| 	attachBTFID        btf.TypeID | ||||
| 	attachProgFd       uint32 | ||||
| } | ||||
| 
 | ||||
| type bpfProgInfo struct { | ||||
| 	prog_type                uint32 | ||||
| 	id                       uint32 | ||||
| 	tag                      [unix.BPF_TAG_SIZE]byte | ||||
| 	jited_prog_len           uint32 | ||||
| 	xlated_prog_len          uint32 | ||||
| 	jited_prog_insns         internal.Pointer | ||||
| 	xlated_prog_insns        internal.Pointer | ||||
| 	load_time                uint64 // since 4.15 cb4d2b3f03d8 | ||||
| 	created_by_uid           uint32 | ||||
| 	nr_map_ids               uint32 | ||||
| 	map_ids                  internal.Pointer | ||||
| 	name                     bpfObjName // since 4.15 067cae47771c | ||||
| 	ifindex                  uint32 | ||||
| 	gpl_compatible           uint32 | ||||
| 	netns_dev                uint64 | ||||
| 	netns_ino                uint64 | ||||
| 	nr_jited_ksyms           uint32 | ||||
| 	nr_jited_func_lens       uint32 | ||||
| 	jited_ksyms              internal.Pointer | ||||
| 	jited_func_lens          internal.Pointer | ||||
| 	btf_id                   uint32 | ||||
| 	func_info_rec_size       uint32 | ||||
| 	func_info                internal.Pointer | ||||
| 	nr_func_info             uint32 | ||||
| 	nr_line_info             uint32 | ||||
| 	line_info                internal.Pointer | ||||
| 	jited_line_info          internal.Pointer | ||||
| 	nr_jited_line_info       uint32 | ||||
| 	line_info_rec_size       uint32 | ||||
| 	jited_line_info_rec_size uint32 | ||||
| 	nr_prog_tags             uint32 | ||||
| 	prog_tags                internal.Pointer | ||||
| 	run_time_ns              uint64 | ||||
| 	run_cnt                  uint64 | ||||
| } | ||||
| 
 | ||||
| type bpfProgTestRunAttr struct { | ||||
| 	fd          uint32 | ||||
| 	retval      uint32 | ||||
| 	dataSizeIn  uint32 | ||||
| 	dataSizeOut uint32 | ||||
| 	dataIn      internal.Pointer | ||||
| 	dataOut     internal.Pointer | ||||
| 	repeat      uint32 | ||||
| 	duration    uint32 | ||||
| } | ||||
| 
 | ||||
| type bpfGetFDByIDAttr struct { | ||||
| 	id   uint32 | ||||
| 	next uint32 | ||||
| } | ||||
| 
 | ||||
| type bpfMapFreezeAttr struct { | ||||
| 	mapFd uint32 | ||||
| } | ||||
| 
 | ||||
| type bpfObjGetNextIDAttr struct { | ||||
| 	startID   uint32 | ||||
| 	nextID    uint32 | ||||
| 	openFlags uint32 | ||||
| } | ||||
| 
 | ||||
| func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) { | ||||
| 	for { | ||||
| 		fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 		// As of ~4.20 the verifier can be interrupted by a signal, | ||||
| 		// and returns EAGAIN in that case. | ||||
| 		if err == unix.EAGAIN { | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		return internal.NewFD(uint32(fd)), nil | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func bpfProgTestRun(attr *bpfProgTestRunAttr) error { | ||||
| 	_, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) { | ||||
| 	fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) | ||||
| 	if err != nil { | ||||
| func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { | ||||
| 	buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) | ||||
| 	if err := insns.Marshal(buf, internal.NativeEndian); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	bytecode := buf.Bytes() | ||||
| 
 | ||||
| 	return internal.NewFD(uint32(fd)), nil | ||||
| 	return sys.ProgLoad(&sys.ProgLoadAttr{ | ||||
| 		ProgType: sys.ProgType(typ), | ||||
| 		License:  sys.NewStringPointer(license), | ||||
| 		Insns:    sys.NewSlicePointer(bytecode), | ||||
| 		InsnCnt:  uint32(len(bytecode) / asm.InstructionSize), | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { | ||||
| 	_, err := bpfMapCreate(&bpfMapCreateAttr{ | ||||
| 		mapType:    ArrayOfMaps, | ||||
| 		keySize:    4, | ||||
| 		valueSize:  4, | ||||
| 		maxEntries: 1, | ||||
| 	_, err := sys.MapCreate(&sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(ArrayOfMaps), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		// Invalid file descriptor. | ||||
| 		innerMapFd: ^uint32(0), | ||||
| 		InnerMapFd: ^uint32(0), | ||||
| 	}) | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
|  | @ -235,12 +68,12 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { | |||
| var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error { | ||||
| 	// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since | ||||
| 	// BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. | ||||
| 	m, err := bpfMapCreate(&bpfMapCreateAttr{ | ||||
| 		mapType:    Array, | ||||
| 		keySize:    4, | ||||
| 		valueSize:  4, | ||||
| 		maxEntries: 1, | ||||
| 		flags:      unix.BPF_F_RDONLY_PROG, | ||||
| 	m, err := sys.MapCreate(&sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Array), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		MapFlags:   unix.BPF_F_RDONLY_PROG, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
|  | @ -249,122 +82,53 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps | |||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error { | ||||
| 	fd, err := m.Value() | ||||
| var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error { | ||||
| 	// This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. | ||||
| 	m, err := sys.MapCreate(&sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Array), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		MapFlags:   unix.BPF_F_MMAPABLE, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	_ = m.Close() | ||||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| 	attr := bpfMapOpAttr{ | ||||
| 		mapFd: fd, | ||||
| 		key:   key, | ||||
| 		value: valueOut, | ||||
| 	} | ||||
| 	_, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return wrapMapError(err) | ||||
| } | ||||
| 
 | ||||
| func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error { | ||||
| 	fd, err := m.Value() | ||||
| var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { | ||||
| 	// This checks BPF_F_INNER_MAP, which appeared in 5.10. | ||||
| 	m, err := sys.MapCreate(&sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Array), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		MapFlags:   unix.BPF_F_INNER_MAP, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	_ = m.Close() | ||||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| 	attr := bpfMapOpAttr{ | ||||
| 		mapFd: fd, | ||||
| 		key:   key, | ||||
| 		value: valueOut, | ||||
| 	} | ||||
| 	_, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return wrapMapError(err) | ||||
| } | ||||
| 
 | ||||
| func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error { | ||||
| 	fd, err := m.Value() | ||||
| var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error { | ||||
| 	// This checks BPF_F_NO_PREALLOC, which appeared in 4.6. | ||||
| 	m, err := sys.MapCreate(&sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Hash), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		MapFlags:   unix.BPF_F_NO_PREALLOC, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfMapOpAttr{ | ||||
| 		mapFd: fd, | ||||
| 		key:   key, | ||||
| 		value: valueOut, | ||||
| 		flags: flags, | ||||
| 	} | ||||
| 	_, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return wrapMapError(err) | ||||
| } | ||||
| 
 | ||||
| func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error { | ||||
| 	fd, err := m.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfMapOpAttr{ | ||||
| 		mapFd: fd, | ||||
| 		key:   key, | ||||
| 	} | ||||
| 	_, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return wrapMapError(err) | ||||
| } | ||||
| 
 | ||||
| func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error { | ||||
| 	fd, err := m.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfMapOpAttr{ | ||||
| 		mapFd: fd, | ||||
| 		key:   key, | ||||
| 		value: nextKeyOut, | ||||
| 	} | ||||
| 	_, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return wrapMapError(err) | ||||
| } | ||||
| 
 | ||||
| func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) { | ||||
| 	attr := bpfObjGetNextIDAttr{ | ||||
| 		startID: start, | ||||
| 	} | ||||
| 	_, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return attr.nextID, wrapObjError(err) | ||||
| } | ||||
| 
 | ||||
| func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) { | ||||
| 	fd, err := m.Value() | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfBatchMapOpAttr{ | ||||
| 		inBatch:  inBatch, | ||||
| 		outBatch: outBatch, | ||||
| 		keys:     keys, | ||||
| 		values:   values, | ||||
| 		count:    count, | ||||
| 		mapFd:    fd, | ||||
| 	} | ||||
| 	if opts != nil { | ||||
| 		attr.elemFlags = opts.ElemFlags | ||||
| 		attr.flags = opts.Flags | ||||
| 	} | ||||
| 	_, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	// always return count even on an error, as things like update might partially be fulfilled. | ||||
| 	return attr.count, wrapMapError(err) | ||||
| } | ||||
| 
 | ||||
| func wrapObjError(err error) error { | ||||
| 	if err == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if errors.Is(err, unix.ENOENT) { | ||||
| 		return fmt.Errorf("%w", ErrNotExist) | ||||
| 	} | ||||
| 
 | ||||
| 	return errors.New(err.Error()) | ||||
| } | ||||
| 	_ = m.Close() | ||||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| func wrapMapError(err error) error { | ||||
| 	if err == nil { | ||||
|  | @ -372,60 +136,34 @@ func wrapMapError(err error) error { | |||
| 	} | ||||
| 
 | ||||
| 	if errors.Is(err, unix.ENOENT) { | ||||
| 		return ErrKeyNotExist | ||||
| 		return sys.Error(ErrKeyNotExist, unix.ENOENT) | ||||
| 	} | ||||
| 
 | ||||
| 	if errors.Is(err, unix.EEXIST) { | ||||
| 		return ErrKeyExist | ||||
| 		return sys.Error(ErrKeyExist, unix.EEXIST) | ||||
| 	} | ||||
| 
 | ||||
| 	if errors.Is(err, unix.ENOTSUPP) { | ||||
| 		return ErrNotSupported | ||||
| 		return sys.Error(ErrNotSupported, unix.ENOTSUPP) | ||||
| 	} | ||||
| 
 | ||||
| 	return errors.New(err.Error()) | ||||
| } | ||||
| 
 | ||||
| func bpfMapFreeze(m *internal.FD) error { | ||||
| 	fd, err := m.Value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	if errors.Is(err, unix.E2BIG) { | ||||
| 		return fmt.Errorf("key too big for map: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfMapFreezeAttr{ | ||||
| 		mapFd: fd, | ||||
| 	} | ||||
| 	_, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) { | ||||
| 	var info bpfProgInfo | ||||
| 	if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { | ||||
| 		return nil, fmt.Errorf("can't get program info: %w", err) | ||||
| 	} | ||||
| 	return &info, nil | ||||
| } | ||||
| 
 | ||||
| func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) { | ||||
| 	var info bpfMapInfo | ||||
| 	err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("can't get map info: %w", err) | ||||
| 	} | ||||
| 	return &info, nil | ||||
| } | ||||
| 
 | ||||
| var haveObjName = internal.FeatureTest("object names", "4.15", func() error { | ||||
| 	attr := bpfMapCreateAttr{ | ||||
| 		mapType:    Array, | ||||
| 		keySize:    4, | ||||
| 		valueSize:  4, | ||||
| 		maxEntries: 1, | ||||
| 		mapName:    newBPFObjName("feature_test"), | ||||
| 	attr := sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Array), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		MapName:    sys.NewObjName("feature_test"), | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfMapCreate(&attr) | ||||
| 	fd, err := sys.MapCreate(&attr) | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
|  | @ -439,15 +177,15 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	attr := bpfMapCreateAttr{ | ||||
| 		mapType:    Array, | ||||
| 		keySize:    4, | ||||
| 		valueSize:  4, | ||||
| 		maxEntries: 1, | ||||
| 		mapName:    newBPFObjName(".test"), | ||||
| 	attr := sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Array), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: 1, | ||||
| 		MapName:    sys.NewObjName(".test"), | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfMapCreate(&attr) | ||||
| 	fd, err := sys.MapCreate(&attr) | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
|  | @ -458,34 +196,69 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() | |||
| 
 | ||||
| var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { | ||||
| 	var maxEntries uint32 = 2 | ||||
| 	attr := bpfMapCreateAttr{ | ||||
| 		mapType:    Hash, | ||||
| 		keySize:    4, | ||||
| 		valueSize:  4, | ||||
| 		maxEntries: maxEntries, | ||||
| 	attr := sys.MapCreateAttr{ | ||||
| 		MapType:    sys.MapType(Hash), | ||||
| 		KeySize:    4, | ||||
| 		ValueSize:  4, | ||||
| 		MaxEntries: maxEntries, | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := bpfMapCreate(&attr) | ||||
| 	fd, err := sys.MapCreate(&attr) | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	defer fd.Close() | ||||
| 
 | ||||
| 	keys := []uint32{1, 2} | ||||
| 	values := []uint32{3, 4} | ||||
| 	kp, _ := marshalPtr(keys, 8) | ||||
| 	vp, _ := marshalPtr(values, 8) | ||||
| 	nilPtr := internal.NewPointer(nil) | ||||
| 	_, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil) | ||||
| 
 | ||||
| 	err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ | ||||
| 		MapFd:  fd.Uint(), | ||||
| 		Keys:   kp, | ||||
| 		Values: vp, | ||||
| 		Count:  maxEntries, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	return nil | ||||
| }) | ||||
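The haveBatchAPI probe above only proves that BPF_MAP_UPDATE_BATCH exists; applications normally reach it through the exported Map methods rather than sys.MapUpdateBatch. A minimal caller-side sketch, assuming Map.BatchUpdate and the re-exported ErrNotSupported behave as the upstream cilium/ebpf documentation describes (neither appears in this hunk, so treat the exact signatures as assumptions):

package main

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Mirror the probe: a tiny hash map with room for two entries.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 2,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	keys := []uint32{1, 2}
	values := []uint32{3, 4}

	// BatchUpdate issues a single BPF_MAP_UPDATE_BATCH syscall; on kernels
	// older than 5.6 the call is expected to report ErrNotSupported.
	n, err := m.BatchUpdate(keys, values, nil)
	if errors.Is(err, ebpf.ErrNotSupported) {
		log.Println("batch API unavailable, falling back to per-key Put calls")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updated %d entries in one syscall", n)
}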
| 
 | ||||
| func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) { | ||||
| 	attr := bpfGetFDByIDAttr{ | ||||
| 		id: id, | ||||
| var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error { | ||||
| 	insns := asm.Instructions{ | ||||
| 		asm.Mov.Reg(asm.R1, asm.R10), | ||||
| 		asm.Add.Imm(asm.R1, -8), | ||||
| 		asm.Mov.Imm(asm.R2, 8), | ||||
| 		asm.Mov.Imm(asm.R3, 0), | ||||
| 		asm.FnProbeReadKernel.Call(), | ||||
| 		asm.Return(), | ||||
| 	} | ||||
| 	ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) | ||||
| 	return internal.NewFD(uint32(ptr)), wrapObjError(err) | ||||
| } | ||||
| 
 | ||||
| 	fd, err := progLoad(insns, Kprobe, "GPL") | ||||
| 	if err != nil { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	_ = fd.Close() | ||||
| 	return nil | ||||
| }) | ||||
| 
 | ||||
| var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error { | ||||
| 	insns := asm.Instructions{ | ||||
| 		asm.Call.Label("prog2").WithSymbol("prog1"), | ||||
| 		asm.Return(), | ||||
| 		asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), | ||||
| 		asm.Return(), | ||||
| 	} | ||||
| 
 | ||||
| 	fd, err := progLoad(insns, SocketFilter, "MIT") | ||||
| 	if errors.Is(err, unix.EINVAL) { | ||||
| 		return internal.ErrNotSupported | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	_ = fd.Close() | ||||
| 	return nil | ||||
| }) | ||||
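Taken together, the hunks above replace the hand-rolled bpf*Attr structs and raw internal.BPF calls with generated wrappers from the internal/sys package, and rebuild every feature probe on top of them. Code outside the library can run equivalent checks through the upstream project's features package; a small sketch, assuming features.HaveMapType and the re-exported ErrNotSupported exist as documented upstream (they are not part of this diff):

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/features"
)

func main() {
	// Probe for ring buffer map support (kernel 5.8+) without creating
	// any long-lived objects; the result is cached by the library.
	err := features.HaveMapType(ebpf.RingBuf)
	switch {
	case err == nil:
		fmt.Println("BPF_MAP_TYPE_RINGBUF is supported")
	case errors.Is(err, ebpf.ErrNotSupported):
		fmt.Println("kernel too old for ring buffer maps")
	default:
		fmt.Println("probe failed:", err)
	}
}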
vendor/github.com/cilium/ebpf/types.go (generated, vendored, 75 lines changed)
|  | @ -1,11 +1,20 @@ | |||
| package ebpf | ||||
| 
 | ||||
| //go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType | ||||
| import ( | ||||
| 	"github.com/cilium/ebpf/internal/unix" | ||||
| ) | ||||
| 
 | ||||
| //go:generate stringer -output types_string.go -type=MapType,ProgramType,PinType | ||||
| 
 | ||||
| // MapType indicates the type of map structure | ||||
| // that will be initialized in the kernel. | ||||
| type MapType uint32 | ||||
| 
 | ||||
| // Max returns the latest supported MapType. | ||||
| func (MapType) Max() MapType { | ||||
| 	return maxMapType - 1 | ||||
| } | ||||
| 
 | ||||
| // All the various map types that can be created | ||||
| const ( | ||||
| 	UnspecifiedMap MapType = iota | ||||
|  | @ -81,11 +90,22 @@ const ( | |||
| 	SkStorage | ||||
| 	// DevMapHash - Hash-based indexing scheme for references to network devices. | ||||
| 	DevMapHash | ||||
| 	// StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF | ||||
| 	// program. | ||||
| 	StructOpsMap | ||||
| 	// RingBuf - Similar to PerfEventArray, but shared across all CPUs. | ||||
| 	RingBuf | ||||
| 	// InodeStorage - Specialized local storage map for inodes. | ||||
| 	InodeStorage | ||||
| 	// TaskStorage - Specialized local storage map for task_struct. | ||||
| 	TaskStorage | ||||
| 	// maxMapType - Bound enum of MapTypes, has to be last in enum. | ||||
| 	maxMapType | ||||
| ) | ||||
| 
 | ||||
| // hasPerCPUValue returns true if the Map stores a value per CPU. | ||||
| func (mt MapType) hasPerCPUValue() bool { | ||||
| 	return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash | ||||
| 	return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage | ||||
| } | ||||
| 
 | ||||
| // canStoreMap returns true if the map type accepts a map fd | ||||
|  | @ -100,9 +120,25 @@ func (mt MapType) canStoreProgram() bool { | |||
| 	return mt == ProgramArray | ||||
| } | ||||
| 
 | ||||
| // hasBTF returns true if the map type supports BTF key/value metadata. | ||||
| func (mt MapType) hasBTF() bool { | ||||
| 	switch mt { | ||||
| 	case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap, | ||||
| 		DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf: | ||||
| 		return false | ||||
| 	default: | ||||
| 		return true | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // ProgramType of the eBPF program | ||||
| type ProgramType uint32 | ||||
| 
 | ||||
| // Max returns the latest supported ProgramType. | ||||
| func (ProgramType) Max() ProgramType { | ||||
| 	return maxProgramType - 1 | ||||
| } | ||||
| 
 | ||||
| // eBPF program types | ||||
| const ( | ||||
| 	UnspecifiedProgram ProgramType = iota | ||||
|  | @ -136,6 +172,8 @@ const ( | |||
| 	Extension | ||||
| 	LSM | ||||
| 	SkLookup | ||||
| 	Syscall | ||||
| 	maxProgramType | ||||
| ) | ||||
| 
 | ||||
| // AttachType of the eBPF program, needed to differentiate allowed context accesses in | ||||
|  | @ -143,6 +181,8 @@ const ( | |||
| // Will cause invalid argument (EINVAL) at program load time if set incorrectly. | ||||
| type AttachType uint32 | ||||
| 
 | ||||
| //go:generate stringer -type AttachType -trimprefix Attach | ||||
| 
 | ||||
| // AttachNone is an alias for AttachCGroupInetIngress for readability reasons. | ||||
| const AttachNone AttachType = 0 | ||||
| 
 | ||||
|  | @ -185,6 +225,10 @@ const ( | |||
| 	AttachXDPCPUMap | ||||
| 	AttachSkLookup | ||||
| 	AttachXDP | ||||
| 	AttachSkSKBVerdict | ||||
| 	AttachSkReuseportSelect | ||||
| 	AttachSkReuseportSelectOrMigrate | ||||
| 	AttachPerfEvent | ||||
| ) | ||||
| 
 | ||||
| // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command | ||||
|  | @ -202,6 +246,33 @@ const ( | |||
| 	PinByName | ||||
| ) | ||||
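PinByName tells the library to pin or load the object under its name inside a caller-supplied bpffs directory. A rough sketch of how that is typically wired up, assuming MapSpec.Pinning, NewMapWithOptions and MapOptions.PinPath match the upstream API (none of them appear in this hunk):

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec := &ebpf.MapSpec{
		Name:       "example_counters", // becomes the pin file name
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1,
		Pinning:    ebpf.PinByName,
	}

	// With PinByName the map is loaded from /sys/fs/bpf/example_counters if a
	// compatible pin already exists there, and created and pinned otherwise.
	m, err := ebpf.NewMapWithOptions(spec, ebpf.MapOptions{PinPath: "/sys/fs/bpf"})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}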
| 
 | ||||
| // LoadPinOptions control how a pinned object is loaded. | ||||
| type LoadPinOptions struct { | ||||
| 	// Request a read-only or write-only object. The default is a read-write | ||||
| 	// object. Only one of the flags may be set. | ||||
| 	ReadOnly  bool | ||||
| 	WriteOnly bool | ||||
| 
 | ||||
| 	// Raw flags for the syscall. Other fields of this struct take precedence. | ||||
| 	Flags uint32 | ||||
| } | ||||
| 
 | ||||
| // Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter. | ||||
| func (lpo *LoadPinOptions) Marshal() uint32 { | ||||
| 	if lpo == nil { | ||||
| 		return 0 | ||||
| 	} | ||||
| 
 | ||||
| 	flags := lpo.Flags | ||||
| 	if lpo.ReadOnly { | ||||
| 		flags |= unix.BPF_F_RDONLY | ||||
| 	} | ||||
| 	if lpo.WriteOnly { | ||||
| 		flags |= unix.BPF_F_WRONLY | ||||
| 	} | ||||
| 	return flags | ||||
| } | ||||
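Marshal is what turns these options into the file_flags argument of BPF_OBJ_GET when a pinned object is opened. A brief usage sketch, assuming LoadPinnedMap accepts a *LoadPinOptions as in the upstream API (the pin path is made up for illustration):

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Open an existing pin read-only; Marshal turns ReadOnly into
	// BPF_F_RDONLY in the syscall's file_flags field.
	m, err := ebpf.LoadPinnedMap("/sys/fs/bpf/example_counters", &ebpf.LoadPinOptions{
		ReadOnly: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}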
| 
 | ||||
| // BatchOptions batch map operations options | ||||
| // | ||||
| // Mirrors libbpf struct bpf_map_batch_opts | ||||
vendor/github.com/cilium/ebpf/types_string.go (generated, vendored, 72 lines changed)
|  | @ -1,4 +1,4 @@ | |||
| // Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT. | ||||
| // Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. | ||||
| 
 | ||||
| package ebpf | ||||
| 
 | ||||
|  | @ -34,11 +34,16 @@ func _() { | |||
| 	_ = x[Stack-23] | ||||
| 	_ = x[SkStorage-24] | ||||
| 	_ = x[DevMapHash-25] | ||||
| 	_ = x[StructOpsMap-26] | ||||
| 	_ = x[RingBuf-27] | ||||
| 	_ = x[InodeStorage-28] | ||||
| 	_ = x[TaskStorage-29] | ||||
| 	_ = x[maxMapType-30] | ||||
| } | ||||
| 
 | ||||
| const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHash" | ||||
| const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType" | ||||
| 
 | ||||
| var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248} | ||||
| var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300} | ||||
| 
 | ||||
| func (i MapType) String() string { | ||||
| 	if i >= MapType(len(_MapType_index)-1) { | ||||
|  | @ -81,11 +86,13 @@ func _() { | |||
| 	_ = x[Extension-28] | ||||
| 	_ = x[LSM-29] | ||||
| 	_ = x[SkLookup-30] | ||||
| 	_ = x[Syscall-31] | ||||
| 	_ = x[maxProgramType-32] | ||||
| } | ||||
| 
 | ||||
| const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup" | ||||
| const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType" | ||||
| 
 | ||||
| var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294} | ||||
| var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315} | ||||
| 
 | ||||
| func (i ProgramType) String() string { | ||||
| 	if i >= ProgramType(len(_ProgramType_index)-1) { | ||||
|  | @ -93,61 +100,6 @@ func (i ProgramType) String() string { | |||
| 	} | ||||
| 	return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] | ||||
| } | ||||
| func _() { | ||||
| 	// An "invalid array index" compiler error signifies that the constant values have changed. | ||||
| 	// Re-run the stringer command to generate them again. | ||||
| 	var x [1]struct{} | ||||
| 	_ = x[AttachNone-0] | ||||
| 	_ = x[AttachCGroupInetIngress-0] | ||||
| 	_ = x[AttachCGroupInetEgress-1] | ||||
| 	_ = x[AttachCGroupInetSockCreate-2] | ||||
| 	_ = x[AttachCGroupSockOps-3] | ||||
| 	_ = x[AttachSkSKBStreamParser-4] | ||||
| 	_ = x[AttachSkSKBStreamVerdict-5] | ||||
| 	_ = x[AttachCGroupDevice-6] | ||||
| 	_ = x[AttachSkMsgVerdict-7] | ||||
| 	_ = x[AttachCGroupInet4Bind-8] | ||||
| 	_ = x[AttachCGroupInet6Bind-9] | ||||
| 	_ = x[AttachCGroupInet4Connect-10] | ||||
| 	_ = x[AttachCGroupInet6Connect-11] | ||||
| 	_ = x[AttachCGroupInet4PostBind-12] | ||||
| 	_ = x[AttachCGroupInet6PostBind-13] | ||||
| 	_ = x[AttachCGroupUDP4Sendmsg-14] | ||||
| 	_ = x[AttachCGroupUDP6Sendmsg-15] | ||||
| 	_ = x[AttachLircMode2-16] | ||||
| 	_ = x[AttachFlowDissector-17] | ||||
| 	_ = x[AttachCGroupSysctl-18] | ||||
| 	_ = x[AttachCGroupUDP4Recvmsg-19] | ||||
| 	_ = x[AttachCGroupUDP6Recvmsg-20] | ||||
| 	_ = x[AttachCGroupGetsockopt-21] | ||||
| 	_ = x[AttachCGroupSetsockopt-22] | ||||
| 	_ = x[AttachTraceRawTp-23] | ||||
| 	_ = x[AttachTraceFEntry-24] | ||||
| 	_ = x[AttachTraceFExit-25] | ||||
| 	_ = x[AttachModifyReturn-26] | ||||
| 	_ = x[AttachLSMMac-27] | ||||
| 	_ = x[AttachTraceIter-28] | ||||
| 	_ = x[AttachCgroupInet4GetPeername-29] | ||||
| 	_ = x[AttachCgroupInet6GetPeername-30] | ||||
| 	_ = x[AttachCgroupInet4GetSockname-31] | ||||
| 	_ = x[AttachCgroupInet6GetSockname-32] | ||||
| 	_ = x[AttachXDPDevMap-33] | ||||
| 	_ = x[AttachCgroupInetSockRelease-34] | ||||
| 	_ = x[AttachXDPCPUMap-35] | ||||
| 	_ = x[AttachSkLookup-36] | ||||
| 	_ = x[AttachXDP-37] | ||||
| } | ||||
| 
 | ||||
| const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP" | ||||
| 
 | ||||
| var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774} | ||||
| 
 | ||||
| func (i AttachType) String() string { | ||||
| 	if i >= AttachType(len(_AttachType_index)-1) { | ||||
| 		return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" | ||||
| 	} | ||||
| 	return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] | ||||
| } | ||||
| func _() { | ||||
| 	// An "invalid array index" compiler error signifies that the constant values have changed. | ||||
| 	// Re-run the stringer command to generate them again. | ||||
vendor/github.com/containerd/cgroups/Protobuild.toml (generated, vendored, 46 lines changed)
|  | @ -1,46 +0,0 @@ | |||
| version = "unstable" | ||||
| generator = "gogoctrd" | ||||
| plugins = ["grpc"] | ||||
| 
 | ||||
| # Control protoc include paths. Below are usually some good defaults, but feel | ||||
| # free to try it without them if it works for your project. | ||||
| [includes] | ||||
|   # Include paths that will be added before all others. Typically, you want to | ||||
|   # treat the root of the project as an include, but this may not be necessary. | ||||
|   # before = ["."] | ||||
| 
 | ||||
|   # Paths that should be treated as include roots in relation to the vendor | ||||
|   # directory. These will be calculated with the vendor directory nearest the | ||||
|   # target package. | ||||
|   # vendored = ["github.com/gogo/protobuf"] | ||||
|   packages = ["github.com/gogo/protobuf"] | ||||
| 
 | ||||
|   # Paths that will be added untouched to the end of the includes. We use | ||||
|   # `/usr/local/include` to pickup the common install location of protobuf. | ||||
|   # This is the default. | ||||
|   after = ["/usr/local/include", "/usr/include"] | ||||
| 
 | ||||
| # This section maps protobuf imports to Go packages. These will become | ||||
| # `-M` directives in the call to the go protobuf generator. | ||||
| [packages] | ||||
|   "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" | ||||
|   "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" | ||||
|   "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" | ||||
|   "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" | ||||
|   "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" | ||||
| 
 | ||||
| # Aggregate the API descriptors to lock down API changes. | ||||
| [[descriptors]] | ||||
| prefix = "github.com/containerd/cgroups/stats/v1" | ||||
| target = "stats/v1/metrics.pb.txt" | ||||
| ignore_files = [ | ||||
| 	"google/protobuf/descriptor.proto", | ||||
| 	"gogoproto/gogo.proto" | ||||
| ] | ||||
| [[descriptors]] | ||||
| prefix = "github.com/containerd/cgroups/v2/stats" | ||||
| target = "v2/stats/metrics.pb.txt" | ||||
| ignore_files = [ | ||||
| 	"google/protobuf/descriptor.proto", | ||||
| 	"gogoproto/gogo.proto" | ||||
| ] | ||||
							
								
								
									
vendor/github.com/containerd/cgroups/Vagrantfile (generated, vendored, 46 lines changed)
|  | @ -1,46 +0,0 @@ | |||
| # -*- mode: ruby -*- | ||||
| # vi: set ft=ruby : | ||||
| 
 | ||||
| Vagrant.configure("2") do |config| | ||||
| # Fedora box is used for testing cgroup v2 support | ||||
|   config.vm.box = "fedora/35-cloud-base" | ||||
|   config.vm.provider :virtualbox do |v| | ||||
|     v.memory = 4096 | ||||
|     v.cpus = 2 | ||||
|   end | ||||
|   config.vm.provider :libvirt do |v| | ||||
|     v.memory = 4096 | ||||
|     v.cpus = 2 | ||||
|   end | ||||
|   config.vm.provision "shell", inline: <<-SHELL | ||||
|     set -eux -o pipefail | ||||
|     # configuration | ||||
|     GO_VERSION="1.17.7" | ||||
| 
 | ||||
|     # install gcc and Golang | ||||
|     dnf -y install gcc | ||||
|     curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local | ||||
| 
 | ||||
|     # setup env vars | ||||
|     cat >> /etc/profile.d/sh.local <<EOF | ||||
| PATH=/usr/local/go/bin:$PATH | ||||
| GO111MODULE=on | ||||
| export PATH GO111MODULE | ||||
| EOF | ||||
|     source /etc/profile.d/sh.local | ||||
| 
 | ||||
|     # enter /root/go/src/github.com/containerd/cgroups | ||||
|     mkdir -p /root/go/src/github.com/containerd | ||||
|     ln -s /vagrant /root/go/src/github.com/containerd/cgroups | ||||
|     cd /root/go/src/github.com/containerd/cgroups | ||||
| 
 | ||||
|     # create /test.sh | ||||
|     cat > /test.sh <<EOF | ||||
| #!/bin/bash | ||||
| set -eux -o pipefail | ||||
| cd /root/go/src/github.com/containerd/cgroups | ||||
| go test -v ./... | ||||
| EOF | ||||
|     chmod +x /test.sh | ||||
|   SHELL | ||||
| end | ||||
							
								
								
									
vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go (generated, vendored, 6125 lines changed; file diff suppressed because it is too large)
vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go (generated, vendored, 3992 lines changed; file diff suppressed because it is too large)
|  | @ -22,3 +22,5 @@ cgutil: | |||
| 
 | ||||
| proto: | ||||
| 	protobuild --quiet ${PACKAGES} | ||||
| 	# Keep them Go-idiomatic and backward-compatible with the gogo/protobuf era. | ||||
| 	go-fix-acronym -w -a '(Cpu|Tcp|Rss)' $(shell find cgroup1/stats/ cgroup2/stats/ -name '*.pb.go') | ||||
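The go-fix-acronym pass is why the regenerated stats types spell CPU, TCP and RSS in upper case instead of the gogo-era Cpu/Tcp/Rss. A rough sketch of what that means for a consumer of the cgroup v2 stats, assuming the regenerated package exposes fields such as Metrics.CPU and CPUStat.UsageUsec (names inferred from the acronym rule, not shown in this diff):

package main

import (
	"fmt"

	v2 "github.com/containerd/cgroups/v3/cgroup2/stats"
)

func main() {
	var m v2.Metrics

	// After the acronym fix the generated field is CPU rather than the
	// gogo-era Cpu, so callers need the upper-case spelling.
	if m.CPU != nil {
		fmt.Println("cpu usage (usec):", m.CPU.UsageUsec)
	}
}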
Some files were not shown because too many files have changed in this diff.