This commit adds struct_ops support to the ELF reader: it classifies non-executable PROGBITS sections,
parses their BTF Datasec to build MapSpecs, associates relocs with func-pointer members to
set ps.AttachTo, and adds TestStructOps.
Related: #1845
Signed-off-by: shun159 <dreamdiagnosis@gmail.com>
testdata/errors \
testdata/variables \
testdata/arena \
+ testdata/struct_ops \
btf/testdata/relocs \
btf/testdata/relocs_read \
btf/testdata/relocs_read_tgt \
case sec.Type == elf.SHT_REL:
// Store relocations under the section index of the target
relSections[elf.SectionIndex(sec.Info)] = sec
- case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
- sections[idx] = newElfSection(sec, programSection)
+ case sec.Type == elf.SHT_PROGBITS && sec.Size > 0:
+ if (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0 {
+ sections[idx] = newElfSection(sec, programSection)
+ } else if sec.Name == structOpsLinkSec {
+			// Classify by section name so that the struct_ops-specific
+			// section (.struct_ops.link) is correctly recognized as
+			// non-executable PROGBITS, allowing value placement and link metadata to be loaded.
+ sections[idx] = newElfSection(sec, structOpsSection)
+ } else if sec.Name == structOpsSec {
+ return nil, fmt.Errorf("section %q: got '.struct_ops' section: %w", sec.Name, ErrNotSupported)
+ }
}
}
return nil, fmt.Errorf("load programs: %w", err)
}
+	// associate struct members with ProgramSpecs using relocations
+ if err := ec.associateStructOpsRelocs(progs); err != nil {
+ return nil, fmt.Errorf("load struct_ops: %w", err)
+ }
+
return &CollectionSpec{
ec.maps,
progs,
btfMapSection
programSection
dataSection
+ structOpsSection
)
type elfSection struct {
return nil
}
+// associateStructOpsRelocs handles struct_ops sections (`.struct_ops.link`)
+// and associates the target function with the correct struct member in the map.
+//
+// For each struct_ops section it builds one MapSpec per BTF variable and
+// resolves each program's AttachTo from the section's relocations.
+func (ec *elfCode) associateStructOpsRelocs(progs map[string]*ProgramSpec) error {
+	for _, sec := range ec.sections {
+		if sec.kind != structOpsSection {
+			continue
+		}
+
+		// Raw section bytes holding the initial map value(s).
+		userData, err := sec.Data()
+		if err != nil {
+			return fmt.Errorf("failed to read section data: %w", err)
+		}
+
+		// Resolve the BTF datasec describing variables in this section.
+		var ds *btf.Datasec
+		if err := ec.btf.TypeByName(sec.Name, &ds); err != nil {
+			return fmt.Errorf("datasec %s: %w", sec.Name, err)
+		}
+
+		// Maps declared in .struct_ops.link are attached via bpf_link,
+		// so they need BPF_F_LINK set.
+		flags := uint32(0)
+		if sec.Name == structOpsLinkSec {
+			flags = sys.BPF_F_LINK
+		}
+
+		// Each datasec variable is one struct_ops instance.
+		for _, vsi := range ds.Vars {
+			userSt, baseOff, err := ec.createStructOpsMap(vsi, userData, flags)
+			if err != nil {
+				return err
+			}
+
+			// Point relocated programs at their struct members.
+			if err := structOpsSetAttachTo(sec, baseOff, userSt, progs); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// createStructOpsMap creates and registers a MapSpec for a single struct_ops
+// instance described by vsi.
+//
+// It returns the user-visible struct type and the instance's byte offset
+// within userData. The map's single entry is initialized from the section
+// bytes at that offset.
+func (ec *elfCode) createStructOpsMap(vsi btf.VarSecinfo, userData []byte, flags uint32) (*btf.Struct, uint32, error) {
+	varType, ok := btf.As[*btf.Var](vsi.Type)
+	if !ok {
+		return nil, 0, fmt.Errorf("vsi: expect var, got %T", vsi.Type)
+	}
+
+	// The variable name doubles as the map name.
+	mapName := varType.Name
+
+	userSt, ok := btf.As[*btf.Struct](varType.Type)
+	if !ok {
+		return nil, 0, fmt.Errorf("var %s: expect struct, got %T", varType.Name, varType.Type)
+	}
+
+	// Make sure the instance's value fits inside the section data.
+	userSize := userSt.Size
+	baseOff := vsi.Offset
+	if baseOff+userSize > uint32(len(userData)) {
+		return nil, 0, fmt.Errorf("%s exceeds section", mapName)
+	}
+
+	// Register the MapSpec for this struct_ops instance if it doesn't exist yet.
+	if _, exists := ec.maps[mapName]; exists {
+		return nil, 0, fmt.Errorf("struct_ops map %s: already exists", mapName)
+	}
+
+	ec.maps[mapName] = &MapSpec{
+		Name:       mapName,
+		Type:       StructOpsMap,
+		Key:        &btf.Int{Size: 4},
+		KeySize:    structOpsKeySize,
+		ValueSize:  userSize, // length of the user-struct type
+		Value:      userSt,
+		Flags:      flags,
+		MaxEntries: 1,
+		Contents: []MapKV{
+			{
+				Key: uint32(0),
+				// Copy the initial value out of the section so later
+				// mutations of userData don't alias the spec.
+				Value: append([]byte(nil), userData[baseOff:baseOff+userSize]...),
+			},
+		},
+	}
+
+	return userSt, baseOff, nil
+}
+
type libbpfElfSectionDef struct {
pattern string
programType sys.ProgType
// This has been in the library since the beginning of time. Not sure
// where it came from.
{"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+ // Override libbpf definition because we want ignoreExtra.
+ {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE | ignoreExtra},
+ {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE | ignoreExtra},
}, elfSectionDefs...)
}
"syscall"
"testing"
+ "github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/kallsyms"
mustNewCollection(t, coll, nil)
}
+// TestStructOps verifies that LoadCollectionSpec parses a .struct_ops.link
+// section into a StructOpsMap MapSpec with the expected BTF value type,
+// initial contents, and program AttachTo association.
+func TestStructOps(t *testing.T) {
+	file := testutils.NativeFile(t, "testdata/struct_ops-%s.elf")
+	coll, err := LoadCollectionSpec(file)
+	qt.Assert(t, qt.IsNil(err))
+
+	// Expected raw map value: mirrors struct bpf_testmod_ops in the ELF.
+	// Function pointers are zero in the spec; only .data is initialized.
+	userData := []byte{
+		// test_1 func ptr (8B)
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		// test_2 func ptr (8B)
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		// data (4B) + padding (4B)
+		0xef, 0xbe, 0xad, 0xde, 0x00, 0x00, 0x00, 0x00,
+	}
+
+	want := &CollectionSpec{
+		Maps: map[string]*MapSpec{
+			"testmod_ops": {
+				Name:       "testmod_ops",
+				Type:       StructOpsMap,
+				MaxEntries: 1,
+				// .struct_ops.link implies link-based attachment.
+				Flags:     sys.BPF_F_LINK,
+				Key:       &btf.Int{Size: 4},
+				KeySize:   4,
+				ValueSize: 24,
+				Value: &btf.Struct{
+					Name: "bpf_testmod_ops",
+					Size: 24,
+					Members: []btf.Member{
+						{
+							Name: "test_1",
+							Type: &btf.Pointer{
+								Target: &btf.FuncProto{
+									Params: []btf.FuncParam{},
+									Return: &btf.Int{Name: "int", Size: 4, Encoding: btf.Signed}}},
+							Offset: 0,
+						},
+						{
+							Name: "test_2",
+							Type: &btf.Pointer{
+								Target: &btf.FuncProto{
+									Params: []btf.FuncParam{
+										{Type: &btf.Int{Name: "int", Size: 4, Encoding: btf.Signed}},
+										{Type: &btf.Int{Name: "int", Size: 4, Encoding: btf.Signed}},
+									},
+									Return: (*btf.Void)(nil),
+								},
+							},
+							// Member offsets are in bits.
+							Offset: 64,
+						},
+						{
+							Name:   "data",
+							Type:   &btf.Int{Name: "int", Size: 4, Encoding: btf.Signed},
+							Offset: 128, // bits
+						},
+					},
+				},
+				Contents: []MapKV{
+					{
+						Key:   uint32(0),
+						Value: userData,
+					},
+				},
+			},
+		},
+		Programs: map[string]*ProgramSpec{
+			"test_1": {
+				Name: "test_1",
+				Type: StructOps,
+				// AttachTo encodes "struct_name:member_name".
+				AttachTo:    "bpf_testmod_ops:test_1",
+				License:     "GPL",
+				SectionName: "struct_ops/test_1",
+				Instructions: asm.Instructions{
+					asm.Mov.Imm(asm.R0, 0),
+					asm.Return(),
+				},
+			},
+		},
+		Variables: map[string]*VariableSpec{},
+	}
+
+	testModOps, ok := coll.Maps["testmod_ops"]
+	if !ok {
+		t.Fatalf("testmod_ops doesn't exist")
+	}
+
+	data, ok := testModOps.Contents[0].Value.([]byte)
+	if !ok {
+		t.Fatalf("Contents[0].Value should be an array of byte")
+	}
+
+	qt.Assert(t, qt.CmpEquals(coll.Programs, want.Programs, csCmpOpts))
+	qt.Assert(t, qt.CmpEquals(coll.Maps, want.Maps, csCmpOpts))
+	qt.Assert(t, qt.CmpEquals(testModOps.Value, want.Maps["testmod_ops"].Value, csCmpOpts))
+	qt.Assert(t, qt.CmpEquals(data, userData, csCmpOpts))
+}
+
var (
elfPath = flag.String("elfs", os.Getenv("CI_KERNEL_SELFTESTS"), "`Path` containing libbpf-compatible ELFs (defaults to $CI_KERNEL_SELFTESTS)")
elfPattern = flag.String("elf-pattern", "*.o", "Glob `pattern` for object files that should be tested")
t.Fatal("Expected an error during load")
}
} else if err != nil {
+ if errors.Is(err, errUnknownStructOps) {
+ t.Skip("Skipping since the struct_ops target doesn't exist in kernel")
+ }
t.Fatal("Error during loading:", err)
}
}
}
}
- for _, ps := range spec.Programs {
- if ps.Type == StructOps {
- ps.AttachTo = ""
- }
- }
-
coreFiles := sourceOfBTF(t, path)
if len(coreFiles) == 0 {
// NB: test_core_reloc_kernel.o doesn't have dedicated BTF and
// This error is detected based on heuristics and therefore may not be reliable.
var errUnknownKfunc = errors.New("unknown kfunc")
+// errUnknownStructOps is returned when the struct_ops target doesn't exist in kernel
+var errUnknownStructOps = errors.New("unknown struct_ops target")
+
// ProgramID represents the unique ID of an eBPF program.
type ProgramID = sys.ProgramID
package ebpf
import (
+ "errors"
"fmt"
"reflect"
"strings"
)
const structOpsValuePrefix = "bpf_struct_ops_"
+const structOpsLinkSec = ".struct_ops.link"
+const structOpsSec = ".struct_ops"
+const structOpsKeySize = 4
// structOpsFindInnerType returns the "inner" struct inside a value struct_ops type.
//
target := btf.Type((*btf.Struct)(nil))
spec, module, err := findTargetInKernel(vTypeName, &target, cache)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, 0, nil, fmt.Errorf("%q doesn't exist in kernel: %w", vTypeName, errUnknownStructOps)
+ }
if err != nil {
return nil, 0, nil, fmt.Errorf("lookup value type %q: %w", vTypeName, err)
}
}
return true
}
+
+// structOpsSetAttachTo sets p.AttachTo in the expected "struct_name:memberName"
+// format based on the struct definition.
+//
+// This relies on the assumption that each function-pointer member in the
+// `.struct_ops` section has a relocation at its starting byte offset.
+func structOpsSetAttachTo(
+	sec *elfSection,
+	baseOff uint32,
+	userSt *btf.Struct,
+	progs map[string]*ProgramSpec) error {
+	for _, m := range userSt.Members {
+		// Member offsets are in bits; relocations are keyed by byte offset
+		// relative to the section start.
+		memberOff := m.Offset
+		sym, ok := sec.relocations[uint64(baseOff+memberOff.Bytes())]
+		if !ok {
+			// No relocation: plain data member, or a func pointer that
+			// isn't assigned a program. Nothing to associate.
+			continue
+		}
+		p, ok := progs[sym.Name]
+		if !ok || p == nil {
+			return fmt.Errorf("program %s not found", sym.Name)
+		}
+
+		if p.Type != StructOps {
+			return fmt.Errorf("program %s is not StructOps", sym.Name)
+		}
+		p.AttachTo = userSt.Name + ":" + m.Name
+	}
+	return nil
+}
--- /dev/null
+/* Test fixture for struct_ops ELF parsing: declares a struct_ops type
+ * with two function-pointer members and one data member, assigns a
+ * program to test_1 only, and places the instance in .struct_ops.link.
+ */
+#include "common.h"
+
+char _license[] __section("license") = "GPL";
+
+struct bpf_testmod_ops {
+	int (*test_1)(void);
+	void (*test_2)(int, int);
+	int data;
+};
+
+__section("struct_ops/test_1") int test_1(void) {
+	return 0;
+}
+
+/* test_2 is deliberately left unset so the loader must tolerate
+ * func-pointer members without relocations. */
+__section(".struct_ops.link") struct bpf_testmod_ops testmod_ops = {
+	.test_1 = (void *)test_1,
+	.data = 0xdeadbeef,
+};