3 changes: 3 additions & 0 deletions kernel/bpf/bpf_local_storage.c
@@ -609,6 +609,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
copy_map_value_locked(&smap->map, old_sdata->data,
value, false);
bpf_obj_free_fields(smap->map.record, old_sdata->data);
return old_sdata;
}
}
@@ -641,6 +642,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
if (old_sdata && (map_flags & BPF_F_LOCK)) {
copy_map_value_locked(&smap->map, old_sdata->data, value,
false);
bpf_obj_free_fields(smap->map.record, old_sdata->data);
selem = SELEM(old_sdata);
goto unlock;
}
@@ -654,6 +656,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,

/* Third, remove old selem, SELEM(old_sdata) */
if (old_sdata) {
bpf_obj_free_fields(smap->map.record, old_sdata->data);
bpf_selem_unlink_map(SELEM(old_sdata));
bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
true, &old_selem_free_list);
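
All three additions in this file follow one pattern. copy_map_value_locked() copies around the special fields recorded in smap->map.record, so a kptr already stored in the old value survives the copy; without an explicit bpf_obj_free_fields() the object it references is leaked on every in-place update (the two BPF_F_LOCK paths) and when SELEM(old_sdata) is replaced outright. A minimal userspace trigger, sketched under the assumption of a local storage map whose value embeds a kptr and a bpf_spin_lock (map_fd, cg_fd, and the struct layout are hypothetical, obtained via libbpf elsewhere):

#include <bpf/bpf.h>
#include <linux/types.h>

/* Assumed mirror of the BPF-side value type. */
struct lock_map_value {
	__u64 kptr;
	struct bpf_spin_lock lock;
	int value;
};

/* Overwrite existing cgroup storage in place through the BPF_F_LOCK
 * fast path patched above; with this fix the old value's kptr is
 * released instead of leaking. */
static int overwrite_cgrp_storage(int map_fd, int cg_fd)
{
	struct lock_map_value zeroed = {};

	return bpf_map_update_elem(map_fd, &cg_fd, &zeroed, BPF_F_LOCK);
}
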
4 changes: 4 additions & 0 deletions kernel/bpf/hashtab.c
@@ -950,12 +950,14 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
if (!onallcpus) {
/* copy true value_size bytes */
copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
bpf_obj_free_fields(htab->map.record, this_cpu_ptr(pptr));
} else {
u32 size = round_up(htab->map.value_size, 8);
int off = 0, cpu;

for_each_possible_cpu(cpu) {
copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
off += size;
}
}
@@ -1122,6 +1124,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
copy_map_value_locked(map,
htab_elem_value(l_old, key_size),
value, false);
check_and_free_fields(htab, l_old);
return 0;
}
/* fall through, grab the bucket lock and lookup again.
@@ -1150,6 +1153,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
copy_map_value_locked(map,
htab_elem_value(l_old, key_size),
value, false);
check_and_free_fields(htab, l_old);
ret = 0;
goto err;
}
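
The hash map side gets the same treatment: copy_map_value()/copy_map_value_long() skip the special fields, so pcpu_copy_value() now releases the old fields of every per-CPU slot it writes, and the two in-place BPF_F_LOCK paths of htab_map_update_elem() call check_and_free_fields() after copy_map_value_locked(). For illustration, a BPF-side sketch of how a reference ends up parked in a hash map value in the first place (hypothetical names; bpf_experimental.h is the selftests header that declares bpf_obj_new()/bpf_obj_drop()):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node_data {
	long payload;
};

struct map_value {
	struct node_data __kptr *node;
	struct bpf_spin_lock lock;
	int value;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} leak_hash SEC(".maps");

SEC("tc")
int store_node_kptr(struct __sk_buff *ctx)
{
	struct node_data *n, *old;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&leak_hash, &key);
	if (!v)
		return 0;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;

	/* The map value now owns a reference to n. */
	old = bpf_kptr_xchg(&v->node, n);
	if (old)
		bpf_obj_drop(old);

	/* A later bpf_map_update_elem(fd, &key, &val, BPF_F_LOCK) from
	 * userspace copies over v in place; before this fix the reference
	 * parked in v->node was never dropped. */
	return 0;
}

char _license[] SEC("license") = "GPL";
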
178 changes: 177 additions & 1 deletion tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -3,7 +3,7 @@

#include <test_progs.h>
#include <network_helpers.h>

#include "cgroup_helpers.h"
#include "refcounted_kptr.skel.h"
#include "refcounted_kptr_fail.skel.h"

@@ -44,3 +44,179 @@ void test_refcounted_kptr_wrong_owner(void)
ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
refcounted_kptr__destroy(skel);
}

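/* Common flow for the map-backed subtests below: seed the element from
 * userspace, run prog_leak (which presumably stashes a refcounted node in
 * the map value and reports the node's refcount as its retval: 2 while
 * both the collection and the map hold a reference), overwrite the
 * element so the fixed update path drops the map-held reference, then
 * run prog_check to confirm the refcount fell back to 1.
 */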
static void test_refcnt_leak(void *values, size_t values_sz, u64 flags, struct bpf_map *map,
struct bpf_program *prog_leak, struct bpf_program *prog_check)
{
int ret, fd, key = 0;
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);

ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
if (!ASSERT_OK(ret, "bpf_map__update_elem init"))
return;

fd = bpf_program__fd(prog_leak);
ret = bpf_prog_test_run_opts(fd, &opts);
if (!ASSERT_OK(ret, "test_run_opts"))
return;
if (!ASSERT_EQ(opts.retval, 2, "retval refcount"))
return;

ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
if (!ASSERT_OK(ret, "bpf_map__update_elem dec refcount"))
return;

fd = bpf_program__fd(prog_check);
ret = bpf_prog_test_run_opts(fd, &opts);
ASSERT_OK(ret, "test_run_opts");
ASSERT_EQ(opts.retval, 1, "retval");
}

static void test_percpu_hash_refcount_leak(void)
{
struct refcounted_kptr *skel;
size_t values_sz;
u64 *values;
int cpu_nr;

cpu_nr = libbpf_num_possible_cpus();
if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
return;

values = calloc(cpu_nr, sizeof(u64));
if (!ASSERT_OK_PTR(values, "calloc values"))
return;

skel = refcounted_kptr__open_and_load();
if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
free(values);
return;
}

values_sz = cpu_nr * sizeof(u64);
memset(values, 0, values_sz);

test_refcnt_leak(values, values_sz, 0, skel->maps.pcpu_hash,
skel->progs.pcpu_hash_refcount_leak,
skel->progs.check_pcpu_hash_refcount);

refcounted_kptr__destroy(skel);
free(values);
}

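/* Userspace mirror of the BPF-side value type (presumably declared in
 * progs/refcounted_kptr.c, which this diff does not show): a kptr-sized
 * slot, the bpf_spin_lock that BPF_F_LOCK updates require, and a plain
 * counter the programs report through.
 */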
struct lock_map_value {
u64 kptr;
struct bpf_spin_lock lock;
int value;
};

static void test_hash_lock_refcount_leak(void)
{
struct lock_map_value value = {};
struct refcounted_kptr *skel;

skel = refcounted_kptr__open_and_load();
if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
return;

test_refcnt_leak(&value, sizeof(value), BPF_F_LOCK, skel->maps.lock_hash,
skel->progs.hash_lock_refcount_leak,
skel->progs.check_hash_lock_refcount);

refcounted_kptr__destroy(skel);
}

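/* Cgroup-storage variant: attach the leaking program to a fresh cgroup
 * and drive it with a TCP connection; value.value, which the programs
 * presumably use to report the node's refcount, reads 2 while the leaked
 * reference is held. Overwriting the storage from userspace now drops
 * that reference, so after swapping in the checking program and
 * connecting again the counter reads 1.
 */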
static void test_cgrp_storage_refcount_leak(u64 flags)
{
int server_fd = -1, client_fd = -1;
struct lock_map_value value = {};
struct refcounted_kptr *skel;
struct bpf_link *link;
struct bpf_map *map;
int cgroup, err;

cgroup = test__join_cgroup("/cg_refcount_leak");
if (!ASSERT_GE(cgroup, 0, "test__join_cgroup"))
return;

skel = refcounted_kptr__open_and_load();
if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
goto out;

link = bpf_program__attach_cgroup(skel->progs.cgroup_storage_refcount_leak, cgroup);
if (!ASSERT_OK_PTR(link, "bpf_program__attach_cgroup"))
goto out;
skel->links.cgroup_storage_refcount_leak = link;

server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_server"))
goto out;

client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
goto out;

map = skel->maps.cgrp_strg;
err = bpf_map__lookup_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
if (!ASSERT_OK(err, "bpf_map__lookup_elem"))
goto out;

ASSERT_EQ(value.value, 2, "refcount");

err = bpf_map__update_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
if (!ASSERT_OK(err, "bpf_map__update_elem"))
goto out;

err = bpf_link__detach(skel->links.cgroup_storage_refcount_leak);
if (!ASSERT_OK(err, "bpf_link__detach"))
goto out;

link = bpf_program__attach(skel->progs.check_cgroup_storage_refcount);
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
goto out;
skel->links.check_cgroup_storage_refcount = link;

close(client_fd);
client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
goto out;

err = bpf_map__lookup_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
if (!ASSERT_OK(err, "bpf_map__lookup_elem"))
goto out;

ASSERT_EQ(value.value, 1, "refcount");
out:
close(cgroup);
refcounted_kptr__destroy(skel);
if (client_fd >= 0)
close(client_fd);
if (server_fd >= 0)
close(server_fd);
}

static void test_cgroup_storage_refcount_leak(void)
{
test_cgrp_storage_refcount_leak(0);
}

static void test_cgroup_storage_lock_refcount_leak(void)
{
test_cgrp_storage_refcount_leak(BPF_F_LOCK);
}

void test_kptr_refcount_leak(void)
{
if (test__start_subtest("percpu_hash_refcount_leak"))
test_percpu_hash_refcount_leak();
if (test__start_subtest("hash_lock_refcount_leak"))
test_hash_lock_refcount_leak();
if (test__start_subtest("cgroup_storage_refcount_leak"))
test_cgroup_storage_refcount_leak();
if (test__start_subtest("cgroup_storage_lock_refcount_leak"))
test_cgroup_storage_lock_refcount_leak();
}