We have pointers to struct map instances in several places, such as in
hist_entry instances, so we need a way to know when it is safe to
destroy them; otherwise we may either keep leaking them or end up
referencing deleted instances.

Start fixing this by reference counting them.

This patch puts the reference count for struct map in place, replacing
direct map__delete() calls with map__put() ones, and grabbing a
reference when a map is added to the maps struct where the maps for a
struct thread are kept.

Next we'll grab reference counts when setting pointers to struct map
instances, in places like the hist_entry code.
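
To make the intended lifetime rules concrete, here is a minimal
standalone sketch of the get/put discipline, written with C11 atomics
rather than the tools/include atomic_t wrappers used in the patch; the
object__*() names are hypothetical stand-ins, not perf code:

/*
 * Minimal standalone sketch of the get/put scheme introduced here,
 * using C11 atomics instead of the tools/include/linux/atomic.h
 * wrappers that the patch itself uses.  The object__*() helpers are
 * illustrative stand-ins for map__new()/map__get()/map__put().
 */
#include <stdatomic.h>
#include <stdlib.h>

struct object {
        atomic_int refcnt;
        /* payload would live here */
};

static struct object *object__new(void)
{
        struct object *obj = calloc(1, sizeof(*obj));

        if (obj)
                atomic_init(&obj->refcnt, 1); /* the creator holds one reference */
        return obj;
}

static struct object *object__get(struct object *obj)
{
        if (obj)
                atomic_fetch_add(&obj->refcnt, 1);
        return obj;
}

static void object__put(struct object *obj)
{
        /* free only when the last reference is dropped */
        if (obj && atomic_fetch_sub(&obj->refcnt, 1) == 1)
                free(obj);
}

int main(void)
{
        struct object *obj = object__new();     /* refcnt == 1 */
        struct object *held = object__get(obj); /* a container keeps it: refcnt == 2 */

        object__put(obj);       /* the creator drops its reference */
        object__put(held);      /* the last put frees the object */
        return 0;
}

The rule mirrored by map__get()/map__put() below is that whoever stores
a pointer owns a reference, and the object is only freed once the last
holder calls put.
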
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-wi19xczk0t2a41r1i2chuio5@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
kmap->ref_reloc_sym = NULL;
}
- map__delete(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL;
}
}
thread__insert_map(thread, map);
thread__put(thread);
+ map__put(map);
return 0;
out_problem_map:
thread__insert_map(thread, map);
thread__put(thread);
+ map__put(map);
return 0;
out_problem_map:
map->groups = NULL;
map->referenced = false;
map->erange_warned = false;
+ atomic_set(&map->refcnt, 1);
}
struct map *map__new(struct machine *machine, u64 start, u64 len,
free(map);
}
+void map__put(struct map *map)
+{
+ if (map && atomic_dec_and_test(&map->refcnt))
+ map__delete(map);
+}
+
void map__fixup_start(struct map *map)
{
struct rb_root *symbols = &map->dso->symbols[map->type];
next = rb_next(&pos->rb_node);
rb_erase_init(&pos->rb_node, root);
- map__delete(pos);
+ map__put(pos);
}
}
list_for_each_entry_safe(pos, n, &maps->removed_maps, node) {
list_del_init(&pos->node);
- map__delete(pos);
+ map__put(pos);
}
}
if (before == NULL) {
err = -ENOMEM;
- goto move_map;
+ goto put_map;
}
before->end = map->start;
if (after == NULL) {
err = -ENOMEM;
- goto move_map;
+ goto put_map;
}
after->start = map->end;
if (verbose >= 2)
map__fprintf(after, fp);
}
-move_map:
+put_map:
/*
* If we have references, just move them to a separate list.
*/
if (pos->referenced)
list_add_tail(&pos->node, &maps->removed_maps);
else
- map__delete(pos);
+ map__put(pos);
if (err)
goto out;
rb_link_node(&map->rb_node, parent, p);
rb_insert_color(&map->rb_node, &maps->entries);
+ map__get(map);
}
void maps__insert(struct maps *maps, struct map *map)
static void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
+ map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)
struct dso *dso;
struct map_groups *groups;
+ atomic_t refcnt;
};
struct kmap {
struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
void map__delete(struct map *map);
struct map *map__clone(struct map *map);
+
+static inline struct map *map__get(struct map *map)
+{
+ if (map)
+ atomic_inc(&map->refcnt);
+ return map;
+}
+
+void map__put(struct map *map);
+
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
{
if (map && user) {
/* Only the user map needs to be released */
- map__delete(map);
+ map__put(map);
}
}
out:
if (map && !is_kprobe) {
- map__delete(map);
+ map__put(map);
}
return ret;
dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
end:
if (user) {
- map__delete(map);
+ map__put(map);
}
exit_symbol_maps();
map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */
if (kmaps) {
+ map__get(map);
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
+ map__put(map);
}
}
map->pgoff = new_map->pgoff;
map->map_ip = new_map->map_ip;
map->unmap_ip = new_map->unmap_ip;
- map__delete(new_map);
/* Ensure maps are correctly ordered */
+ map__get(map);
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
+ map__put(map);
} else {
map_groups__insert(kmaps, new_map);
}
+
+ map__put(new_map);
}
/*
while (!list_empty(&md.maps)) {
map = list_entry(md.maps.next, struct map, node);
list_del_init(&map->node);
- map__delete(map);
+ map__put(map);
}
close(fd);
return -EINVAL;
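
As noted in the changelog, the follow-up step is to take a reference
wherever a pointer to a struct map is stored, for example in
hist_entry.  A rough sketch of that pattern, using the
map__get()/map__put() helpers added in this patch; struct map_holder
and its setter are hypothetical and only illustrate the ownership rule:

#include "map.h"

/*
 * Hypothetical example of the next step: any structure that caches a
 * struct map pointer takes its own reference while the pointer is
 * live.  struct map_holder and map_holder__set() are illustrative
 * only, they are not part of this patch or of the perf sources.
 */
struct map_holder {
        struct map *map;
};

static void map_holder__set(struct map_holder *mh, struct map *new_map)
{
        map__put(mh->map);           /* release the old map, if any (NULL-safe) */
        mh->map = map__get(new_map); /* pin the new one while we point at it */
}

This is the same ownership rule the patch applies to the per-thread
maps rbtree: maps__insert() grabs a reference and __maps__remove()
drops it.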