sp = (unsigned long) regs[PERF_REG_ARM_SP];
- map = maps__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
sp = (unsigned long) regs[PERF_REG_ARM64_SP];
- map = maps__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
sp = (unsigned long) regs[PERF_REG_POWERPC_R1];
- map = maps__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
sp = (unsigned long) regs[PERF_REG_X86_SP];
- map = maps__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
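All four architecture variants of the dwarf-unwind test update the same lookup: take the sampled stack pointer from the architecture's SP register and resolve it against the thread's maps (formerly thread->mg). A minimal sketch of that pattern, with a hypothetical helper name and perf's internal headers assumed to be included:

/* Sketch, not from the patch: resolve the map covering the sampled SP. */
static struct map *sample__stack_map(struct thread *thread, u64 sp)
{
	struct map *map = maps__find(thread->maps, sp);

	if (!map)
		pr_debug("failed to get stack map\n");
	return map;
}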
fprintf(fp, "%s\n", thread__comm_str(thread));
- maps__fprintf_task(thread->mg, comm_indent, fp);
+ maps__fprintf_task(thread->maps, comm_indent, fp);
if (!list_empty(&task->children)) {
list_for_each_entry(child, &task->children, list)
len = al.map->end - addr;
/* Read the object code using perf */
- ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
+ ret_len = dso__data_read_offset(al.map->dso, thread->maps->machine,
al.addr, buf1, len);
if (ret_len != len) {
pr_debug("dso__data_read_offset failed\n");
TEST_ASSERT_VAL("failed to create threads",
leader && t1 && t2 && t3 && other);
- mg = leader->mg;
+ mg = leader->maps;
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 4);
/* test the map groups pointer is shared */
- TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
- TEST_ASSERT_VAL("map groups don't match", mg == t2->mg);
- TEST_ASSERT_VAL("map groups don't match", mg == t3->mg);
+ TEST_ASSERT_VAL("map groups don't match", mg == t1->maps);
+ TEST_ASSERT_VAL("map groups don't match", mg == t2->maps);
+ TEST_ASSERT_VAL("map groups don't match", mg == t3->maps);
/*
* Verify the other leader was created by previous call.
machine__remove_thread(machine, other);
machine__remove_thread(machine, other_leader);
- other_mg = other->mg;
+ other_mg = other->maps;
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_mg->refcnt), 2);
- TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
+ TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->maps);
/* release thread group */
thread__put(leader);
}
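The test pins down the ownership rules after the rename: every thread of a process holds one reference on the shared struct maps, so a leader plus three threads gives a refcount of four, and the instance is only freed when the last holder drops it. A sketch of that reference discipline, assuming a struct machine * as in the test and machine__findnew_thread() as the lookup:

/* Sketch, not from the patch: threads of one process share one struct maps. */
struct thread *leader = machine__findnew_thread(machine, 100, 100);
struct thread *worker = machine__findnew_thread(machine, 100, 101);
struct maps *maps;

if (leader->maps != worker->maps)
	pr_err("expected the maps to be shared\n");

maps = maps__get(leader->maps);	/* take an extra reference for our own use */
/* ... use maps ... */
maps__put(maps);		/* must be balanced with the get */

thread__put(worker);
thread__put(leader);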
if (h->ms.map == NULL && verbose > 1) {
- maps__fprintf(h->thread->mg, fp);
+ maps__fprintf(h->thread->maps, fp);
fprintf(fp, "%.10s end\n", graph_dotted_line);
}
}
*/
al.sym = node->ms.sym;
al.map = node->ms.map;
- al.mg = thread->mg;
+ al.mg = thread->maps;
al.addr = node->ip;
if (al.map && !al.sym)
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
- struct maps *mg = thread->mg;
+ struct maps *mg = thread->maps;
struct machine *machine = mg->machine;
bool load_map = false;
struct addr_location *al)
{
struct map *map = thread__find_map(thread, cpumode, addr, al);
- struct machine *machine = thread->mg->machine;
+ struct machine *machine = thread->maps->machine;
u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
if (map || addr_cpumode == cpumode)
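Both hunks sit on the address-resolution path: thread__find_map() now walks thread->maps, and the machine is reached through thread->maps->machine. A caller-side sketch of resolving a user-space address, with a hypothetical function name and perf's internal headers assumed:

/* Sketch, not from the patch: resolve a sampled user-space ip. */
static void sketch__resolve_ip(struct thread *thread, u64 ip)
{
	struct addr_location al;
	struct map *map = thread__find_map(thread, PERF_RECORD_MISC_USER, ip, &al);

	if (map) {
		struct symbol *sym = map__find_symbol(map, al.addr);

		pr_debug("%#" PRIx64 " -> %s\n", ip, sym ? sym->name : "[unknown]");
	}
}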
if (!leader)
goto out_err;
- if (!leader->mg)
- leader->mg = maps__new(machine);
+ if (!leader->maps)
+ leader->maps = maps__new(machine);
- if (!leader->mg)
+ if (!leader->maps)
goto out_err;
- if (th->mg == leader->mg)
+ if (th->maps == leader->maps)
return;
- if (th->mg) {
+ if (th->maps) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there never should be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
- if (!maps__empty(th->mg))
+ if (!maps__empty(th->maps))
pr_err("Discarding thread maps for %d:%d\n",
th->pid_, th->tid);
- maps__put(th->mg);
+ maps__put(th->maps);
}
- th->mg = maps__get(leader->mg);
+ th->maps = maps__get(leader->maps);
out_put:
thread__put(leader);
return;
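When a thread turns out to have a process leader, whatever maps it had are dropped (with a warning if they were not empty, since maps come from MMAP events that carry the real pid) and replaced by a reference on the leader's maps. The put-then-get swap from the hunk above, condensed into a hypothetical helper:

/* Sketch, not from the patch: make `th` share its leader's maps. */
static void sketch__share_leader_maps(struct thread *th, struct thread *leader)
{
	if (th->maps == leader->maps)
		return;

	if (th->maps) {
		if (!maps__empty(th->maps))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		maps__put(th->maps);		/* drop the old reference */
	}
	th->maps = maps__get(leader->maps);	/* share the leader's maps */
}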
*/
int maps__clone(struct thread *thread, struct maps *parent)
{
- struct maps *mg = thread->mg;
+ struct maps *mg = thread->maps;
int err = -ENOMEM;
struct map *map;
if (err)
return err;
- if (thread->mg && thread->mg->machine) {
- struct machine *machine = thread->mg->machine;
+ if (thread->maps && thread->maps->machine) {
+ struct machine *machine = thread->maps->machine;
const char *arch = perf_env__arch(machine->env);
ts->kernel_start = machine__kernel_start(machine);
pid_t pid = thread->pid_;
if (pid == thread->tid || pid == -1) {
- thread->mg = maps__new(machine);
+ thread->maps = maps__new(machine);
} else {
struct thread *leader = __machine__findnew_thread(machine, pid, pid);
if (leader) {
- thread->mg = maps__get(leader->mg);
+ thread->maps = maps__get(leader->maps);
thread__put(leader);
}
}
- return thread->mg ? 0 : -1;
+ return thread->maps ? 0 : -1;
}
struct thread *thread__new(pid_t pid, pid_t tid)
thread_stack__free(thread);
- if (thread->mg) {
- maps__put(thread->mg);
- thread->mg = NULL;
+ if (thread->maps) {
+ maps__put(thread->maps);
+ thread->maps = NULL;
}
down_write(&thread->namespaces_lock);
list_for_each_entry_safe(namespaces, tmp_namespaces,
list_add(&new->list, &thread->comm_list);
if (exec)
- unwind__flush_access(thread->mg);
+ unwind__flush_access(thread->maps);
}
thread->comm_set = true;
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
- maps__fprintf(thread->mg, fp);
+ maps__fprintf(thread->maps, fp);
}
int thread__insert_map(struct thread *thread, struct map *map)
{
int ret;
- ret = unwind__prepare_access(thread->mg, map, NULL);
+ ret = unwind__prepare_access(thread->maps, map, NULL);
if (ret)
return ret;
- maps__fixup_overlappings(thread->mg, map, stderr);
- maps__insert(thread->mg, map);
+ maps__fixup_overlappings(thread->maps, map, stderr);
+ maps__insert(thread->maps, map);
return 0;
}
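thread__insert_map() fixes the insertion order: the unwinder prepares its per-map state first, overlapping ranges are fixed up, and only then is the map added to thread->maps. On the caller side the maps keep their own reference, so the usual pattern (as in the MMAP event handlers) is to drop the local one right after inserting; a sketch with a hypothetical wrapper name:

/* Sketch, not from the patch: hand a freshly built map over to the thread. */
static int sketch__add_map(struct thread *thread, struct map *map)
{
	int err = thread__insert_map(thread, map);

	map__put(map);	/* on success thread->maps holds its own reference */
	return err;
}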
{
bool initialized = false;
int err = 0;
- struct maps *maps = thread->mg;
+ struct maps *maps = thread->maps;
struct map *map;
down_read(&maps->lock);
maps__for_each_entry(maps, map) {
- err = unwind__prepare_access(thread->mg, map, &initialized);
+ err = unwind__prepare_access(thread->maps, map, &initialized);
if (err || initialized)
break;
}
if (thread->pid_ == parent->pid_)
return thread__prepare_access(thread);
- if (thread->mg == parent->mg) {
+ if (thread->maps == parent->maps) {
pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
thread->pid_, thread->tid, parent->pid_, parent->tid);
return 0;
}
/* But this one is new process, copy maps. */
- return do_maps_clone ? maps__clone(thread, parent->mg) : 0;
+ return do_maps_clone ? maps__clone(thread, parent->maps) : 0;
}
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
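thread__clone_maps(), shown above, encodes the fork policy used by thread__fork(): a new thread of an existing process only needs its unwind access prepared, because it already shares the parent's maps, while a new process gets its own copy through maps__clone(). Roughly, each cloned entry goes through the same prepare-then-insert steps as thread__insert_map(); a per-map sketch with a hypothetical helper name:

/* Sketch, not from the patch: clone one parent map into the child's maps. */
static int sketch__clone_one(struct maps *maps, struct map *parent_map)
{
	struct map *new = map__clone(parent_map);
	int err;

	if (new == NULL)
		return -ENOMEM;

	err = unwind__prepare_access(maps, new, NULL);
	if (!err)
		maps__insert(maps, new);

	map__put(new);	/* the maps hold their own reference on success */
	return err;
}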
struct rb_node rb_node;
struct list_head node;
};
- struct maps *mg;
+ struct maps *maps;
pid_t pid_; /* Not all tools update this */
pid_t tid;
pid_t ppid;
struct unwind_info *ui, ui_buf = {
.sample = data,
.thread = thread,
- .machine = thread->mg->machine,
+ .machine = thread->maps->machine,
.cb = cb,
.arg = arg,
.max_stack = max_stack,
*/
if (max_stack - 1 > 0) {
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
- addr_space = ui->thread->mg->addr_space;
+ addr_space = ui->thread->maps->addr_space;
if (addr_space == NULL)
return -1;
struct unwind_info ui = {
.sample = data,
.thread = thread,
- .machine = thread->mg->machine,
+ .machine = thread->maps->machine,
};
if (!data->user_regs.regs)
struct thread *thread,
struct perf_sample *data, int max_stack)
{
- if (thread->mg->unwind_libunwind_ops)
- return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+ if (thread->maps->unwind_libunwind_ops)
+ return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
return 0;
}
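The libunwind plumbing now hangs off the maps as well: unwind__prepare_access() selects and stores the unwind_libunwind_ops for thread->maps, and unwind__get_entries() simply dispatches through that pointer, returning 0 when no unwinder was set up. A caller-side sketch of walking a sample's user stack, with hypothetical callback and wrapper names:

/* Sketch, not from the patch: print each unwound frame of a sample. */
static int sketch__frame_cb(struct unwind_entry *entry, void *arg __maybe_unused)
{
	pr_debug("frame: %#" PRIx64 "\n", entry->ip);
	return 0;	/* keep unwinding */
}

static int sketch__user_callchain(struct thread *thread, struct perf_sample *sample)
{
	/* Dispatches through thread->maps->unwind_libunwind_ops, if any. */
	return unwind__get_entries(sketch__frame_cb, NULL, thread, sample,
				   PERF_MAX_STACK_DEPTH);
}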
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
struct map *map;
- maps__for_each_entry(thread->mg, map) {
+ maps__for_each_entry(thread->maps, map) {
struct dso *dso = map->dso;
if (!dso || dso->long_name[0] != '/')
continue;