return drained;
}
+static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size)
+{
+ child->bts_buffer = alloc_locked_buffer(size);
+ if (!child->bts_buffer)
+ return -ENOMEM;
+
+ child->bts_size = size;
+
+ return 0;
+}
+
+static void ptrace_bts_free_buffer(struct task_struct *child)
+{
+ free_locked_buffer(child->bts_buffer, child->bts_size);
+ child->bts_buffer = NULL;
+ child->bts_size = 0;
+}
+
static int ptrace_bts_config(struct task_struct *child,
long cfg_size,
const struct ptrace_bts_config __user *ucfg)
if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
(cfg.size != child->bts_size)) {
- kfree(child->bts_buffer);
+ int error;
- child->bts_size = cfg.size;
- child->bts_buffer = kzalloc(cfg.size, GFP_KERNEL);
- if (!child->bts_buffer) {
- child->bts_size = 0;
- return -ENOMEM;
- }
+ ptrace_bts_free_buffer(child);
+
+ error = ptrace_bts_allocate_buffer(child, cfg.size);
+ if (error < 0)
+ return error;
}
if (cfg.flags & PTRACE_BTS_O_TRACE)
if (IS_ERR(child->bts)) {
int error = PTR_ERR(child->bts);
- kfree(child->bts_buffer);
+ ptrace_bts_free_buffer(child);
child->bts = NULL;
- child->bts_buffer = NULL;
- child->bts_size = 0;
return error;
}
ds_release_bts(child->bts);
child->bts = NULL;
+ /*
+  * We cannot update total_vm and locked_vm since
+  * child's mm is already gone. But we can reclaim the
+  * memory.
+  */
kfree(child->bts_buffer);
child->bts_buffer = NULL;
child->bts_size = 0;
static void ptrace_bts_detach(struct task_struct *child)
{
- ptrace_bts_untrace(child);
+ if (unlikely(child->bts)) {
+ ds_release_bts(child->bts);
+ child->bts = NULL;
+
+ ptrace_bts_free_buffer(child);
+ }
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}
spin_unlock(&shmlock_user_lock);
free_uid(user);
}
+
+void *alloc_locked_buffer(size_t size)
+{
+ unsigned long rlim, vm, pgsz;
+ void *buffer = NULL;
+
+ pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ down_write(&current->mm->mmap_sem);
+
+ rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+ vm = current->mm->total_vm + pgsz;
+ if (rlim < vm)
+ goto out;
+
+ rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+ vm = current->mm->locked_vm + pgsz;
+ if (rlim < vm)
+ goto out;
+
+ buffer = kzalloc(size, GFP_KERNEL);
+ if (!buffer)
+ goto out;
+
+ current->mm->total_vm += pgsz;
+ current->mm->locked_vm += pgsz;
+
+ out:
+ up_write(&current->mm->mmap_sem);
+ return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+ unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ down_write(&current->mm->mmap_sem);
+
+ current->mm->total_vm -= pgsz;
+ current->mm->locked_vm -= pgsz;
+
+ up_write(¤t->mm->mmap_sem);
+
+ kfree(buffer);
+}