* Mempolicy objects are reference counted. A mempolicy will be freed when
* mpol_put() decrements the reference count to zero.
*
- * Copying policy objects:
- * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
* to the new storage. The reference count of the new object is initialized
- * to 1, representing the caller of mpol_copy().
+ * to 1, representing the caller of mpol_dup().
*/
struct mempolicy {
atomic_t refcnt;
__mpol_put(pol);
}
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
if (pol)
- pol = __mpol_copy(pol);
+ pol = __mpol_dup(pol);
return pol;
}
{
}
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
return NULL;
}
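
For orientation, a caller-side sketch of the contract the header comment above describes: mpol_dup() returns either a new policy holding a reference count of 1 or an ERR_PTR() on allocation failure, and every successful duplicate must eventually be balanced by mpol_put(). This is not part of the patch; the helper name example_dup_policy is hypothetical.

	/* Hypothetical helper, assuming <linux/mempolicy.h> and <linux/err.h>. */
	static int example_dup_policy(struct mempolicy *src,
				      struct mempolicy **dst)
	{
		struct mempolicy *pol = mpol_dup(src);	/* NULL in, NULL out */

		if (IS_ERR(pol))
			return PTR_ERR(pol);	/* typically -ENOMEM */
		*dst = pol;			/* caller now owns one reference */
		return 0;
	}

On any later teardown path the reference is dropped with mpol_put(*dst).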
cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
- cpuset_being_rebound = cs; /* causes mpol_copy() rebind */
+ cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
fudge = 10; /* spare mmarray[] slots */
fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
* rebind the vma mempolicies of each mm in mmarray[] to their
* new cpuset, and release that mm. The mpol_rebind_mm()
* call takes mmap_sem, which we couldn't take while holding
- * tasklist_lock. Forks can happen again now - the mpol_copy()
+ * tasklist_lock. Forks can happen again now - the mpol_dup()
* cpuset_being_rebound check will catch such forks, and rebind
* their vma mempolicies too. Because we still hold the global
* cgroup_mutex, we know that no other rebind effort will
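
The loop the comment describes plausibly looks like this in update_nodemask() (a sketch of the surrounding kernel code, not lines carried by this patch; mmarray, n, and cs are update_nodemask()'s locals):

	for (i = 0; i < n; i++) {
		struct mm_struct *mm = mmarray[i];

		mpol_rebind_mm(mm, &cs->mems_allowed);	/* takes mmap_sem */
		mmput(mm);	/* drop the reference taken under tasklist_lock */
	}
	cpuset_being_rebound = NULL;	/* close the rebind window */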
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
- pol = mpol_copy(vma_policy(mpnt));
+ pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
- p->mempolicy = mpol_copy(p->mempolicy);
+ p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
EXPORT_SYMBOL(alloc_pages_current);
/*
- * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after the task's cpuset moves. See
 * update_nodemask() in kernel/cpuset.c.
*/
-/* Slow path of a mempolicy copy */
-struct mempolicy *__mpol_copy(struct mempolicy *old)
+/* Slow path of a mempolicy duplicate */
+struct mempolicy *__mpol_dup(struct mempolicy *old)
{
struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
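
The hunk ends here; for context, the slow path plausibly continues as below, matching the comment above (a sketch, not lines shown by this diff):

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {	/* dup'ing during a rebind? */
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);	/* remap @old's nodes first */
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);	/* the reference handed to the caller */
	return new;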
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
- pol = mpol_copy(vma_policy(vma));
+ pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new);
return PTR_ERR(pol);
new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (new_vma) {
*new_vma = *vma;
- pol = mpol_copy(vma_policy(vma));
+ pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new_vma);
return NULL;