*/
int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *, int ignore_refs);
+void remove_from_swap(struct page *page);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
struct backing_dev_info;
extern spinlock_t swap_lock;
+extern int remove_vma_swap(struct vm_area_struct *vma, struct page *page);
/* linux/mm/thrash.c */
extern struct mm_struct * swap_token_mm;
return anon_vma;
}
+#ifdef CONFIG_MIGRATION
+/*
+ * Remove an anonymous page from swap by replacing the swap ptes
+ * with real ptes pointing to valid pages and then releasing
+ * the page from the swap cache.
+ *
+ * Must hold page lock on page.
+ */
+void remove_from_swap(struct page *page)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+
+ if (!PageAnon(page) || !PageSwapCache(page))
+ return;
+
+ anon_vma = page_lock_anon_vma(page);
+ if (!anon_vma)
+ return;
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+ remove_vma_swap(vma, page);
+
+ spin_unlock(&anon_vma->lock);
+
+ delete_from_swap_cache(page);
+}
+#endif
+
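For orientation, a minimal sketch of how a caller is expected to use the new
helper. The wrapper function below is hypothetical; the point is only that the
page lock must be held across the call, as the comment in the hunk requires:

	#include <linux/pagemap.h>
	#include <linux/rmap.h>

	/* Hypothetical caller -- illustrates the locking rule only. */
	static void replace_swap_entries_for(struct page *page)
	{
		lock_page(page);	/* remove_from_swap() requires the page lock */
		remove_from_swap(page);	/* swap ptes -> real ptes, then drop the swap cache entry */
		unlock_page(page);
	}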
/*
* At what user virtual address is page expected in vma?
*/
return 0;
}
+#ifdef CONFIG_MIGRATION
+int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
+{
+ swp_entry_t entry = { .val = page_private(page) };
+
+ return unuse_vma(vma, entry, page);
+}
+#endif
+
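remove_vma_swap() simply forwards to unuse_vma(), which walks the vma's page
tables looking for ptes that hold this swap entry. The per-pte replacement it
performs looks roughly like the sketch below, loosely modelled on unuse_pte()
in mm/swapfile.c of this era; this is an illustration, not the literal kernel
code:

	#include <linux/mm.h>
	#include <linux/swap.h>
	#include <linux/rmap.h>

	/* Roughly what replacing one swap pte with a real pte involves. */
	static void sketch_replace_swap_pte(struct vm_area_struct *vma, pte_t *pte,
					    unsigned long addr, swp_entry_t entry,
					    struct page *page)
	{
		inc_mm_counter(vma->vm_mm, anon_rss);	/* account the new anon mapping */
		get_page(page);				/* the pte now holds a reference */
		set_pte_at(vma->vm_mm, addr, pte,
			   pte_mkold(mk_pte(page, vma->vm_page_prot)));
		page_add_anon_rmap(page, vma, addr);	/* hook the page into the reverse map */
		swap_free(entry);			/* release the swap slot reference */
	}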
/*
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
migrate_page_copy(newpage, page);
+ /*
+ * Remove auxiliary swap entries and replace
+ * them with real ptes.
+ *
+ * Note that a real pte entry will allow processes that are not
+ * waiting on the page lock to use the new page via the page tables
+ * before the new page is unlocked.
+ */
+ remove_from_swap(newpage);
return 0;
}
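Condensed, the tail of the migration path this hunk touches then reads as
follows; only migrate_page_copy() and remove_from_swap() are taken from the
diff, the surrounding function name is illustrative:

	/* Illustrative only: ordering of the copy vs. swap pte replacement. */
	static int sketch_finish_migration(struct page *newpage, struct page *page)
	{
		/* both pages are still locked at this point */
		migrate_page_copy(newpage, page);	/* copy data and page state */
		remove_from_swap(newpage);		/* install real ptes to the new page */
		return 0;				/* caller unlocks the pages afterwards */
	}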