* but this is different - made simpler by ksm_thread_mutex being held, but
* interesting for assuming that no other use of the struct page could ever
* put our expected_mapping into page->mapping (or a field of the union which
- * coincides with page->mapping). The RCU calls are not for KSM at all, but
- * to keep the page_count protocol described with page_cache_get_speculative.
+ * coincides with page->mapping).
*
* Note: it is possible that get_ksm_page() will return NULL one moment,
* then page the next, if the page is in between page_freeze_refs() and
* page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct stable_node *stable_node)
+static struct page *get_ksm_page(struct stable_node *stable_node, bool locked)
{
struct page *page;
void *expected_mapping;
page = pfn_to_page(stable_node->kpfn);
expected_mapping = (void *)stable_node +
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
- rcu_read_lock();
if (page->mapping != expected_mapping)
goto stale;
if (!get_page_unless_zero(page))
	goto stale;
if (page->mapping != expected_mapping) {
	put_page(page);
	goto stale;
}
- rcu_read_unlock();
+ if (locked) {
+ lock_page(page);
+ if (page->mapping != expected_mapping) {
+ unlock_page(page);
+ put_page(page);
+ goto stale;
+ }
+ }
return page;
stale:
- rcu_read_unlock();
remove_node_from_stable_tree(stable_node);
return NULL;
}
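With the new locked argument, get_ksm_page() can return the page already locked and revalidated against expected_mapping, so a caller no longer needs its own lock_page() followed by a recheck of page->mapping. A minimal caller sketch, assuming the patched function above; use_stable_page() is a hypothetical placeholder, not a function in mm/ksm.c:

	static void example_caller(struct stable_node *stable_node)
	{
		struct page *page;

		page = get_ksm_page(stable_node, true);	/* returned locked */
		if (!page)
			return;		/* stale node: already removed from the tree */

		use_stable_page(page);	/* hypothetical work done under page lock */

		unlock_page(page);	/* release the lock get_ksm_page() took */
		put_page(page);		/* drop the reference it took */
	}

The first converted caller, remove_rmap_item_from_tree(), follows exactly this shape: its explicit lock_page() goes away, while the unlock_page() and put_page() remain to release what get_ksm_page() now takes on its behalf: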
struct page *page;
stable_node = rmap_item->head;
- page = get_ksm_page(stable_node);
+ page = get_ksm_page(stable_node, true);
if (!page)
goto out;
- lock_page(page);
hlist_del(&rmap_item->hlist);
unlock_page(page);
put_page(page);
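The stable tree walkers pass locked = false instead: stable_tree_search() needs each candidate page pinned only long enough to compare its contents, so taking the page lock there would be needless overhead. Its call site: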
cond_resched();
stable_node = rb_entry(node, struct stable_node, node);
- tree_page = get_ksm_page(stable_node);
+ tree_page = get_ksm_page(stable_node, false);
if (!tree_page)
return NULL;
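And the identical conversion in stable_tree_insert():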
cond_resched();
stable_node = rb_entry(*new, struct stable_node, node);
- tree_page = get_ksm_page(stable_node);
+ tree_page = get_ksm_page(stable_node, false);
if (!tree_page)
return NULL;
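For context, a condensed sketch of the loop these last two hunks sit in, paraphrased from stable_tree_search() of this era rather than copied from the patch (page is the candidate page being searched for; memcmp_pages() is ksm.c's page-content comparator):

	struct rb_node *node = root_stable_tree.rb_node;

	while (node) {
		struct stable_node *stable_node;
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);	/* pinned, not locked */
		if (!tree_page)
			return NULL;	/* stale node has already been unlinked */

		ret = memcmp_pages(page, tree_page);
		if (ret < 0) {
			put_page(tree_page);
			node = node->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			node = node->rb_right;
		} else
			return tree_page;	/* match: keep the reference for the caller */
	}
	return NULL;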