#define queue_max_phys_segments(rq) queue_max_segments(rq)
#define queue_max_hw_segments(rq) queue_max_segments(rq)
-#define ll_kmap_atomic(a, b) kmap_atomic(a)
-#define ll_kunmap_atomic(a, b) kunmap_atomic(a)
-
#define ll_d_hlist_node hlist_node
#define ll_d_hlist_empty(list) hlist_empty(list)
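Note: the two-argument ll_kmap_atomic()/ll_kunmap_atomic() wrappers are removed because modern kernels (v3.4+) dropped the km_type argument from kmap_atomic(): the fixed KM_USER0/KM_USER1 slots are gone and atomic mappings are now stack-based, so the wrappers add nothing over direct calls. A minimal sketch of the kind of shim being retired, assuming a hypothetical HAVE_KM_TYPE configure guard (not part of this patch):

    /* hypothetical guard: older kernels still take a km_type slot */
    #ifdef HAVE_KM_TYPE
    # define ll_kmap_atomic(page, slot)   kmap_atomic(page, slot)
    # define ll_kunmap_atomic(addr, slot) kunmap_atomic(addr, slot)
    #else  /* modern kernels: mappings nest on a per-CPU stack */
    # define ll_kmap_atomic(page, slot)   kmap_atomic(page)
    # define ll_kunmap_atomic(addr, slot) kunmap_atomic(addr)
    #endif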
src_page = (rw == WRITE) ? pages[i] : vmpage;
dst_page = (rw == WRITE) ? vmpage : pages[i];
- src = ll_kmap_atomic(src_page, KM_USER0);
- dst = ll_kmap_atomic(dst_page, KM_USER1);
+ src = kmap_atomic(src_page);
+ dst = kmap_atomic(dst_page);
memcpy(dst, src, min(page_size, size));
- ll_kunmap_atomic(dst, KM_USER1);
- ll_kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
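This hunk (apparently the direct I/O page-copy loop) maps both pages and copies between them. With stack-based kmap_atomic() the mappings nest, so they must be released in the reverse order they were taken; the replacement preserves that (dst is mapped last and unmapped first), mirroring the old KM_USER0/KM_USER1 pairing. A minimal self-contained sketch of the same pattern; copy_page_contents() is a hypothetical helper name, not from the patch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy 'bytes' from src_page to dst_page via atomic mappings. */
    static void copy_page_contents(struct page *dst_page,
                                   struct page *src_page, size_t bytes)
    {
            void *src = kmap_atomic(src_page);
            void *dst = kmap_atomic(dst_page);  /* mapped last ... */

            memcpy(dst, src, bytes);
            kunmap_atomic(dst);                 /* ... so unmapped first */
            kunmap_atomic(src);
    }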
/* make sure the page will be added to the transfer by
 * cl_io_submit()->...->vvp_page_prep_write(). */
* purposes here we can treat it like i_size.
*/
if (attr->cat_kms <= offset) {
- char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
+ char *kaddr = kmap_atomic(cp->cpg_page);
memset(kaddr, 0, cl_page_size(obj));
- ll_kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
} else if (cp->cpg_defer_uptodate)
cp->cpg_ra_used = 1;
else
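The last hunk zero-fills a page that starts at or beyond the known minimum size (cat_kms), so there is no older data worth reading before the write; only the single-argument conversion changes here. For a full-page clear, the kernel's clear_highpage() helper performs an equivalent map/clear/unmap sequence internally. A minimal sketch, assuming a full PAGE_SIZE clear rather than the patch's cl_page_size(obj); zero_new_page() is a hypothetical name, not from the patch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Zero a page known to lie entirely beyond KMS/i_size. */
    static void zero_new_page(struct page *page)
    {
            void *kaddr = kmap_atomic(page);

            memset(kaddr, 0, PAGE_SIZE);
            kunmap_atomic(kaddr);
            /* equivalently: clear_highpage(page); */
    }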