*/
#include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
+ .ttm_tt_populate = &ttm_pool_populate,
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
+ .ttm_tt_populate = &ttm_pool_populate,
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
unsigned long page,
pgprot_t prot)
{
- struct page *d = ttm_tt_get_page(ttm, page);
+ struct page *d = ttm->pages[page];
void *dst;
if (!d)
unsigned long page,
pgprot_t prot)
{
- struct page *s = ttm_tt_get_page(ttm, page);
+ struct page *s = ttm->pages[page];
void *src;
if (!s)
if (old_iomap == NULL && ttm == NULL)
goto out2;
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ goto out1;
+ }
+
add = 0;
dir = 1;
{
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
- struct page *d;
- int i;
+ int ret;
BUG_ON(!ttm);
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+ }
+
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
*/
map->bo_kmap_type = ttm_bo_map_kmap;
- map->page = ttm_tt_get_page(ttm, start_page);
+ map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
- /*
- * Populate the part we're mapping;
- */
- for (i = start_page; i < start_page + num_pages; ++i) {
- d = ttm_tt_get_page(ttm, i);
- if (!d)
- return -ENOMEM;
- }
-
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+ /* Allocate all pages at once, most common usage */
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ retval = VM_FAULT_OOM;
+ goto out_io_unlock;
+ }
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
-
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
- page = ttm_tt_get_page(ttm, page_offset);
+ page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
_manager = NULL;
}
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned i;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
+ ttm->caching_state, 1,
+ &ttm->dma_address[i]);
+ if (ret != 0) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i]) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ ttm_put_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state,
+ ttm->dma_address);
+ }
+ }
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
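/*
 * Illustration only, not part of this patch: with populate/unpopulate now
 * routed through ttm_bo_driver, a driver that needs extra per-device work
 * can wrap the pool helpers instead of open-coding page allocation.  The
 * "foodrv" names below are hypothetical; only ttm_pool_populate() and
 * ttm_pool_unpopulate() come from this series.
 */
static int foodrv_ttm_tt_populate(struct ttm_tt *ttm)
{
	int ret;

	/* Let the generic pool fill ttm->pages[] (it also handles swapin). */
	ret = ttm_pool_populate(ttm);
	if (ret)
		return ret;

	/* Driver-specific setup, e.g. DMA mapping, would go here. */
	return 0;
}

static void foodrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	/* Driver-specific teardown first, then hand the pages back to the pool. */
	ttm_pool_unpopulate(ttm);
}
/* These would be wired up via .ttm_tt_populate / .ttm_tt_unpopulate. */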
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
ttm->dma_address = NULL;
}
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
- struct page *p;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- int ret;
-
- if (NULL == (p = ttm->pages[index])) {
-
- ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
- &ttm->dma_address[index]);
- if (ret != 0)
- return NULL;
-
- ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
- if (unlikely(ret != 0))
- goto out_err;
-
- ttm->pages[index] = p;
- }
- return p;
-out_err:
- ttm_put_pages(&p, 1, ttm->page_flags,
- ttm->caching_state, &ttm->dma_address[index]);
- return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
- int ret;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return NULL;
- }
- return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
- struct page *page;
- unsigned long i;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return ret;
- }
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = __ttm_tt_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
- }
- ttm->state = tt_unbound;
- return 0;
-}
-EXPORT_SYMBOL(ttm_tt_populate);
-
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_old,
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
- unsigned i;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
- ttm->caching_state, &ttm->dma_address[i]);
- }
- }
- ttm->state = tt_unpopulated;
-}
-
void ttm_tt_destroy(struct ttm_tt *ttm)
{
if (unlikely(ttm == NULL))
}
if (likely(ttm->pages != NULL)) {
- ttm_tt_free_alloced_pages(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm_tt_free_page_directory(ttm);
}
if (ttm->state == tt_bound)
return 0;
- ret = ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
}
EXPORT_SYMBOL(ttm_tt_bind);
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
ret = PTR_ERR(from_page);
goto out_err;
}
- to_page = __ttm_tt_get_page(ttm, i);
+ to_page = ttm->pages[i];
if (unlikely(to_page == NULL))
goto out_err;
return 0;
out_err:
- ttm_tt_free_alloced_pages(ttm);
return ret;
}
page_cache_release(to_page);
}
- ttm_tt_free_alloced_pages(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
+ .ttm_tt_populate = &ttm_pool_populate,
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
uint32_t page_flags,
struct page *dummy_read_page);
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt whose backing pages will be freed.
+ *
+ * Free all backing pages
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
/**
* struct ttm_bo_driver member invalidate_caches
*
*/
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-/**
- * ttm_tt_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-extern int ttm_tt_populate(struct ttm_tt *ttm);
-
/**
* ttm_ttm_destroy:
*
extern void ttm_tt_unbind(struct ttm_tt *ttm);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
- * @index: Index of the desired page.
*
- * Return a pointer to the struct page backing @ttm at page
- * index @index. If the page is unpopulated, one will be allocated to
- * populate that index.
- *
- * Returns:
- * NULL on OOM.
+ * Swap in a previously swapped-out ttm_tt.
*/
-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
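/*
 * Sketch under an assumption, not taken from the patch text: ttm_tt_swapin()
 * is exported because a driver that fills ttm->pages[] itself, without
 * ttm_pool_populate(), now also owns restoring previously swapped-out
 * contents.  This mirrors the structure of ttm_pool_populate() above;
 * "bardrv_alloc_pages" is a hypothetical allocator.
 */
static int bardrv_alloc_pages(struct ttm_tt *ttm); /* hypothetical, not shown */

static int bardrv_ttm_tt_populate(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	ret = bardrv_alloc_pages(ttm);	/* hypothetical: fills ttm->pages[] */
	if (ret)
		return ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	ttm->state = tt_unbound;
	return 0;
}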
/**
* ttm_tt_cache_flush:
*/
void ttm_page_alloc_fini(void);
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
+/**
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt whose backing pages will be freed.
+ *
+ * Free all pages of @ttm
+ */
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
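/*
 * Usage note (mirrors the nouveau/radeon/vmwgfx hunks above): drivers that
 * are happy with the generic pool plug these helpers straight into their
 * ttm_bo_driver.  "example_bo_driver" is illustrative only; a real driver
 * fills in ttm_tt_create and the remaining members as before.
 */
static struct ttm_bo_driver example_bo_driver = {
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
};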
/**
* Output the state of pools to debugfs file
*/