return drm_vma_node_offset_addr(&obj->vma_node);
}
-static bool is_contiguous(struct omap_gem_object *omap_obj)
+static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
* Eviction
*/
-static void evict_entry(struct drm_gem_object *obj,
+static void omap_gem_evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
}
/* Evict a buffer from the usergart, if it is mapped there */
-static void evict(struct drm_gem_object *obj)
+static void omap_gem_evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
&priv->usergart[fmt].entry[i];
if (entry->obj == obj)
- evict_entry(obj, fmt, entry);
+ omap_gem_evict_entry(obj, fmt, entry);
}
}
}
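
For readers following along: the eviction helper renamed here walks every usergart entry of every tiler format and evicts any entry that currently maps the object. A minimal user-space sketch of that scan pattern, with illustrative stand-in types and entry counts (not the driver's real layout):

#include <stdio.h>

#define NUM_FMTS	4	/* stand-in for the tiler format count */
#define NUM_ENTRIES	2	/* stand-in for entries per format */

struct gem_object { int id; };

struct usergart_entry {
	struct gem_object *obj;	/* object currently mapped here, or NULL */
};

static struct usergart_entry usergart[NUM_FMTS][NUM_ENTRIES];

static void evict_entry(struct usergart_entry *entry)
{
	/* the real driver also tears down the mapping; we only clear it */
	printf("evicting object %d\n", entry->obj->id);
	entry->obj = NULL;
}

/* scan all formats and all entries, evicting any that map @obj */
static void evict(struct gem_object *obj)
{
	for (int fmt = 0; fmt < NUM_FMTS; fmt++)
		for (int i = 0; i < NUM_ENTRIES; i++)
			if (usergart[fmt][i].obj == obj)
				evict_entry(&usergart[fmt][i]);
}

int main(void)
{
	struct gem_object a = { .id = 1 };

	usergart[0][1].obj = &a;
	evict(&a);	/* prints "evicting object 1" */
	return 0;
}
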
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+static int __omap_gem_get_pages(struct drm_gem_object *obj,
+ struct page ***pages)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
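
The renamed __omap_gem_get_pages() implements the lazy page-attach that the comment above describes: shmem-backed objects get their backing pages on first use, and the array is cached afterwards. A rough stand-alone sketch of that pattern (the struct and field names here are invented for illustration):

#include <stdlib.h>

#define PAGE_SIZE 4096u	/* assuming 4 KiB pages */

struct buffer {
	size_t size;
	void **pages;	/* lazily attached per-page backing, or NULL */
};

/* attach backing pages on first use; later calls return the cached array */
static int get_pages(struct buffer *buf, void ***pages)
{
	size_t npages = buf->size / PAGE_SIZE;

	if (!buf->pages) {
		buf->pages = calloc(npages, sizeof(*buf->pages));
		if (!buf->pages)
			return -1;
		for (size_t i = 0; i < npages; i++) {
			buf->pages[i] = calloc(1, PAGE_SIZE);
			if (!buf->pages[i])
				return -1;	/* the real code unwinds here */
		}
	}

	*pages = buf->pages;
	return 0;
}
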
*/
/* Normal handling for the case of faulting in non-tiled buffers */
-static vm_fault_t fault_1d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
- BUG_ON(!is_contiguous(omap_obj));
+ BUG_ON(!omap_gem_is_contiguous(omap_obj));
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
}
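
In the non-tiled path, the page frame number either comes from the shmem page array or, for physically contiguous buffers, straight from arithmetic on the DMA address, which is what the BUG_ON above guards. A small worked example of that pfn computation, assuming 4 KiB pages (PAGE_SHIFT of 12):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

/* for a physically contiguous buffer, the pfn of page @pgoff is the
 * pfn of the buffer's start plus the page offset */
static uint64_t contiguous_pfn(uint64_t dma_addr, uint64_t pgoff)
{
	return (dma_addr >> PAGE_SHIFT) + pgoff;
}

int main(void)
{
	/* page 3 of a buffer at 0x80000000: pfn 0x80000 + 3 = 0x80003 */
	printf("pfn = 0x%llx\n",
	       (unsigned long long)contiguous_pfn(0x80000000ull, 3));
	return 0;
}
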
/* Special handling for the case of faulting in 2d tiled buffers */
-static vm_fault_t fault_2d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
- evict_entry(entry->obj, fmt, entry);
+ omap_gem_evict_entry(entry->obj, fmt, entry);
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
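
Each usergart slot behaves like a direct-mapped cache line: before a slot is reassigned to the faulting object, its previous occupant is evicted, exactly as the hunk above shows. A simplified sketch of that claim-and-replace step (types and the page offsets are illustrative):

#include <stdio.h>

struct gem_object { int id; };

struct usergart_entry {
	struct gem_object *obj;		/* current occupant, or NULL */
	unsigned long obj_pgoff;	/* page offset the slot maps */
};

/* claim a slot for @obj, evicting whoever held it before; the slots
 * act like a small direct-mapped cache of fault-time mappings */
static void claim_slot(struct usergart_entry *entry,
		       struct gem_object *obj, unsigned long base_pgoff)
{
	if (entry->obj)
		printf("evicting previous object %d\n", entry->obj->id);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;
}

int main(void)
{
	struct usergart_entry slot = { 0 };
	struct gem_object a = { .id = 1 }, b = { .id = 2 };

	claim_slot(&slot, &a, 0);
	claim_slot(&slot, &b, 64);	/* prints "evicting previous object 1" */
	return 0;
}
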
mutex_lock(&dev->struct_mutex);
/* if a shmem-backed object, make sure we have pages attached now */
- err = get_pages(obj, &pages);
+ err = __omap_gem_get_pages(obj, &pages);
if (err) {
ret = vmf_error(err);
goto fail;
*/
if (omap_obj->flags & OMAP_BO_TILED)
- ret = fault_2d(obj, vma, vmf);
+ ret = omap_gem_fault_2d(obj, vma, vmf);
else
- ret = fault_1d(obj, vma, vmf);
+ ret = omap_gem_fault_1d(obj, vma, vmf);
fail:
/* if we aren't mapped yet, we don't need to do anything */
if (omap_obj->block) {
struct page **pages;
- ret = get_pages(obj, &pages);
+
+ ret = __omap_gem_get_pages(obj, &pages);
if (ret)
goto fail;
ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
* the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
* unmapped from the CPU.
*/
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
+static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct drm_device *dev = obj->dev;
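
The coherency helper decides whether per-page CPU/device synchronization can be skipped entirely; when it cannot, a set dma_addrs[i] entry marks a page as currently owned by the device, and syncing hands it back to the CPU, per the comment above. A toy model of that per-page ownership tracking (the address and page count are made up):

#include <stdint.h>
#include <stdio.h>

#define NPAGES 4

/* a nonzero entry means page i is currently mapped for device DMA */
static uint64_t dma_addrs[NPAGES];

/* hand page @pgoff back to the CPU before a CPU access; this mirrors
 * the ordering described above: a device-mapped page must be unmapped
 * (resolving its cache state) before the CPU touches it */
static void cpu_sync_page(unsigned int pgoff)
{
	if (dma_addrs[pgoff]) {
		printf("unmapping page %u from DMA\n", pgoff);
		dma_addrs[pgoff] = 0;	/* the real code calls dma_unmap_page() */
	}
}

int main(void)
{
	dma_addrs[2] = 0x80002000ull;
	cpu_sync_page(2);	/* unmaps page 2 */
	cpu_sync_page(2);	/* already CPU-owned: no-op */
	return 0;
}
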
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
if (omap_obj->dma_addrs[pgoff]) {
struct page **pages = omap_obj->pages;
bool dirty = false;
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
for (i = 0; i < npages; i++) {
mutex_lock(&obj->dev->struct_mutex);
- if (!is_contiguous(omap_obj) && priv->has_dmm) {
+ if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
struct page **pages;
u32 npages = obj->size >> PAGE_SHIFT;
BUG_ON(omap_obj->block);
- ret = get_pages(obj, &pages);
+ ret = __omap_gem_get_pages(obj, &pages);
if (ret)
goto fail;
omap_obj->dma_addr_cnt++;
*dma_addr = omap_obj->dma_addr;
- } else if (is_contiguous(omap_obj)) {
+ } else if (omap_gem_is_contiguous(omap_obj)) {
*dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
return 0;
}
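
The pin path shown above distinguishes three cases: buffers that can be remapped contiguously through the DMM/tiler are pinned on first use and refcounted via dma_addr_cnt, buffers that are already contiguous simply report their address, and anything else fails. A condensed sketch of that refcounted-pin logic (the struct and the placeholder address are hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct pinned_buf {
	bool contiguous;	/* physically contiguous since allocation */
	bool has_dmm;		/* remappable through the DMM/tiler */
	unsigned int pin_cnt;	/* users currently holding the pin */
	uint64_t dma_addr;	/* valid while pinned (always, if contiguous) */
};

/* pin for DMA: remappable buffers are mapped on the first pin and
 * refcounted; contiguous ones already have a stable address */
static int buf_pin(struct pinned_buf *buf, uint64_t *dma_addr)
{
	if (!buf->contiguous && buf->has_dmm) {
		if (buf->pin_cnt == 0)
			buf->dma_addr = 0x90000000ull; /* stand-in for a tiler_pin() result */
		buf->pin_cnt++;
	} else if (!buf->contiguous) {
		return -1;	/* no way to make this buffer DMA'able */
	}

	*dma_addr = buf->dma_addr;
	return 0;
}
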
mutex_lock(&obj->dev->struct_mutex);
- ret = get_pages(obj, pages);
+ ret = __omap_gem_get_pages(obj, pages);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
if (!omap_obj->vaddr) {
struct page **pages;
- int ret = get_pages(obj, &pages);
+ int ret;
+
+ ret = __omap_gem_get_pages(obj, &pages);
if (ret)
return ERR_PTR(ret);
omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
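
The vaddr path is the same lazy pattern once more: the kernel mapping is created with vmap() on the first request and cached in omap_obj->vaddr thereafter, under struct_mutex. A stand-in sketch of that caching shape (malloc() substitutes for vmap() purely for illustration):

#include <stdlib.h>

struct mapped_buf {
	size_t size;
	void *vaddr;	/* mapping created on first use, cached after */
};

/* return a CPU mapping for the whole buffer, creating it lazily */
static void *buf_vaddr(struct mapped_buf *buf)
{
	if (!buf->vaddr)
		buf->vaddr = malloc(buf->size);	/* stand-in for vmap() */
	return buf->vaddr;	/* NULL on allocation failure */
}
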
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- evict(obj);
+ omap_gem_evict(obj);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));