struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo = NULL;
int ret = 0;
+ u8 *src, *dst;
unsigned int i, row, col;
uint32_t colour_set[16];
uint32_t *next_space = &colour_set[0];
dev_err(&dev->pdev->dev, "failed to reserve user bo\n");
goto out1;
}
- if (!gbo->kmap.virtual) {
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, &gbo->kmap);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to kmap user buffer updates\n");
- goto out2;
- }
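+ /* map the user-supplied cursor BO and get its kernel address; an ERR_PTR signals failure */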
+ src = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ dev_err(&dev->pdev->dev, "failed to kmap user buffer updates\n");
+ goto out2;
}
memset(&colour_set[0], 0, sizeof(uint32_t)*16);
/* width*height*4 = 16384 */
for (i = 0; i < 16384; i += 4) {
- this_colour = ioread32(gbo->kmap.virtual + i);
+ this_colour = ioread32(src + i);
/* No transparency */
if (this_colour>>24 != 0xff &&
this_colour>>24 != 0x0) {
}
/* Map up-coming buffer to write colour indices */
- if (!pixels_prev->kmap.virtual) {
- ret = ttm_bo_kmap(&pixels_prev->bo, 0,
- pixels_prev->bo.num_pages,
- &pixels_prev->kmap);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to kmap cursor updates\n");
- goto out3;
- }
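+ /* map the up-coming cursor buffer; the colour indices are written through dst below */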
+ dst = drm_gem_vram_kmap(pixels_prev, true, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ dev_err(&dev->pdev->dev, "failed to kmap cursor updates\n");
+ goto out3;
}
/* now write colour indices into hardware cursor buffer */
for (row = 0; row < 64; row++) {
memset(&this_row[0], 0, 48);
for (col = 0; col < 64; col++) {
- this_colour = ioread32(gbo->kmap.virtual + 4*(col + 64*row));
+ this_colour = ioread32(src + 4*(col + 64*row));
/* write transparent pixels */
if (this_colour>>24 == 0x0) {
this_row[47 - col/8] |= 0x80>>(col%8);
}
}
}
- memcpy_toio(pixels_prev->kmap.virtual + row*48, &this_row[0], 48);
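+ /* flush the assembled 48-byte cursor line into the I/O-mapped cursor buffer */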
+ memcpy_toio(dst + row*48, &this_row[0], 48);
}
/* Program gpu address of cursor buffer */
}
ret = 0;
- ttm_bo_kunmap(&pixels_prev->kmap);
+ drm_gem_vram_kunmap(pixels_prev);
out3:
- ttm_bo_kunmap(&gbo->kmap);
+ drm_gem_vram_kunmap(gbo);
out2:
drm_gem_vram_unreserve(gbo);
out1:
int src_offset, dst_offset;
int bpp = mfbdev->mfb.base.format->cpp[0];
int ret = -EBUSY;
+ u8 *dst;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
mfbdev->x2 = mfbdev->y2 = 0;
spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
- if (!gbo->kmap.virtual) {
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, &gbo->kmap);
- if (ret) {
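+ /* reuse an existing kernel mapping if the BO is already kmapped; otherwise map it here and unmap again after the copy */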
+ dst = drm_gem_vram_kmap(gbo, false, NULL);
+ if (IS_ERR(dst)) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ goto out;
+ } else if (!dst) {
+ dst = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(dst)) {
DRM_ERROR("failed to kmap fb updates\n");
- drm_gem_vram_unreserve(gbo);
- return;
+ goto out;
}
unmap = true;
}
+
for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset =
i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(gbo->kmap.virtual + src_offset,
- mfbdev->sysram + dst_offset, (x2 - x + 1) * bpp);
-
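+ /* copy one dirty scanline from the sysram shadow into the I/O-mapped framebuffer */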
+ memcpy_toio(dst + dst_offset, mfbdev->sysram + src_offset,
+ (x2 - x + 1) * bpp);
}
+
if (unmap)
- ttm_bo_kunmap(&gbo->kmap);
+ drm_gem_vram_kunmap(gbo);
+out:
drm_gem_vram_unreserve(gbo);
}
struct drm_gem_vram_object *gbo;
int ret;
s64 gpu_addr;
+ void *base;
/* push the previous fb to system ram */
if (!atomic && fb) {
if (&mdev->mfbdev->mfb == mga_fb) {
/* if pushing console in kmap it */
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, &gbo->kmap);
- if (ret)
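+ /* map the fbcon BO; as before, a mapping failure is only reported, not treated as fatal */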
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
DRM_ERROR("failed to kmap fbcon\n");
-
+ }
}
+
drm_gem_vram_unreserve(gbo);
mga_set_start_address(crtc, (u32)gpu_addr);