	return ret;
}
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	int i;
+
+	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid dequeue node.\n");
+		return -EFAULT;
+	}
+
+	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
+
+	/* put gem buffer */
+	for_each_ipp_planar(i) {
+		unsigned long handle = m_node->buf_info.handles[i];
+		if (handle)
+			exynos_drm_gem_put_dma_addr(drm_dev, handle,
+							c_node->filp);
+	}
+
+	list_del(&m_node->list);
+	kfree(m_node);
+
+	return 0;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_get_mem_node(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_buf_info *buf_info;
+	int i;
+
+	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+	if (!m_node)
+		return ERR_PTR(-ENOMEM);
+
+	buf_info = &m_node->buf_info;
+
+	/* operations, buffer id */
+	m_node->ops_id = qbuf->ops_id;
+	m_node->prop_id = qbuf->prop_id;
+	m_node->buf_id = qbuf->buf_id;
+	INIT_LIST_HEAD(&m_node->list);
+
+	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
+	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
+
+	for_each_ipp_planar(i) {
+		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
+
+		/* get dma address by handle */
+		if (qbuf->handle[i]) {
+			dma_addr_t *addr;
+
+			addr = exynos_drm_gem_get_dma_addr(drm_dev,
+					qbuf->handle[i], file);
+			if (IS_ERR(addr)) {
+				DRM_ERROR("failed to get addr.\n");
+				ipp_put_mem_node(drm_dev, c_node, m_node);
+				return ERR_PTR(-EFAULT);
+			}
+
+			buf_info->handles[i] = qbuf->handle[i];
+			buf_info->base[i] = *addr;
+			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
+				buf_info->base[i], buf_info->handles[i]);
+		}
+	}
+
+	mutex_lock(&c_node->mem_lock);
+	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+	mutex_unlock(&c_node->mem_lock);
+
+	return m_node;
+}
+
+static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node, int ops)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+	struct list_head *head = &c_node->mem_list[ops];
+
+	mutex_lock(&c_node->mem_lock);
+
+	list_for_each_entry_safe(m_node, tm_node, head, list) {
+		int ret;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+	}
+
+	mutex_unlock(&c_node->mem_lock);
+}
+
static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	return ret;
}
-static int ipp_put_mem_node(struct drm_device *drm_dev,
-		struct drm_exynos_ipp_cmd_node *c_node,
-		struct drm_exynos_ipp_mem_node *m_node)
-{
-	int i;
-
-	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
-
-	if (!m_node) {
-		DRM_ERROR("invalid dequeue node.\n");
-		return -EFAULT;
-	}
-
-	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
-
-	/* put gem buffer */
-	for_each_ipp_planar(i) {
-		unsigned long handle = m_node->buf_info.handles[i];
-		if (handle)
-			exynos_drm_gem_put_dma_addr(drm_dev, handle,
-							c_node->filp);
-	}
-
-	list_del(&m_node->list);
-	kfree(m_node);
-
-	return 0;
-}
-
-static struct drm_exynos_ipp_mem_node
-		*ipp_get_mem_node(struct drm_device *drm_dev,
-		struct drm_file *file,
-		struct drm_exynos_ipp_cmd_node *c_node,
-		struct drm_exynos_ipp_queue_buf *qbuf)
-{
-	struct drm_exynos_ipp_mem_node *m_node;
-	struct drm_exynos_ipp_buf_info *buf_info;
-	int i;
-
-	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
-	if (!m_node)
-		return ERR_PTR(-ENOMEM);
-
-	buf_info = &m_node->buf_info;
-
-	/* operations, buffer id */
-	m_node->ops_id = qbuf->ops_id;
-	m_node->prop_id = qbuf->prop_id;
-	m_node->buf_id = qbuf->buf_id;
-	INIT_LIST_HEAD(&m_node->list);
-
-	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
-	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
-
-	for_each_ipp_planar(i) {
-		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
-
-		/* get dma address by handle */
-		if (qbuf->handle[i]) {
-			dma_addr_t *addr;
-
-			addr = exynos_drm_gem_get_dma_addr(drm_dev,
-					qbuf->handle[i], file);
-			if (IS_ERR(addr)) {
-				DRM_ERROR("failed to get addr.\n");
-				ipp_put_mem_node(drm_dev, c_node, m_node);
-				return ERR_PTR(-EFAULT);
-			}
-
-			buf_info->handles[i] = qbuf->handle[i];
-			buf_info->base[i] = *addr;
-			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
-				buf_info->base[i], buf_info->handles[i]);
-		}
-	}
-
-	mutex_lock(&c_node->mem_lock);
-	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
-	mutex_unlock(&c_node->mem_lock);
-
-	return m_node;
-}
-
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
-	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
-	struct list_head *head;
	int ret = 0, i;
	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
	/* put event */
	ipp_put_event(c_node, NULL);
-	mutex_lock(&c_node->mem_lock);
-
	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
-		for_each_ipp_ops(i) {
-			/* source/destination memory list */
-			head = &c_node->mem_list[i];
-
-			list_for_each_entry_safe(m_node, tm_node,
-				head, list) {
-				ret = ipp_put_mem_node(drm_dev, c_node,
-					m_node);
-				if (ret) {
-					DRM_ERROR("failed to put m_node.\n");
-					goto err_clear;
-				}
-			}
-		}
+		for_each_ipp_ops(i)
+			ipp_clean_mem_nodes(drm_dev, c_node, i);
		break;
	case IPP_CMD_WB:
-		/* destination memory list */
-		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
-
-		list_for_each_entry_safe(m_node, tm_node, head, list) {
-			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
-			if (ret) {
-				DRM_ERROR("failed to put m_node.\n");
-				goto err_clear;
-			}
-		}
+		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
		break;
	case IPP_CMD_OUTPUT:
-		/* source memory list */
-		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
-		list_for_each_entry_safe(m_node, tm_node, head, list) {
-			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
-			if (ret) {
-				DRM_ERROR("failed to put m_node.\n");
-				goto err_clear;
-			}
-		}
+		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
		break;
	default:
		DRM_ERROR("invalid operations.\n");
	}
-err_clear:
-	mutex_unlock(&c_node->mem_lock);
-
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);
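
Note (not part of the patch): the new ipp_clean_mem_nodes() relies on list_for_each_entry_safe() because ipp_put_mem_node() does list_del() plus kfree() on the entry being visited. The standalone userspace sketch below, with a hand-rolled list standing in for <linux/list.h> and purely illustrative names such as put_node(), shows why the iterator has to cache the next element before the current one is freed:

/*
 * Illustrative only: minimal userspace model of the cleanup pattern used by
 * ipp_clean_mem_nodes()/ipp_put_mem_node(). No kernel headers; the loop
 * mirrors what list_for_each_entry_safe() does.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *prev, *next;
};

/* unlink and free one entry, like list_del() + kfree() in ipp_put_mem_node() */
static void put_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	free(n);
}

int main(void)
{
	struct node head = { .id = -1, .prev = &head, .next = &head };
	struct node *n, *tmp;
	int i;

	/* queue three entries at the tail, like buffers on c_node->mem_list[] */
	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->prev = head.prev;
		n->next = &head;
		head.prev->next = n;
		head.prev = n;
	}

	/*
	 * Safe traversal: tmp caches the successor before put_node() frees
	 * the current entry, so the step expression never touches freed
	 * memory. This is the list_for_each_entry_safe() idiom.
	 */
	for (n = head.next, tmp = n->next; n != &head; n = tmp, tmp = n->next) {
		printf("putting node %d\n", n->id);
		put_node(n);
	}

	return 0;
}

With the plain (non-safe) iterator, the loop's step expression would read the next pointer out of memory that put_node() has just freed, which is exactly the use-after-free the _safe variant avoids.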