#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
-#include "msm_gem.h"
-#include "msm_fence.h"
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
[...]
return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
-static void msm_atomic_commit_tail(struct drm_atomic_state *state)
+void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* NOTE: _wait_for_vblanks() only waits for vblank on
- * enabled CRTCs. So we end up faulting when disabling
- * due to (potentially) unref'ing the outgoing fb's
- * before the vblank when the disable has latched.
- *
- * But if it did wait on disabled (or newly disabled)
- * CRTCs, that would be racy (ie. we could have missed
- * the irq. We need some way to poll for pipe shut
- * down. Or just live with occasionally hitting the
- * timeout in the CRTC disable path (which really should
- * not be critical path)
- */
-
msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
drm_atomic_helper_cleanup_planes(dev, state);
}
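/*
 * Sketch (not part of this patch): exporting msm_atomic_commit_tail() only
 * makes sense if the core atomic helper can call it.  The hook for that in
 * the DRM core is drm_mode_config_helper_funcs.atomic_commit_tail, which
 * drm_atomic_helper_commit() invokes from its commit path (falling back to
 * drm_atomic_helper_commit_tail() when the hook is NULL).  The struct and
 * field below are real core API; where msm declares the prototype and
 * assigns helper_private is assumed here for illustration.
 */
#include <drm/drm_modeset_helper_vtables.h>

void msm_atomic_commit_tail(struct drm_atomic_state *state);

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

/*
 * Assumed hook-up during the driver's modeset init:
 *
 *	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 */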
-
-/* The (potentially) asynchronous part of the commit. At this point
- * nothing can fail short of armageddon.
- */
-static void commit_tail(struct drm_atomic_state *state)
-{
- drm_atomic_helper_wait_for_fences(state->dev, state, false);
-
- drm_atomic_helper_wait_for_dependencies(state);
-
- msm_atomic_commit_tail(state);
-
- drm_atomic_helper_commit_cleanup_done(state);
-
- drm_atomic_state_put(state);
-}
-
-static void commit_work(struct work_struct *work)
-{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
- commit_tail(state);
-}
-
-/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int msm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool nonblock)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- int i, ret;
-
- /*
- * Note that plane->atomic_async_check() should fail if we need
- * to re-assign hwpipe or anything that touches global atomic
- * state, so we'll never go down the async update path in those
- * cases.
- */
- if (state->async_update) {
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- drm_atomic_helper_async_commit(dev, state);
- drm_atomic_helper_cleanup_planes(dev, state);
- return 0;
- }
-
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
-
- INIT_WORK(&state->commit_work, commit_work);
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- if (!nonblock) {
- ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret)
- goto error;
- }
-
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- *
- * swap driver private state while still holding state_lock
- */
- BUG_ON(drm_atomic_helper_swap_state(state, true) < 0);
-
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one conditions: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_state_get(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
- commit_tail(state);
-
- return 0;
-
-error:
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
-}
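/*
 * Sketch (not part of this patch): the msm_atomic_commit() deleted above is
 * essentially an open-coded copy of drm_atomic_helper_commit(): the
 * async_update fast path, setup_commit()/prepare_planes(), the fence wait,
 * drm_atomic_helper_swap_state() and the commit_work worker all already
 * exist in the core helper, which still runs the commit tail from
 * system_unbound_wq in the nonblocking case.  The natural replacement is to
 * point the device's mode_config at the core helper.  The two callbacks and
 * helper functions shown are real DRM core API; the rest of msm's
 * drm_mode_config_funcs is assumed unchanged and elided here.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mode_config.h>

static const struct drm_mode_config_funcs mode_config_funcs = {
	/* .fb_create, .output_poll_changed, ... as already set by the driver */
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};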