char *buf);
static int test_calc_pbn_mode(void);
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id,
*/
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
list_del(&port->next);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
/* drop any tx slots msg */
kref_put(kref, drm_dp_free_mst_branch_device);
}
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}
case DP_PEER_DEVICE_MST_BRANCHING:
mstb = port->mstb;
port->mstb = NULL;
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
break;
}
}
kfree(port);
}
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
kref_put(&port->kref, drm_dp_destroy_port);
}
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_branch *to_find)
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *rmstb;
}
list_for_each_entry(port, &mstb->ports, next) {
if (port->mstb) {
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+ port->mstb, to_find);
if (rmstb)
return rmstb;
}
return NULL;
}
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_branch *rmstb = NULL;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+ mgr->mst_primary, mstb);
mutex_unlock(&mgr->lock);
return rmstb;
}
return NULL;
}
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *rport = NULL;
mutex_lock(&mgr->lock);
list_del(&port->next);
mutex_unlock(&mstb->mgr->lock);
/* drop port list reference */
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
goto out;
}
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
out:
/* put reference to this port */
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
dowork = true;
}
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
drm_dp_send_enum_path_resources(mgr, mstb, port);
if (port->mstb) {
- mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+ mstb_child = drm_dp_mst_topology_get_mstb_validated(
+ mgr, port->mstb);
if (mstb_child) {
drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_put_mst_branch_device(mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
}
}
}
mutex_unlock(&mgr->lock);
if (mstb) {
drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
}
}
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return -EINVAL;
port_num = port->port_num;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
port->parent,
&port_num);
if (!mstb) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return -EINVAL;
}
}
}
kfree(txmsg);
fail_put:
- drm_dp_put_mst_branch_device(mstb);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_mstb(mstb);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
struct drm_dp_sideband_msg_tx *txmsg;
int len, ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return -ENOMEM;
}
ret = 0;
}
kfree(txmsg);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
if (vcpi) {
port = container_of(vcpi, struct drm_dp_mst_port,
vcpi);
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr,
+ port);
if (!port) {
mutex_unlock(&mgr->payload_lock);
return -EINVAL;
cur_slots += req_payload.num_slots;
if (port)
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EINVAL;
}
kfree(txmsg);
fail_put:
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
out_unlock:
mutex_unlock(&mgr->lock);
if (mstb)
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
mgr->down_rep_recv.initial_hdr.lct,
mgr->down_rep_recv.initial_hdr.rad[0],
mgr->down_rep_recv.msg[0]);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
}
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
}
if (mstb)
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
enum drm_connector_status status = connector_status_disconnected;
/* we need to search for the port in the mgr in case its gone */
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
break;
}
out:
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
{
bool ret = false;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return ret;
ret = port->has_audio;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
struct edid *edid = NULL;
/* we need to search for the port in the mgr in case its gone */
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return NULL;
drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (port == NULL)
return -EINVAL;
req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
req_slots, topology_state->avail_slots);
if (req_slots > topology_state->avail_slots) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return -ENOSPC;
}
topology_state->avail_slots -= req_slots;
DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
{
int ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return false;
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return true;
}
}
DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
pbn, port->vcpi.num_slots);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return true;
out:
return false;
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
int slots = 0;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return slots;
slots = port->vcpi.num_slots;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
*/
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return;
port->vcpi.num_slots = 0;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return;
port->vcpi.pbn = 0;
port->vcpi.aligned_pbn = 0;
port->vcpi.vcpi = 0;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EREMOTEIO;
}
out:
kfree(txmsg);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
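
Note (illustrative, not part of the patch): the renamed helpers keep the same validate/put reference pattern shown by the callers above, only under the new drm_dp_mst_topology_* names. A minimal caller sketch follows; the surrounding function name and error handling are assumptions for illustration, modelled on drm_dp_mst_get_vcpi_slots() in this file.

/*
 * Hypothetical caller, for illustration only: take a validated topology
 * reference on a port, read a field, then drop the reference using the
 * renamed put helper.
 */
static int example_query_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_port *port)
{
	int slots;

	/* Validate the port against the current topology and take a ref. */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;

	slots = port->vcpi.num_slots;

	/* Drop the reference taken above. */
	drm_dp_mst_topology_put_port(port);
	return slots;
}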