{
int i = 0;
- lp->link_status = 0;
+ lp->link_up = 0;
+ lp->qfull = 0;
lp->max_retry_count = 3;
lp->e_d_tov = 2 * 1000; /* FC-FS default */
lp->r_a_tov = 2 * 2 * 1000;
if (fc_set_mfs(lp, mfs))
return -EINVAL;
- lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
if (!fcoe_link_ok(lp))
- lp->link_status |= FC_LINK_UP;
+ lp->link_up = 1;
/* offload features support */
if (fc->real_dev->features & NETIF_F_SG)
if (rc) {
fcoe_insert_wait_queue(lp, skb);
if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- fc_pause(lp);
+ lp->qfull = 1;
}
return 0;
* fcoe_watchdog - fcoe timer callback
* @vp:
*
- * This checks the pending queue length for fcoe and put fcoe to be paused state
+ * This checks the pending queue length for fcoe and sets the lport's qfull
* if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
* fcoe_hostlist.
*
{
struct fc_lport *lp;
struct fcoe_softc *fc;
- int paused = 0;
+ int qfilled = 0;
read_lock(&fcoe_hostlist_lock);
list_for_each_entry(fc, &fcoe_hostlist, list) {
lp = fc->lp;
if (lp) {
if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- paused = 1;
+ qfilled = 1;
if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
- if (paused)
- fc_unpause(lp);
+ if (qfilled)
+ lp->qfull = 0;
}
}
}
**/
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
- int rc, unpause = 0;
- int paused = 0;
+ int rc;
struct sk_buff *skb;
struct fcoe_softc *fc;
spin_lock_bh(&fc->fcoe_pending_queue.lock);
/*
- * is this interface paused?
+ * if the interface's pending queue is full, set qfull in the lport.
*/
if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- paused = 1;
+ lp->qfull = 1;
if (fc->fcoe_pending_queue.qlen) {
while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
spin_lock_bh(&fc->fcoe_pending_queue.lock);
}
if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
- unpause = 1;
+ lp->qfull = 0;
}
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
- if ((unpause) && (paused))
- fc_unpause(lp);
return fc->fcoe_pending_queue.qlen;
}
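For context, a minimal sketch (not part of this patch) of the consumer side of the new flag: a transmit path can test lp->qfull and defer the frame onto the pending queue that fcoe_watchdog and fcoe_check_wait_queue maintain. The helper name fcoe_example_queue_frame() is hypothetical; fcoe_insert_wait_queue() is the existing deferral helper used earlier in this patch, and dev_queue_xmit() stands in for the real send path.

/* Illustrative sketch only, not part of the patch. */
static int fcoe_example_queue_frame(struct fc_lport *lp,
				    struct fcoe_softc *fc,
				    struct sk_buff *skb)
{
	if (lp->qfull) {
		/* backlog already past FCOE_MAX_QUEUE_DEPTH: defer, don't drop */
		fcoe_insert_wait_queue(lp, skb);
		return 0;
	}
	/* queue not full: hand the frame to the network device */
	return dev_queue_xmit(skb);
}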
struct net_device *real_dev = ptr;
struct fcoe_softc *fc;
struct fcoe_dev_stats *stats;
- u16 new_status;
+ u32 new_link_up;
u32 mfs;
int rc = NOTIFY_OK;
goto out;
}
- new_status = lp->link_status;
+ new_link_up = lp->link_up;
switch (event) {
case NETDEV_DOWN:
case NETDEV_GOING_DOWN:
- new_status &= ~FC_LINK_UP;
+ new_link_up = 0;
break;
case NETDEV_UP:
case NETDEV_CHANGE:
- new_status &= ~FC_LINK_UP;
- if (!fcoe_link_ok(lp))
- new_status |= FC_LINK_UP;
+ new_link_up = !fcoe_link_ok(lp);
break;
case NETDEV_CHANGEMTU:
mfs = fc->real_dev->mtu -
(sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
if (mfs >= FC_MIN_MAX_FRAME)
fc_set_mfs(lp, mfs);
- new_status &= ~FC_LINK_UP;
- if (!fcoe_link_ok(lp))
- new_status |= FC_LINK_UP;
+ new_link_up = !fcoe_link_ok(lp);
break;
case NETDEV_REGISTER:
break;
default:
FC_DBG("unknown event %ld call", event);
}
- if (lp->link_status != new_status) {
- if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+ if (lp->link_up != new_link_up) {
+ if (new_link_up)
fc_linkup(lp);
else {
stats = lp->dev_stats[smp_processor_id()];
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
{
/* lock ? */
- return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
+ return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
}
/**
lp = shost_priv(sc_cmd->device->host);
if (lp->state != LPORT_ST_READY)
return rc;
- else if (!(lp->link_status & FC_LINK_UP))
+ else if (!lp->link_up)
return rc;
spin_lock_irqsave(lp->host->host_lock, flags);
{
struct fc_lport *lp = shost_priv(shost);
- if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+ if (lp->link_up)
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
else
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
- if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
- lport->link_status |= FC_LINK_UP;
+ if (!lport->link_up) {
+ lport->link_up = 1;
if (lport->state == LPORT_ST_RESET)
fc_lport_enter_flogi(lport);
FC_DEBUG_LPORT("Link is down for port (%6x)\n",
fc_host_port_id(lport->host));
- if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
- lport->link_status &= ~(FC_LINK_UP);
+ if (lport->link_up) {
+ lport->link_up = 0;
fc_lport_enter_reset(lport);
lport->tt.fcp_cleanup(lport);
}
}
EXPORT_SYMBOL(fc_linkdown);
-/**
- * fc_pause - Pause the flow of frames
- * @lport: The lport to be paused
- */
-void fc_pause(struct fc_lport *lport)
-{
- mutex_lock(&lport->lp_mutex);
- lport->link_status |= FC_PAUSE;
- mutex_unlock(&lport->lp_mutex);
-}
-EXPORT_SYMBOL(fc_pause);
-
-/**
- * fc_unpause - Unpause the flow of frames
- * @lport: The lport to be unpaused
- */
-void fc_unpause(struct fc_lport *lport)
-{
- mutex_lock(&lport->lp_mutex);
- lport->link_status &= ~(FC_PAUSE);
- mutex_unlock(&lport->lp_mutex);
-}
-EXPORT_SYMBOL(fc_unpause);
-
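With fc_pause()/fc_unpause() removed, there is no longer an lp_mutex-protected helper for flow control; the fcoe changes above simply assign the per-lport boolean directly, under the driver's own pending-queue lock where needed. A minimal sketch of that pattern (the helper name fcoe_example_update_qfull() is hypothetical; the fields match the fcoe code above):

/* Illustrative sketch of the direct-assignment pattern that replaces
 * fc_pause()/fc_unpause(); not part of the patch.
 */
static void fcoe_example_update_qfull(struct fc_lport *lp,
				      struct fcoe_softc *fc)
{
	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
		lp->qfull = 1;	/* backlog built up: throttle new I/O */
	else
		lp->qfull = 0;	/* backlog drained below the threshold */
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}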
/**
* fc_fabric_logoff - Logout of the fabric
* @lport: fc_lport pointer to logoff the fabric
fc_host_fabric_name(lport->host) = 0;
fc_host_port_id(lport->host) = 0;
- if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
+ if (lport->link_up)
fc_lport_enter_flogi(lport);
}
/*
* FC HBA status
*/
-#define FC_PAUSE (1 << 1)
-#define FC_LINK_UP (1 << 0)
-
enum fc_lport_state {
LPORT_ST_NONE = 0,
LPORT_ST_FLOGI,
/* Operational Information */
struct libfc_function_template tt;
- u16 link_status;
+ u8 link_up;
+ u8 qfull;
enum fc_lport_state state;
unsigned long boot_time;
*/
void fc_linkdown(struct fc_lport *);
-/*
- * Pause and unpause traffic.
- */
-void fc_pause(struct fc_lport *);
-void fc_unpause(struct fc_lport *);
-
/*
* Configure the local port.
*/