if (time < HZ)
return 0;
/* Wait for discovery to finish */
- scsi_flush_work(shost);
+ sas_drain_work(SHOST_TO_SAS_HA(shost));
return 1;
}
int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = ha->lldd_ha;
if (test_bit(IHOST_START_PENDING, &ihost->flags))
return 0;
- /* todo: use sas_flush_discovery once it is upstream */
- scsi_flush_work(shost);
-
- scsi_flush_work(shost);
+ sas_drain_work(ha);
dev_dbg(&ihost->pdev->dev,
"%s: ihost->status = %d, time = %ld\n",
/* ---------- Events ---------- */
+static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+ /* chained work is not subject to SAS_HA_DRAINING or SAS_HA_REGISTERED */
+ scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_chain_event(int event, unsigned long *pending,
+ struct work_struct *work,
+ struct sas_ha_struct *ha)
+{
+ if (!test_and_set_bit(event, pending)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->state_lock, flags);
+ sas_chain_work(ha, work);
+ spin_unlock_irqrestore(&ha->state_lock, flags);
+ }
+}
+
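The split between the chained and unchained submission paths is deliberate: discovery work routinely schedules its next phase from inside a work item that sas_drain_work() is currently flushing, so that follow-up has to land on the host workqueue (where drain_workqueue() will still wait for it) rather than on the defer list. A minimal sketch of the pattern, assuming a hypothetical discovery step; the function name and the surrounding driver glue are illustrative, not part of the patch:

    /* Illustrative only: a discovery phase that chains its follow-up.
     * Because it runs on the host workqueue, it uses the chained path so
     * the next phase is queued even while SAS_HA_DRAINING is set and
     * drain_workqueue() still sees and flushes it.
     */
    static void example_disc_phase(struct asd_sas_port *port)
    {
        struct sas_discovery *disc = &port->disc;

        /* ... finish one phase of domain discovery ... */

        /* hand off the next phase; bypasses the DRAINING/REGISTERED checks */
        sas_chain_event(DISCE_DISCOVER_DOMAIN, &disc->pending,
                        &disc->disc_work[DISCE_DISCOVER_DOMAIN].work,
                        port->ha);
    }
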
int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
BUG_ON(ev >= DISC_NUM_EVENTS);
- sas_queue_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
+ sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
return 0;
}
*
*/
+#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"
#include "sas_dump.h"
+static void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+ if (!test_bit(SAS_HA_REGISTERED, &ha->state))
+ return;
+
+ if (test_bit(SAS_HA_DRAINING, &ha->state))
+ list_add(&work->entry, &ha->defer_q);
+ else
+ scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_queue_event(int event, unsigned long *pending,
+ struct work_struct *work,
+ struct sas_ha_struct *ha)
+{
+ if (!test_and_set_bit(event, pending)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->state_lock, flags);
+ sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->state_lock, flags);
+ }
+}
+
+int sas_drain_work(struct sas_ha_struct *ha)
+{
+ struct workqueue_struct *wq = ha->core.shost->work_q;
+ struct work_struct *w, *_w;
+ int err;
+
+ err = mutex_lock_interruptible(&ha->drain_mutex);
+ if (err)
+ return err;
+
+ set_bit(SAS_HA_DRAINING, &ha->state);
+ /* flush submitters */
+ spin_lock_irq(&ha->state_lock);
+ spin_unlock_irq(&ha->state_lock);
+
+ drain_workqueue(wq);
+
+ spin_lock_irq(&ha->state_lock);
+ clear_bit(SAS_HA_DRAINING, &ha->state);
+ list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
+ list_del_init(&w->entry);
+ sas_queue_work(ha, w);
+ }
+ spin_unlock_irq(&ha->state_lock);
+ mutex_unlock(&ha->drain_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sas_drain_work);
+
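For completeness, the unchained path (sas_queue_event()) is what the phy/port/ha notifiers use when an event arrives from the LLDD, typically out of interrupt context; while a drain is in flight those items are parked on ha->defer_q and requeued by the loop above once drain_workqueue() returns. From the driver side, the visible change is that ->scan_finished() and host teardown now wait on sas_drain_work() instead of scsi_flush_work(). A sketch of a scan_finished hook using the new call, mirroring the driver conversions in this patch (the function name is illustrative):

    static int example_scan_finished(struct Scsi_Host *shost, unsigned long time)
    {
        struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

        /* give link-up/phy events time to arrive (roughly 1s) */
        if (time < HZ)
            return 0;

        /* wait for queued discovery work, including chained phases */
        sas_drain_work(ha);
        return 1;
    }
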
static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
{
BUG_ON(event >= HA_NUM_EVENTS);
set_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_lock_init(&sas_ha->state_lock);
+ mutex_init(&sas_ha->drain_mutex);
+ INIT_LIST_HEAD(&sas_ha->defer_q);
error = sas_register_phys(sas_ha);
if (error) {
{
unsigned long flags;
- /* Set the state to unregistered to avoid further
- * events to be queued */
+ /* Set the state to unregistered to prevent further unchained
+ * events from being queued
+ */
spin_lock_irqsave(&sas_ha->state_lock, flags);
clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_unlock_irqrestore(&sas_ha->state_lock, flags);
- scsi_flush_work(sas_ha->core.shost);
+ sas_drain_work(sas_ha);
sas_unregister_ports(sas_ha);
}
#endif
-static inline void sas_queue_event(int event, unsigned long *pending,
- struct work_struct *work,
- struct sas_ha_struct *sas_ha)
-{
- if (!test_and_set_bit(event, pending)) {
- unsigned long flags;
-
- spin_lock_irqsave(&sas_ha->state_lock, flags);
- if (test_bit(SAS_HA_REGISTERED, &sas_ha->state))
- scsi_queue_work(sas_ha->core.shost, work);
- spin_unlock_irqrestore(&sas_ha->state_lock, flags);
- }
-}
-
static inline void sas_fill_in_rphy(struct domain_device *dev,
struct sas_rphy *rphy)
{
if (mvs_prv->scan_finished == 0)
return 0;
- scsi_flush_work(shost);
+ sas_drain_work(sha);
return 1;
}
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+
/* give the phy enabling interrupt event time to come in (1s
* is empirically about all it takes) */
if (time < HZ)
return 0;
/* Wait for discovery to finish */
- scsi_flush_work(shost);
+ sas_drain_work(ha);
return 1;
}
enum sas_ha_state {
SAS_HA_REGISTERED,
+ SAS_HA_DRAINING,
};
struct sas_ha_struct {
struct sas_ha_event ha_events[HA_NUM_EVENTS];
unsigned long pending;
+ struct list_head defer_q; /* work queued while draining */
+ struct mutex drain_mutex;
unsigned long state;
spinlock_t state_lock;
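
A brief note on how the new fields relate to the existing locks: defer_q is only touched under state_lock, and drain_mutex serializes callers of sas_drain_work(). Sketched below as annotations on the same fields (the comment wording is mine, not from the patch):

    struct list_head defer_q;     /* work parked by sas_queue_work() while
                                   * SAS_HA_DRAINING is set; protected by
                                   * state_lock */
    struct mutex     drain_mutex; /* serializes concurrent sas_drain_work()
                                   * callers */
    unsigned long    state;       /* SAS_HA_REGISTERED, SAS_HA_DRAINING */
    spinlock_t       state_lock;  /* submitters test the state bits and call
                                   * scsi_queue_work() under this lock */
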
extern void sas_target_destroy(struct scsi_target *);
extern int sas_slave_alloc(struct scsi_device *);
extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+extern int sas_drain_work(struct sas_ha_struct *ha);
extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req);