diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e5a9526d20376ab341b54618248dbdd93db3ded3..148d3af92aefad04fa42c48780ba4138e8033e59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -529,6 +529,14 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
        spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
+static inline int scsi_device_is_busy(struct scsi_device *sdev)
+{
+       if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
+               return 1;
+
+       return 0;
+}
+
 static inline int scsi_target_is_busy(struct scsi_target *starget)
 {
        return ((starget->can_queue > 0 &&
@@ -536,6 +544,15 @@ static inline int scsi_target_is_busy(struct scsi_target *starget)
                 starget->target_blocked);
 }
 
+static inline int scsi_host_is_busy(struct Scsi_Host *shost)
+{
+       if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
+           shost->host_blocked || shost->host_self_blocked)
+               return 1;
+
+       return 0;
+}
+
 /*
  * Function:   scsi_run_queue()
  *
@@ -550,19 +567,18 @@ static inline int scsi_target_is_busy(struct scsi_target *starget)
  */
 static void scsi_run_queue(struct request_queue *q)
 {
-       struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
+       struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
+       LIST_HEAD(starved_list);
        unsigned long flags;
 
        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);
 
        spin_lock_irqsave(shost->host_lock, flags);
-       while (!list_empty(&shost->starved_list) &&
-              !shost->host_blocked && !shost->host_self_blocked &&
-               !((shost->can_queue > 0) &&
-                 (shost->host_busy >= shost->can_queue))) {
+       list_splice_init(&shost->starved_list, &starved_list);
 
+       while (!list_empty(&starved_list)) {
                int flagset;
 
                /*
@@ -575,24 +591,18 @@ static void scsi_run_queue(struct request_queue *q)
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
-               sdev = list_entry(shost->starved_list.next,
-                                         struct scsi_device, starved_entry);
-               /*
-                * The *queue_ready functions can add a device back onto the
-                * starved list's tail, so we must check for a infinite loop.
-                */
-               if (sdev == starved_head)
+               if (scsi_host_is_busy(shost))
                        break;
-               if (!starved_head)
-                       starved_head = sdev;
 
+               sdev = list_entry(starved_list.next,
+                                 struct scsi_device, starved_entry);
+               list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }
 
-               list_del_init(&sdev->starved_entry);
                spin_unlock(shost->host_lock);
 
                spin_lock(sdev->request_queue->queue_lock);
@@ -608,6 +618,8 @@ static void scsi_run_queue(struct request_queue *q)
 
                spin_lock(shost->host_lock);
        }
+       /* put any unprocessed entries back */
+       list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
 
        blk_run_queue(q);
@@ -636,8 +648,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
        struct request *req = cmd->request;
        unsigned long flags;
 
-       scsi_unprep_request(req);
        spin_lock_irqsave(q->queue_lock, flags);
+       scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -1348,8 +1360,6 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
 static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
 {
-       if (sdev->device_busy >= sdev->queue_depth)
-               return 0;
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
@@ -1363,7 +1373,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
                        return 0;
                }
        }
-       if (sdev->device_blocked)
+       if (scsi_device_is_busy(sdev))
                return 0;
 
        return 1;
@@ -1440,8 +1450,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                        return 0;
                }
        }
-       if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
-           shost->host_blocked || shost->host_self_blocked) {
+       if (scsi_host_is_busy(shost)) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
@@ -1454,6 +1463,37 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
        return 1;
 }
 
+/*
+ * Busy state exporting function for request stacking drivers.
+ *
+ * For efficiency, no lock is taken to check the busy state of
+ * shost/starget/sdev, since the returned value is not guaranteed and
+ * may be changed after request stacking drivers call the function,
+ * regardless of taking lock or not.
+ *
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os
+ * (e.g. !sdev), scsi needs to return 'not busy'.
+ * Otherwise, request stacking drivers may hold requests forever.
+ */
+static int scsi_lld_busy(struct request_queue *q)
+{
+       struct scsi_device *sdev = q->queuedata;
+       struct Scsi_Host *shost;
+       struct scsi_target *starget;
+
+       if (!sdev)
+               return 0;
+
+       shost = sdev->host;
+       starget = scsi_target(sdev);
+
+       if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
+           scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+               return 1;
+
+       return 0;
+}
+
 /*
  * Kill a request for a dead device
  */
@@ -1757,6 +1797,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
        blk_queue_prep_rq(q, scsi_prep_fn);
        blk_queue_softirq_done(q, scsi_softirq_done);
        blk_queue_rq_timed_out(q, scsi_times_out);
+       blk_queue_lld_busy(q, scsi_lld_busy);
        return q;
 }
 
@@ -2105,22 +2146,21 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
        do {
                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
                                          timeout, retries);
-       } while ((driver_byte(result) & DRIVER_SENSE) &&
-                sshdr && sshdr->sense_key == UNIT_ATTENTION &&
-                --retries);
+               if (sdev->removable && scsi_sense_valid(sshdr) &&
+                   sshdr->sense_key == UNIT_ATTENTION)
+                       sdev->changed = 1;
+       } while (scsi_sense_valid(sshdr) &&
+                sshdr->sense_key == UNIT_ATTENTION && --retries);
 
        if (!sshdr)
                /* could not allocate sense buffer, so can't process it */
                return result;
 
-       if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
-
-               if ((scsi_sense_valid(sshdr)) &&
-                   ((sshdr->sense_key == UNIT_ATTENTION) ||
-                    (sshdr->sense_key == NOT_READY))) {
-                       sdev->changed = 1;
-                       result = 0;
-               }
+       if (sdev->removable && scsi_sense_valid(sshdr) &&
+           (sshdr->sense_key == UNIT_ATTENTION ||
+            sshdr->sense_key == NOT_READY)) {
+               sdev->changed = 1;
+               result = 0;
        }
        if (!sshdr_external)
                kfree(sshdr);
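
For context, a rough sketch of how a request stacking driver (request-based dm, for instance) might consult the hook registered above via blk_queue_lld_busy(). blk_lld_busy() simply invokes the queue's lld_busy_fn, so on a SCSI request queue it ends up in scsi_lld_busy(). The function and structure names below (stacked_dispatch, top_q) are illustrative only and not part of this patch; only blk_lld_busy() and the scsi_lld_busy() registration come from the code shown here.

/*
 * Hypothetical dispatch step in a request stacking driver (names are
 * illustrative): before remapping a request to the underlying SCSI
 * queue, ask the block layer whether the LLD beneath reports busy.
 */
static int stacked_dispatch(struct request_queue *top_q,
			    struct request_queue *scsi_q,
			    struct request *rq)
{
	/*
	 * blk_lld_busy() returns whatever the lld_busy_fn set by
	 * blk_queue_lld_busy() reports; for queues built by
	 * scsi_alloc_queue() that is scsi_lld_busy().  Non-zero means
	 * "keep the request on the stacking driver's queue and retry
	 * later" rather than pushing it down now.
	 */
	if (blk_lld_busy(scsi_q))
		return 1;	/* defer: device, target or host is busy */

	/* otherwise remap and resubmit rq to scsi_q (driver specific) */
	return 0;
}

As the comment above scsi_lld_busy() notes, the answer is advisory and taken without the host lock, so a stacking driver has to tolerate a stale result either way; the important guarantee is only that a dead queue (!sdev) reports "not busy" so queued requests are not held forever.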