Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 156
1 files changed, 123 insertions, 33 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a82d2fe80fb5..98ee55ced592 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-static struct kmem_cache *scsi_bidi_sdb_cache;
+struct kmem_cache *scsi_sdb_cache;
 
 static void scsi_run_queue(struct request_queue *q);
 
@@ -207,6 +207,15 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	 */
 	blk_execute_rq(req->q, NULL, req, 1);
 
+	/*
+	 * Some devices (USB mass-storage in particular) may transfer
+	 * garbage data together with a residue indicating that the data
+	 * is invalid.  Prevent the garbage from being misinterpreted
+	 * and prevent security leaks by zeroing out the excess data.
+	 */
+	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
+		memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+
 	ret = req->errors;
  out:
 	blk_put_request(req);
@@ -775,9 +784,12 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 		struct scsi_data_buffer *bidi_sdb =
 			cmd->request->next_rq->special;
 		scsi_free_sgtable(bidi_sdb);
-		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
 		cmd->request->next_rq->special = NULL;
 	}
+
+	if (scsi_prot_sg_count(cmd))
+		scsi_free_sgtable(cmd->prot_sdb);
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
@@ -840,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
-	int this_count = scsi_bufflen(cmd);
+	int this_count;
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
 	int error = 0;
@@ -896,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	 */
 	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
 		return;
+	this_count = blk_rq_bytes(req);
 
 	/* good_bytes = 0, or (inclusive) there were leftovers and
 	 * result = 0, so scsi_end_request couldn't retry.
@@ -938,9 +951,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				 * 6-byte command.
 				 */
 				scsi_requeue_command(q, cmd);
-				return;
-			} else {
+			} else if (sshdr.asc == 0x10) /* DIX */
+				scsi_end_request(cmd, -EIO, this_count, 0);
+			else
 				scsi_end_request(cmd, -EIO, this_count, 1);
+			return;
+		case ABORTED_COMMAND:
+			if (sshdr.asc == 0x10) { /* DIF */
+				scsi_end_request(cmd, -EIO, this_count, 0);
 				return;
 			}
 			break;
@@ -1050,7 +1068,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 	if (blk_bidi_rq(cmd->request)) {
 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
-			scsi_bidi_sdb_cache, GFP_ATOMIC);
+			scsi_sdb_cache, GFP_ATOMIC);
 		if (!bidi_sdb) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
@@ -1063,6 +1081,26 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 		goto err_exit;
 	}
 
+	if (blk_integrity_rq(cmd->request)) {
+		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
+		int ivecs, count;
+
+		BUG_ON(prot_sdb == NULL);
+		ivecs = blk_rq_count_integrity_sg(cmd->request);
+
+		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
+			error = BLKPREP_DEFER;
+			goto err_exit;
+		}
+
+		count = blk_rq_map_integrity_sg(cmd->request,
+						prot_sdb->table.sgl);
+		BUG_ON(unlikely(count > ivecs));
+
+		cmd->prot_sdb = prot_sdb;
+		cmd->prot_sdb->table.nents = count;
+	}
+
 	return BLKPREP_OK;
 
 err_exit:
@@ -1143,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 	cmd->transfersize = req->data_len;
 	cmd->allowed = req->retries;
-	cmd->timeout_per_command = req->timeout;
 	return BLKPREP_OK;
 }
 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
 
@@ -1160,6 +1197,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 	if (ret != BLKPREP_OK)
 		return ret;
 
+	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+		     && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+		if (ret != BLKPREP_OK)
+			return ret;
+	}
+
 	/*
 	 * Filesystem requests must transfer data.
 	 */
@@ -1205,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 			break;
 		case SDEV_QUIESCE:
 		case SDEV_BLOCK:
+		case SDEV_CREATED_BLOCK:
 			/*
 			 * If the devices is blocked we defer normal commands.
 			 */
@@ -1320,7 +1366,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 				printk("scsi%d unblocking host at zero depth\n",
 					shost->host_no));
 		} else {
-			blk_plug_device(q);
 			return 0;
 		}
 	}
@@ -1351,7 +1396,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 
 	if (unlikely(cmd == NULL)) {
 		printk(KERN_CRIT "impossible request in %s.\n",
-		       __FUNCTION__);
+		       __func__);
 		BUG();
 	}
 
@@ -1371,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	spin_unlock(shost->host_lock);
 
 	spin_lock(sdev->request_queue->queue_lock);
-	__scsi_done(cmd);
+	blk_complete_request(req);
 }
 
 static void scsi_softirq_done(struct request *rq)
 {
-	struct scsi_cmnd *cmd = rq->completion_data;
-	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+	struct scsi_cmnd *cmd = rq->special;
+	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
 	int disposition;
 
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
+	/*
+	 * Set the serial numbers back to zero
+	 */
+	cmd->serial_number = 0;
+
+	atomic_inc(&cmd->device->iodone_cnt);
+	if (cmd->result)
+		atomic_inc(&cmd->device->ioerr_cnt);
+
 	disposition = scsi_decide_disposition(cmd);
 	if (disposition != SUCCESS &&
 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1475,12 +1529,27 @@ static void scsi_request_fn(struct request_queue *q)
 			printk(KERN_CRIT "impossible request in %s.\n"
 					 "please mail a stack trace to "
 					 "linux-scsi@vger.kernel.org\n",
-					 __FUNCTION__);
+					 __func__);
 			blk_dump_rq_flags(req, "foo");
 			BUG();
 		}
 		spin_lock(shost->host_lock);
+		/*
+		 * We hit this when the driver is using a host wide
+		 * tag map. For device level tag maps the queue_depth check
+		 * in the device ready fn would prevent us from trying
+		 * to allocate a tag. Since the map is a shared host resource
+		 * we add the dev to the starved list so it eventually gets
+		 * a run when a tag is freed.
+		 */
+		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+			if (list_empty(&sdev->starved_entry))
+				list_add_tail(&sdev->starved_entry,
+					      &shost->starved_list);
+			goto not_ready;
+		}
+
 		if (!scsi_host_queue_ready(q, shost, sdev))
 			goto not_ready;
 
 		if (scsi_target(sdev)->single_lun) {
@@ -1615,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 
 	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_softirq_done(q, scsi_softirq_done);
+	blk_queue_rq_timed_out(q, scsi_times_out);
 	return q;
 }
 
@@ -1684,11 +1754,11 @@ int __init scsi_init_queue(void)
 		return -ENOMEM;
 	}
 
-	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
-					sizeof(struct scsi_data_buffer),
-					0, 0, NULL);
-	if (!scsi_bidi_sdb_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+					sizeof(struct scsi_data_buffer),
+					0, 0, NULL);
+	if (!scsi_sdb_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
 		goto cleanup_io_context;
 	}
 
@@ -1701,7 +1771,7 @@ int __init scsi_init_queue(void)
 		if (!sgp->slab) {
 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 
 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1709,13 +1779,13 @@ int __init scsi_init_queue(void)
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 	}
 
 	return 0;
 
-cleanup_bidi_sdb:
+cleanup_sdb:
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		if (sgp->pool)
@@ -1723,7 +1793,7 @@ cleanup_bidi_sdb:
 		if (sgp->slab)
 			kmem_cache_destroy(sgp->slab);
 	}
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 cleanup_io_context:
 	kmem_cache_destroy(scsi_io_context_cache);
 
@@ -1735,7 +1805,7 @@ void scsi_exit_queue(void)
 	int i;
 
 	kmem_cache_destroy(scsi_io_context_cache);
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
@@ -2004,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	switch (state) {
 	case SDEV_CREATED:
-		/* There are no legal states that come back to
-		 * created.  This is the manually initialised start
-		 * state */
-		goto illegal;
+		switch (oldstate) {
+		case SDEV_CREATED_BLOCK:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
 
 	case SDEV_RUNNING:
 		switch (oldstate) {
@@ -2045,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	case SDEV_BLOCK:
 		switch (oldstate) {
-		case SDEV_CREATED:
 		case SDEV_RUNNING:
+		case SDEV_CREATED_BLOCK:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
+
+	case SDEV_CREATED_BLOCK:
+		switch (oldstate) {
+		case SDEV_CREATED:
 			break;
 		default:
 			goto illegal;
@@ -2334,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
 	int err = 0;
 
 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
-	if (err)
-		return err;
+	if (err) {
+		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+		if (err)
+			return err;
+	}
 
 	/*
 	 * The device has transitioned to SDEV_BLOCK.  Stop the
@@ -2378,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
 	 * and goose the device queue if successful.
 	 */
 	err = scsi_device_set_state(sdev, SDEV_RUNNING);
-	if (err)
-		return err;
+	if (err) {
+		err = scsi_device_set_state(sdev, SDEV_CREATED);
+
+		if (err)
+			return err;
+	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_start_queue(q);
@@ -2470,7 +2560,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 	if (unlikely(i == sg_count)) {
 		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
-		       __FUNCTION__, sg_len, *offset, sg_count);
+		       __func__, sg_len, *offset, sg_count);
 		WARN_ON(1);
 		return NULL;
 	}
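
A note on the memset() hunk in scsi_execute() above: req->data_len carries the residue, i.e. the number of bytes at the tail of the caller's buffer that the device never actually transferred. Zeroing exactly that tail keeps callers from interpreting stale heap contents as device data. Below is a minimal standalone sketch of the same arithmetic; zero_residue() and the sample values are illustrative only, not kernel code.

#include <stdio.h>
#include <string.h>

/*
 * Standalone model of the residue-zeroing added to scsi_execute().
 * "resid" plays the role of req->data_len: the byte count the device
 * reported as NOT transferred. Everything past the valid data is
 * wiped so the caller cannot read garbage or leaked memory.
 */
static void zero_residue(unsigned char *buffer, size_t bufflen, size_t resid)
{
	/* Mirrors: if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) */
	if (resid > 0 && resid <= bufflen)
		memset(buffer + (bufflen - resid), 0, resid);
}

int main(void)
{
	unsigned char buf[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };

	/* Device says only 5 of 8 bytes are valid: residue of 3. */
	zero_residue(buf, sizeof(buf), 3);

	for (size_t i = 0; i < sizeof(buf); i++)
		putchar(buf[i] ? buf[i] : '.');
	putchar('\n');		/* prints "ABCDE..." */
	return 0;
}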
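The scsi_io_completion() hunks route protection-information failures: sense key ILLEGAL_REQUEST with ASC 0x10 indicates a host-side (DIX) guard error, ABORTED_COMMAND with ASC 0x10 a target-side (DIF) error, and in both cases the request is now ended with -EIO instead of being retried. A compact userspace model of that triage follows; may_retry() is hypothetical, chosen only to mirror the shape of the switch in the diff (the sense-key values are the standard SCSI ones).

#include <stdio.h>

#define ILLEGAL_REQUEST 0x05
#define ABORTED_COMMAND 0x0b

/*
 * Returns 1 if a failed command is a candidate for retry/requeue,
 * 0 if it must fail immediately: integrity errors are not worth
 * retrying, since the data is provably corrupt rather than the
 * victim of a transient transport hiccup.
 */
static int may_retry(int sense_key, int asc)
{
	switch (sense_key) {
	case ILLEGAL_REQUEST:
		return asc != 0x10;	/* ASC 0x10 here: DIX guard error */
	case ABORTED_COMMAND:
		return asc != 0x10;	/* ASC 0x10 here: DIF guard error */
	default:
		return 1;
	}
}

int main(void)
{
	printf("DIX error, retry? %d\n", may_retry(ILLEGAL_REQUEST, 0x10));	/* 0 */
	printf("other abort, retry? %d\n", may_retry(ABORTED_COMMAND, 0x24));	/* 1 */
	return 0;
}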
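The scsi_request_fn() hunk covers hosts that use a host-wide tag map: when a request arrives untagged because the shared map is exhausted, the device is parked on the host's starved list and gets another run once a tag frees up, rather than being retried in a tight loop. A toy model of that park-and-retry pattern is sketched below; struct shost, struct sdev, dispatch() and free_tag() are illustrative stand-ins, with a plain counter replacing the block-layer tag map and a singly linked list replacing list_head.

#include <stdio.h>
#include <stddef.h>

struct sdev {
	const char *name;
	struct sdev *next_starved;	/* stands in for starved_entry */
};

struct shost {
	int free_tags;			/* stands in for the shared tag map */
	struct sdev *starved;		/* head of the starved list */
};

static int dispatch(struct shost *h, struct sdev *d)
{
	if (h->free_tags == 0) {
		/* No tag available: park the device and bail out. */
		d->next_starved = h->starved;
		h->starved = d;
		printf("%s: no tag, parked on starved list\n", d->name);
		return -1;
	}
	h->free_tags--;
	printf("%s: got a tag, dispatched\n", d->name);
	return 0;
}

static void free_tag(struct shost *h)
{
	h->free_tags++;
	/* A freed tag gives the first starved device another run. */
	if (h->starved) {
		struct sdev *d = h->starved;
		h->starved = d->next_starved;
		dispatch(h, d);
	}
}

int main(void)
{
	struct shost host = { .free_tags = 1, .starved = NULL };
	struct sdev a = { "sda", NULL }, b = { "sdb", NULL };

	dispatch(&host, &a);	/* takes the only tag */
	dispatch(&host, &b);	/* map exhausted: parked */
	free_tag(&host);	/* sdb now gets its run */
	return 0;
}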
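Finally, the scsi_device_set_state() hunks introduce SDEV_CREATED_BLOCK so a device can be blocked before scanning completes and later return to SDEV_CREATED, while scsi_internal_device_block()/_unblock() fall back to the new state when the plain SDEV_BLOCK or SDEV_RUNNING transition is rejected. The validation idiom is a nested switch: the outer switch selects the requested state, the inner one whitelists its legal predecessors. A trimmed userspace model follows; set_state() is hypothetical and covers only the states this diff touches, with every other transition permitted for brevity.

#include <stdio.h>

enum sdev_state {
	SDEV_CREATED,
	SDEV_RUNNING,
	SDEV_BLOCK,
	SDEV_CREATED_BLOCK,
};

/* Nested-switch transition check modelled on scsi_device_set_state(). */
static int set_state(enum sdev_state *cur, enum sdev_state next)
{
	switch (next) {
	case SDEV_CREATED:
		switch (*cur) {
		case SDEV_CREATED_BLOCK:	/* new: unblock before scan */
			break;
		default:
			goto illegal;
		}
		break;
	case SDEV_BLOCK:
		switch (*cur) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;
	case SDEV_CREATED_BLOCK:
		switch (*cur) {
		case SDEV_CREATED:		/* only reachable from CREATED */
			break;
		default:
			goto illegal;
		}
		break;
	default:				/* other states omitted here */
		break;
	}
	*cur = next;
	return 0;
illegal:
	return -1;
}

int main(void)
{
	enum sdev_state s = SDEV_CREATED;

	printf("CREATED -> CREATED_BLOCK: %d\n", set_state(&s, SDEV_CREATED_BLOCK));
	printf("CREATED_BLOCK -> CREATED: %d\n", set_state(&s, SDEV_CREATED));
	printf("CREATED -> BLOCK (illegal): %d\n", set_state(&s, SDEV_BLOCK));
	return 0;
}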