author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2008-01-15 23:32:17 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2008-01-23 12:37:37 -0500
commit     de25deb18016f66dcdede165d07654559bb332bc
tree       b566c2a369d3dce85507ab28ea20ffee020e0c06 /drivers/scsi/scsi.c
parent     b30c2fc1113edfb2371427c10503ff942b0a0370
[SCSI] use dynamically allocated sense buffer
This removes the static sense_buffer array in struct scsi_cmnd and replaces it with a dynamically allocated sense buffer (allocated with GFP_DMA). The reason for doing this is that some architectures need a cacheline-aligned buffer for DMA: http://lkml.org/lkml/2007/11/19/2

The problems are that scsi_eh_prep_cmnd puts scsi_cmnd::sense_buffer into an sglist and that some LLDs DMA directly to scsi_cmnd::sense_buffer, so it must be safe to DMA to scsi_cmnd::sense_buffer. This patch solves these issues: __scsi_get_command allocates the sense buffer via kmem_cache_alloc and attaches it to the scsi_cmnd, so everything just works as before.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
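[Editor's note] The snippet below is a minimal, illustrative sketch of the allocation pattern the patch builds on: a dedicated slab cache created with SLAB_CACHE_DMA hands out sense buffers as their own slab objects, which can be safely DMA-mapped, instead of an array embedded in the middle of struct scsi_cmnd. The example_* names are hypothetical and not part of the patch; the real implementation is in the diff below.

/* Illustrative sketch only -- the example_* names do not appear in the patch. */
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>             /* SCSI_SENSE_BUFFERSIZE */

static struct kmem_cache *example_sense_slab;

static int example_sense_slab_init(void)
{
        /* One object per sense buffer, from a DMA-capable slab cache. */
        example_sense_slab = kmem_cache_create("example_sense_buffer",
                                               SCSI_SENSE_BUFFERSIZE, 0,
                                               SLAB_CACHE_DMA, NULL);
        return example_sense_slab ? 0 : -ENOMEM;
}

static unsigned char *example_alloc_sense_buffer(gfp_t gfp_mask)
{
        /* __GFP_DMA keeps the buffer in a zone the device can reach. */
        return kmem_cache_alloc(example_sense_slab, __GFP_DMA | gfp_mask);
}

static void example_free_sense_buffer(unsigned char *buf)
{
        kmem_cache_free(example_sense_slab, buf);
}

Because each buffer is a separate slab object rather than a field inside scsi_cmnd, it no longer shares storage with unrelated command fields, which is what allows architectures with non-coherent DMA (per the lkml thread referenced above) to map it safely.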
Diffstat (limited to 'drivers/scsi/scsi.c')
 -rw-r--r--  drivers/scsi/scsi.c | 61
 1 file changed, 59 insertions(+), 2 deletions(-)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 54ff611b8677..0a4a5b8b87c6 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -161,6 +161,9 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
 
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
+static struct kmem_cache *sense_buffer_slab;
+static int sense_buffer_slab_users;
+
 /**
  * __scsi_get_command - Allocate a struct scsi_cmnd
  * @shost: host to transmit command
@@ -172,6 +175,7 @@ static DEFINE_MUTEX(host_cmd_pool_mutex);
 struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 {
         struct scsi_cmnd *cmd;
+        unsigned char *buf;
 
         cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                         gfp_mask | shost->cmd_pool->gfp_mask);
@@ -186,6 +190,21 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
                         list_del_init(&cmd->list);
                 }
                 spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+                if (cmd) {
+                        buf = cmd->sense_buffer;
+                        memset(cmd, 0, sizeof(*cmd));
+                        cmd->sense_buffer = buf;
+                }
+        } else {
+                buf = kmem_cache_alloc(sense_buffer_slab, __GFP_DMA|gfp_mask);
+                if (likely(buf)) {
+                        memset(cmd, 0, sizeof(*cmd));
+                        cmd->sense_buffer = buf;
+                } else {
+                        kmem_cache_free(shost->cmd_pool->slab, cmd);
+                        cmd = NULL;
+                }
         }
 
         return cmd;
@@ -212,7 +231,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
         if (likely(cmd != NULL)) {
                 unsigned long flags;
 
-                memset(cmd, 0, sizeof(*cmd));
                 cmd->device = dev;
                 init_timer(&cmd->eh_timeout);
                 INIT_LIST_HEAD(&cmd->list);
@@ -246,8 +264,10 @@ void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
         }
         spin_unlock_irqrestore(&shost->free_list_lock, flags);
 
-        if (likely(cmd != NULL))
+        if (likely(cmd != NULL)) {
+                kmem_cache_free(sense_buffer_slab, cmd->sense_buffer);
                 kmem_cache_free(shost->cmd_pool->slab, cmd);
+        }
 
         put_device(dev);
 }
@@ -290,6 +310,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
 {
         struct scsi_host_cmd_pool *pool;
         struct scsi_cmnd *cmd;
+        unsigned char *sense_buffer;
 
         spin_lock_init(&shost->free_list_lock);
         INIT_LIST_HEAD(&shost->free_list);
@@ -319,9 +340,18 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
                         GFP_KERNEL | shost->cmd_pool->gfp_mask);
         if (!cmd)
                 goto fail2;
+
+        sense_buffer = kmem_cache_alloc(sense_buffer_slab,
+                                        GFP_KERNEL | __GFP_DMA);
+        if (!sense_buffer)
+                goto destroy_backup;
+
+        cmd->sense_buffer = sense_buffer;
         list_add(&cmd->list, &shost->free_list);
         return 0;
 
+destroy_backup:
+        kmem_cache_free(shost->cmd_pool->slab, cmd);
 fail2:
         mutex_lock(&host_cmd_pool_mutex);
         if (!--pool->users)
@@ -342,6 +372,7 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
 
                 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
                 list_del_init(&cmd->list);
+                kmem_cache_free(sense_buffer_slab, cmd->sense_buffer);
                 kmem_cache_free(shost->cmd_pool->slab, cmd);
         }
 
@@ -351,6 +382,32 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
         mutex_unlock(&host_cmd_pool_mutex);
 }
 
+int scsi_setup_command_sense_buffer(struct Scsi_Host *shost)
+{
+        mutex_lock(&host_cmd_pool_mutex);
+        if (!sense_buffer_slab_users) {
+                sense_buffer_slab = kmem_cache_create("scsi_sense_buffer",
+                                                      SCSI_SENSE_BUFFERSIZE,
+                                                      0, SLAB_CACHE_DMA, NULL);
+                if (!sense_buffer_slab) {
+                        mutex_unlock(&host_cmd_pool_mutex);
+                        return -ENOMEM;
+                }
+        }
+        sense_buffer_slab_users++;
+        mutex_unlock(&host_cmd_pool_mutex);
+
+        return 0;
+}
+
+void scsi_destroy_command_sense_buffer(struct Scsi_Host *shost)
+{
+        mutex_lock(&host_cmd_pool_mutex);
+        if (!--sense_buffer_slab_users)
+                kmem_cache_destroy(sense_buffer_slab);
+        mutex_unlock(&host_cmd_pool_mutex);
+}
+
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd)
 {
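[Editor's note] The two new helpers, scsi_setup_command_sense_buffer and scsi_destroy_command_sense_buffer, manage the refcounted sense buffer slab; their callers live outside drivers/scsi/scsi.c and are not part of this diff (the diffstat is limited to this file). The sketch below is a hypothetical pairing from a host setup/teardown path, shown only to illustrate the ordering the new code requires.

/* Hypothetical callers, not part of this patch. */
static int example_host_cmd_init(struct Scsi_Host *shost)
{
        int err;

        /*
         * The sense buffer slab must exist before the freelist is built,
         * because scsi_setup_command_freelist() allocates the backup
         * command's sense buffer from it.
         */
        err = scsi_setup_command_sense_buffer(shost);
        if (err)
                return err;

        err = scsi_setup_command_freelist(shost);
        if (err) {
                scsi_destroy_command_sense_buffer(shost);
                return err;
        }

        return 0;
}

static void example_host_cmd_exit(struct Scsi_Host *shost)
{
        /*
         * Reverse order: free the commands (and their sense buffers)
         * first, then drop this host's reference on the slab.
         */
        scsi_destroy_command_freelist(shost);
        scsi_destroy_command_sense_buffer(shost);
}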