author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2011-04-22 09:13:54 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2011-05-10 16:53:44 -0400
commit     f30e6d3e419bfb5540fa82ba7eca01d578556e6b (patch)
tree       e4d6e7bad161a76b09557bf7513358ae1ce8f7fb /drivers/firewire
parent     020abf03cd659388f94cb328e1e1df0656e0d7ff (diff)
firewire: octlet AT payloads can be stack-allocated
We no longer need slab allocations to satisfy streaming DMA mapping
constraints, thanks to commit da28947e7e36
"firewire: ohci: avoid separate DMA mapping for small AT payloads".
(Besides, the slab-allocated buffers that firewire-core, firewire-sbp2,
and firedtv used to provide for 8-byte write and lock requests were
still not fully portable since they crossed cacheline boundaries or
shared a cacheline with unrelated CPU-accessed data. snd-firewire-lib
got this aspect right by using an extra kmalloc/kfree just for the
8-byte transaction buffer.)
This change replaces kmalloc'ed lock transaction scratch buffers in
firewire-core, firedtv, and snd-firewire-lib with local stack allocations.
Perhaps the most notable result of the change is simpler locking: there is
no longer any need to serialize uses of preallocated per-device buffers.
Allocation and deallocation also become simpler.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Acked-by: Clemens Ladisch <clemens@ladisch.de>
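To make the pattern concrete, here is a condensed before/after view of the bus manager lock request in bm_work(), pieced together from the core-card.c hunk below; surrounding error handling and locking are omitted, so treat it as an illustrative sketch rather than a verbatim excerpt.

	/* Before: the scratch buffer was a field of struct fw_card, shared by
	 * all users of the card and therefore subject to serialization. */
	card->bm_transaction_data[0] = cpu_to_be32(0x3f);
	card->bm_transaction_data[1] = cpu_to_be32(local_id);
	rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				   irm_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				   card->bm_transaction_data, 8);

	/* After: a local __be32[2] on the stack suffices, because AT payloads
	 * of up to 8 bytes are copied into the request packet and never
	 * DMA-mapped (commit da28947e7e36). */
	__be32 transaction_data[2];

	transaction_data[0] = cpu_to_be32(0x3f);
	transaction_data[1] = cpu_to_be32(local_id);
	rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				   irm_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				   transaction_data, 8);

Since nothing is shared anymore, the per-card buffer and the serialization around it can be removed outright, which is what the hunks below do.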
Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/core-card.c         | 16
-rw-r--r--  drivers/firewire/core-cdev.c         |  4
-rw-r--r--  drivers/firewire/core-iso.c          | 21
-rw-r--r--  drivers/firewire/core-transaction.c  |  7
4 files changed, 24 insertions, 24 deletions
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 3c44fbc81ac..e119f1e6ba4 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -258,8 +258,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 
 	if (!card->broadcast_channel_allocated) {
 		fw_iso_resource_manage(card, generation, 1ULL << 31,
-				       &channel, &bandwidth, true,
-				       card->bm_transaction_data);
+				       &channel, &bandwidth, true);
 		if (channel != 31) {
 			fw_notify("failed to allocate broadcast channel\n");
 			return;
@@ -294,6 +293,7 @@ static void bm_work(struct work_struct *work)
 	bool root_device_is_cmc;
 	bool irm_is_1394_1995_only;
 	bool keep_this_irm;
+	__be32 transaction_data[2];
 
 	spin_lock_irq(&card->lock);
 
@@ -355,21 +355,21 @@ static void bm_work(struct work_struct *work)
 			goto pick_me;
 		}
 
-		card->bm_transaction_data[0] = cpu_to_be32(0x3f);
-		card->bm_transaction_data[1] = cpu_to_be32(local_id);
+		transaction_data[0] = cpu_to_be32(0x3f);
+		transaction_data[1] = cpu_to_be32(local_id);
 
 		spin_unlock_irq(&card->lock);
 
 		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 				irm_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-				card->bm_transaction_data, 8);
+				transaction_data, 8);
 
 		if (rcode == RCODE_GENERATION)
 			/* Another bus reset, BM work has been rescheduled. */
 			goto out;
 
-		bm_id = be32_to_cpu(card->bm_transaction_data[0]);
+		bm_id = be32_to_cpu(transaction_data[0]);
 
 		spin_lock_irq(&card->lock);
 		if (rcode == RCODE_COMPLETE && generation == card->generation)
@@ -490,11 +490,11 @@ static void bm_work(struct work_struct *work)
 		/*
 		 * Make sure that the cycle master sends cycle start packets.
 		 */
-		card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
 		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
 				root_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_STATE_SET,
-				card->bm_transaction_data, 4);
+				transaction_data, 4);
 		if (rcode == RCODE_GENERATION)
 			goto out;
 	}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 62ac111af24..2a3f1c4d690 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -141,7 +141,6 @@ struct iso_resource {
 	int generation;
 	u64 channels;
 	s32 bandwidth;
-	__be32 transaction_data[2];
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
@@ -1229,8 +1228,7 @@ static void iso_resource_work(struct work_struct *work)
 			r->channels, &channel, &bandwidth,
 			todo == ISO_RES_ALLOC ||
 			todo == ISO_RES_REALLOC ||
-			todo == ISO_RES_ALLOC_ONCE,
-			r->transaction_data);
+			todo == ISO_RES_ALLOC_ONCE);
 	/*
 	 * Is this generation outdated already? As long as this resource sticks
 	 * in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 481056df926..f872ede5af3 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -196,9 +196,10 @@ EXPORT_SYMBOL(fw_iso_context_stop);
  */
 
 static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-			    int bandwidth, bool allocate, __be32 data[2])
+			    int bandwidth, bool allocate)
 {
 	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+	__be32 data[2];
 
 	/*
 	 * On a 1394a IRM with low contention, try < 1 is enough.
@@ -233,9 +234,10 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 }
 
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
-		u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
+		u32 channels_mask, u64 offset, bool allocate)
 {
 	__be32 bit, all, old;
+	__be32 data[2];
 	int channel, ret = -EIO, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -284,7 +286,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
-			       int generation, int channel, __be32 buffer[2])
+			       int generation, int channel)
 {
 	u32 mask;
 	u64 offset;
@@ -293,7 +295,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
 	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
 
-	manage_channel(card, irm_id, generation, mask, offset, false, buffer);
+	manage_channel(card, irm_id, generation, mask, offset, false);
 }
 
 /**
@@ -322,7 +324,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
  */
 void fw_iso_resource_manage(struct fw_card *card, int generation,
 			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate, __be32 buffer[2])
+			    bool allocate)
 {
 	u32 channels_hi = channels_mask;	/* channels 31...0 */
 	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
@@ -335,11 +337,11 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (channels_hi)
 		c = manage_channel(card, irm_id, generation, channels_hi,
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
-				allocate, buffer);
+				allocate);
 	if (channels_lo && c < 0) {
 		c = manage_channel(card, irm_id, generation, channels_lo,
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
-				allocate, buffer);
+				allocate);
 		if (c >= 0)
 			c += 32;
 	}
@@ -351,14 +353,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (*bandwidth == 0)
 		return;
 
-	ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
-			       allocate, buffer);
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
 	if (ret < 0)
 		*bandwidth = 0;
 
 	if (allocate && ret < 0) {
 		if (c >= 0)
-			deallocate_channel(card, irm_id, generation, c, buffer);
+			deallocate_channel(card, irm_id, generation, c);
 		*channel = ret;
 	}
 }
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index d00f8ce902c..77275fdf6c1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -326,8 +326,8 @@ static int allocate_tlabel(struct fw_card *card)
  * It will contain tag, channel, and sy data instead of a node ID then.
  *
  * The payload buffer at @data is going to be DMA-mapped except in case of
- * quadlet-sized payload or of local (loopback) requests. Hence make sure that
- * the buffer complies with the restrictions for DMA-mapped memory. The
+ * @length <= 8 or of local (loopback) requests. Hence make sure that the
+ * buffer complies with the restrictions of the streaming DMA mapping API.
  * @payload must not be freed before the @callback is called.
  *
  * In case of request types without payload, @data is NULL and @length is 0.
@@ -411,7 +411,8 @@ static void transaction_callback(struct fw_card *card, int rcode,
  *
  * Returns the RCODE. See fw_send_request() for parameter documentation.
  * Unlike fw_send_request(), @data points to the payload of the request or/and
- * to the payload of the response.
+ * to the payload of the response. DMA mapping restrictions apply to outbound
+ * request payloads of >= 8 bytes but not to inbound response payloads.
  */
 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
 		       int generation, int speed, unsigned long long offset,
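As a usage illustration of the revised kernel-doc above, the following hypothetical caller sketches where an on-stack buffer is acceptable and where DMA-mappable memory is still required; card, destination_id, generation, offset and the payload values are placeholders rather than code from this patch.

	/* An 8-byte lock payload is copied into the AT request packet and is
	 * not DMA-mapped, so an on-stack buffer is fine. */
	__be32 lock_data[2];
	int rcode;

	lock_data[0] = cpu_to_be32(expected);
	lock_data[1] = cpu_to_be32(new_value);
	rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				   destination_id, generation, SCODE_100,
				   offset, lock_data, 8);

	/* A larger outbound payload is streaming-DMA-mapped for transmission,
	 * so it has to come from DMA-mappable memory such as kmalloc() and
	 * must stay allocated until fw_run_transaction() returns. */
	__be32 *block = kmalloc(64, GFP_KERNEL);

	if (!block)
		return -ENOMEM;
	/* ... fill block ... */
	rcode = fw_run_transaction(card, TCODE_WRITE_BLOCK_REQUEST,
				   destination_id, generation, SCODE_400,
				   offset, block, 64);
	kfree(block);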