diff options
author | Stefan Richter <stefanr@s5r6.in-berlin.de> | 2011-04-22 09:13:54 -0400 |
---|---|---|
committer | Stefan Richter <stefanr@s5r6.in-berlin.de> | 2011-05-10 16:53:44 -0400 |
commit | f30e6d3e419bfb5540fa82ba7eca01d578556e6b (patch) | |
tree | e4d6e7bad161a76b09557bf7513358ae1ce8f7fb /drivers/firewire/core-iso.c | |
parent | 020abf03cd659388f94cb328e1e1df0656e0d7ff (diff) |
firewire: octlet AT payloads can be stack-allocated
We do not need slab allocations anymore in order to satisfy
streaming DMA mapping constraints, thanks to commit da28947e7e36
"firewire: ohci: avoid separate DMA mapping for small AT payloads".
(Besides, the slab-allocated buffers that firewire-core, firewire-sbp2,
and firedtv used to provide for 8-byte write and lock requests were
still not fully portable since they crossed cacheline boundaries or
shared a cacheline with unrelated CPU-accessed data. snd-firewire-lib
got this aspect right by using an extra kmalloc/kfree just for the
8-byte transaction buffer.)
This change replaces kmalloc'ed lock transaction scratch buffers in
firewire-core, firedtv, and snd-firewire-lib by local stack allocations.
Perhaps the most notable result of the change is simpler locking because
there is no need to serialize usages of preallocated per-device buffers
anymore. Also, allocations and deallocations are simpler.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Acked-by: Clemens Ladisch <clemens@ladisch.de>
Diffstat (limited to 'drivers/firewire/core-iso.c')
-rw-r--r-- | drivers/firewire/core-iso.c | 21 |
1 file changed, 11 insertions, 10 deletions
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 481056df9268..f872ede5af37 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -196,9 +196,10 @@ EXPORT_SYMBOL(fw_iso_context_stop); | |||
196 | */ | 196 | */ |
197 | 197 | ||
198 | static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, | 198 | static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, |
199 | int bandwidth, bool allocate, __be32 data[2]) | 199 | int bandwidth, bool allocate) |
200 | { | 200 | { |
201 | int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; | 201 | int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; |
202 | __be32 data[2]; | ||
202 | 203 | ||
203 | /* | 204 | /* |
204 | * On a 1394a IRM with low contention, try < 1 is enough. | 205 | * On a 1394a IRM with low contention, try < 1 is enough. |
@@ -233,9 +234,10 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, | |||
233 | } | 234 | } |
234 | 235 | ||
235 | static int manage_channel(struct fw_card *card, int irm_id, int generation, | 236 | static int manage_channel(struct fw_card *card, int irm_id, int generation, |
236 | u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) | 237 | u32 channels_mask, u64 offset, bool allocate) |
237 | { | 238 | { |
238 | __be32 bit, all, old; | 239 | __be32 bit, all, old; |
240 | __be32 data[2]; | ||
239 | int channel, ret = -EIO, retry = 5; | 241 | int channel, ret = -EIO, retry = 5; |
240 | 242 | ||
241 | old = all = allocate ? cpu_to_be32(~0) : 0; | 243 | old = all = allocate ? cpu_to_be32(~0) : 0; |
@@ -284,7 +286,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, | |||
284 | } | 286 | } |
285 | 287 | ||
286 | static void deallocate_channel(struct fw_card *card, int irm_id, | 288 | static void deallocate_channel(struct fw_card *card, int irm_id, |
287 | int generation, int channel, __be32 buffer[2]) | 289 | int generation, int channel) |
288 | { | 290 | { |
289 | u32 mask; | 291 | u32 mask; |
290 | u64 offset; | 292 | u64 offset; |
@@ -293,7 +295,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, | |||
293 | offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : | 295 | offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : |
294 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; | 296 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; |
295 | 297 | ||
296 | manage_channel(card, irm_id, generation, mask, offset, false, buffer); | 298 | manage_channel(card, irm_id, generation, mask, offset, false); |
297 | } | 299 | } |
298 | 300 | ||
299 | /** | 301 | /** |
@@ -322,7 +324,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, | |||
322 | */ | 324 | */ |
323 | void fw_iso_resource_manage(struct fw_card *card, int generation, | 325 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
324 | u64 channels_mask, int *channel, int *bandwidth, | 326 | u64 channels_mask, int *channel, int *bandwidth, |
325 | bool allocate, __be32 buffer[2]) | 327 | bool allocate) |
326 | { | 328 | { |
327 | u32 channels_hi = channels_mask; /* channels 31...0 */ | 329 | u32 channels_hi = channels_mask; /* channels 31...0 */ |
328 | u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ | 330 | u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ |
@@ -335,11 +337,11 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, | |||
335 | if (channels_hi) | 337 | if (channels_hi) |
336 | c = manage_channel(card, irm_id, generation, channels_hi, | 338 | c = manage_channel(card, irm_id, generation, channels_hi, |
337 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, | 339 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, |
338 | allocate, buffer); | 340 | allocate); |
339 | if (channels_lo && c < 0) { | 341 | if (channels_lo && c < 0) { |
340 | c = manage_channel(card, irm_id, generation, channels_lo, | 342 | c = manage_channel(card, irm_id, generation, channels_lo, |
341 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, | 343 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, |
342 | allocate, buffer); | 344 | allocate); |
343 | if (c >= 0) | 345 | if (c >= 0) |
344 | c += 32; | 346 | c += 32; |
345 | } | 347 | } |
@@ -351,14 +353,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, | |||
351 | if (*bandwidth == 0) | 353 | if (*bandwidth == 0) |
352 | return; | 354 | return; |
353 | 355 | ||
354 | ret = manage_bandwidth(card, irm_id, generation, *bandwidth, | 356 | ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); |
355 | allocate, buffer); | ||
356 | if (ret < 0) | 357 | if (ret < 0) |
357 | *bandwidth = 0; | 358 | *bandwidth = 0; |
358 | 359 | ||
359 | if (allocate && ret < 0) { | 360 | if (allocate && ret < 0) { |
360 | if (c >= 0) | 361 | if (c >= 0) |
361 | deallocate_channel(card, irm_id, generation, c, buffer); | 362 | deallocate_channel(card, irm_id, generation, c); |
362 | *channel = ret; | 363 | *channel = ret; |
363 | } | 364 | } |
364 | } | 365 | } |