author     Christoph Lameter <clameter@sgi.com>       2006-12-06 23:33:16 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-07 11:39:24 -0500
commit     54e6ecb23951b195d02433a741c7f7cb0b796c78
tree       c8885c49f37c8d383945b8af69d51597494ed62c   /drivers/ieee1394
parent     f7267c0c0721fd02ad3dc37c3d6dd24ccd81d4d6
[PATCH] slab: remove SLAB_ATOMIC
SLAB_ATOMIC is an alias of GFP_ATOMIC
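In the kernels of this period the alias is just a preprocessor define in <linux/slab.h>, roughly:

	#define SLAB_ATOMIC	GFP_ATOMIC

so switching callers such as raw1394.c over to GFP_ATOMIC, as in the hunks below, changes no behaviour; it only drops the redundant name ahead of its removal.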
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/ieee1394')
-rw-r--r--   drivers/ieee1394/raw1394.c   30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 5ec4f5eb6b19..47f6a4e29b40 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -259,7 +259,7 @@ static void host_reset(struct hpsb_host *host)
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
 			if (fi->notification == RAW1394_NOTIFY_ON) {
-				req = __alloc_pending_request(SLAB_ATOMIC);
+				req = __alloc_pending_request(GFP_ATOMIC);
 
 				if (req != NULL) {
 					req->file_info = fi;
@@ -306,13 +306,13 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
 			if (!(fi->listen_channels & (1ULL << channel)))
 				continue;
 
-			req = __alloc_pending_request(SLAB_ATOMIC);
+			req = __alloc_pending_request(GFP_ATOMIC);
 			if (!req)
 				break;
 
 			if (!ibs) {
 				ibs = kmalloc(sizeof(*ibs) + length,
-					      SLAB_ATOMIC);
+					      GFP_ATOMIC);
 				if (!ibs) {
 					kfree(req);
 					break;
@@ -367,13 +367,13 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
 			if (!fi->fcp_buffer)
 				continue;
 
-			req = __alloc_pending_request(SLAB_ATOMIC);
+			req = __alloc_pending_request(GFP_ATOMIC);
 			if (!req)
 				break;
 
 			if (!ibs) {
 				ibs = kmalloc(sizeof(*ibs) + length,
-					      SLAB_ATOMIC);
+					      GFP_ATOMIC);
 				if (!ibs) {
 					kfree(req);
 					break;
@@ -593,7 +593,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
 	switch (req->req.type) {
 	case RAW1394_REQ_LIST_CARDS:
 		spin_lock_irqsave(&host_info_lock, flags);
-		khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
+		khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC);
 
 		if (khl) {
 			req->req.misc = host_count;
@@ -1045,7 +1045,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
 	}
 	if (arm_addr->notification_options & ARM_READ) {
 		DBGMSG("arm_read -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_read -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1064,7 +1064,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
 			    sizeof(struct arm_response) +
 			    sizeof(struct arm_request_response);
 		}
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_read -> rcode_conflict_error");
@@ -1198,7 +1198,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
 	}
 	if (arm_addr->notification_options & ARM_WRITE) {
 		DBGMSG("arm_write -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_write -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1209,7 +1209,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
 		    sizeof(struct arm_request) + sizeof(struct arm_response) +
 		    (length) * sizeof(byte_t) +
 		    sizeof(struct arm_request_response);
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_write -> rcode_conflict_error");
@@ -1400,7 +1400,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
 	if (arm_addr->notification_options & ARM_LOCK) {
 		byte_t *buf1, *buf2;
 		DBGMSG("arm_lock -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_lock -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1408,7 +1408,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
 						   The request may be retried */
 		}
 		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_lock -> rcode_conflict_error");
@@ -1628,7 +1628,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
 	if (arm_addr->notification_options & ARM_LOCK) {
 		byte_t *buf1, *buf2;
 		DBGMSG("arm_lock64 -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			DBGMSG("arm_lock64 -> rcode_conflict_error");
@@ -1636,7 +1636,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
 						   The request may be retried */
 		}
 		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -2443,7 +2443,7 @@ static void queue_rawiso_event(struct file_info *fi)
 	/* only one ISO activity event may be in the queue */
 	if (!__rawiso_event_in_queue(fi)) {
 		struct pending_request *req =
-		    __alloc_pending_request(SLAB_ATOMIC);
+		    __alloc_pending_request(GFP_ATOMIC);
 
 		if (req) {
 			req->file_info = fi;