author	James Morris <jmorris@namei.org>	2008-12-04 01:16:36 -0500
committer	James Morris <jmorris@namei.org>	2008-12-04 01:16:36 -0500
commit	ec98ce480ada787f2cfbd696980ff3564415505b (patch)
tree	1a4d644b38f9f1e4b4e086fde0b195df4a92cf84 /lib
parent	3496f92beb9aa99ef21fccc154a36c7698e9c538 (diff)
parent	feaf3848a813a106f163013af6fcf6c4bfec92d9 (diff)
Merge branch 'master' into next
Conflicts:
	fs/nfsd/nfs4recover.c

Manually fixed above to use new creds API functions, e.g. nfs4_save_creds().

Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'lib')
-rw-r--r--	lib/idr.c	14
-rw-r--r--	lib/scatterlist.c	2
-rw-r--r--	lib/swiotlb.c	10
3 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4d..7a785a0c2ea0 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -237,6 +239,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +496,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +589,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
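
The idr.c hunks above store each idr_layer's depth in a new ->layer field at allocation time, so RCU-side readers such as idr_find() and idr_replace() derive the shift count from the node they actually dereferenced rather than from idp->layers, which can be momentarily out of step with the tree while it grows or shrinks. A minimal user-space sketch of that idea (illustrative names only, not the kernel code):

/*
 * User-space sketch, not lib/idr.c: the tree depth is read from the
 * node itself instead of a separately maintained counter.
 */
#include <stddef.h>

#define DEMO_BITS 5
#define DEMO_MASK ((1 << DEMO_BITS) - 1)

struct demo_layer {
	int layer;				/* 0 at the leaf level */
	struct demo_layer *ary[1 << DEMO_BITS];	/* leaf slots hold stored pointers */
};

static void *demo_find(struct demo_layer *top, int id)
{
	struct demo_layer *p = top;
	int n;

	if (!p)
		return NULL;
	n = (p->layer + 1) * DEMO_BITS;	/* depth comes from the node, not a global */

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= DEMO_BITS;
		p = p->ary[(id >> n) & DEMO_MASK];
	}
	return (void *)p;
}

Because n is computed from the node p itself, a reader that observes either the old or the new top pointer still walks a number of levels consistent with that node.
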
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
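
The scatterlist.c one-liner fixes the argument passed to kunmap(): kunmap() expects the struct page that was handed to kmap(), while the virtual-address form is what kunmap_atomic() takes. A short sketch of the corrected pairing; the struct and function here only mirror the relevant sg_mapping_iter fields and are not the real API:

/*
 * Sketch of the kmap()/kunmap() pairing this fix restores.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>

struct demo_miter {
	struct page *page;	/* page handed to kmap()/kmap_atomic() */
	void *addr;		/* virtual address the mapping returned */
};

static void demo_unmap(struct demo_miter *m, bool atomic)
{
	if (atomic)
		kunmap_atomic(m->addr, KM_BIO_SRC_IRQ);	/* takes the address */
	else
		kunmap(m->page);			/* takes the page, not the address */

	m->page = NULL;
	m->addr = NULL;
}
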
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61b..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
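
The swiotlb.c hunks switch the coherent-allocation path to the device's coherent_dma_mask, falling back to a 32-bit mask when none is set, and test reachability with is_buffer_dma_capable() instead of dereferencing hwdev->dma_mask. A hedged sketch of that check, assuming the 2.6.28-era semantics where is_buffer_dma_capable(mask, addr, size) succeeds when the whole buffer lies at or below the mask; the helper names below are illustrative:

/*
 * Sketch of the mask selection and reachability test used above;
 * buffer_reachable() mirrors the era's is_buffer_dma_capable().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static u64 coherent_mask_for(struct device *hwdev)
{
	u64 dma_mask = DMA_32BIT_MASK;		/* conservative 32-bit default */

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;
	return dma_mask;
}

static int buffer_reachable(struct device *hwdev, dma_addr_t addr, size_t size)
{
	/* the whole buffer [addr, addr + size) must sit at or below the mask */
	return addr + size <= coherent_mask_for(hwdev);
}
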