Diffstat (limited to 'lib')
-rw-r--r--	lib/devres.c		25
-rw-r--r--	lib/iommu-helper.c	10
-rw-r--r--	lib/kobject.c		11
-rw-r--r--	lib/swiotlb.c		30
4 files changed, 52 insertions, 24 deletions
diff --git a/lib/devres.c b/lib/devres.c
index b1d336ce7f3d..edc27a5d1b73 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -298,6 +298,31 @@ int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
 EXPORT_SYMBOL(pcim_iomap_regions);
 
 /**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask,
+				   const char *name)
+{
+	int request_mask = ((1 << 6) - 1) & ~mask;
+	int rc;
+
+	rc = pci_request_selected_regions(pdev, request_mask, name);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, mask, name);
+	if (rc)
+		pci_release_selected_regions(pdev, request_mask);
+	return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
  * pcim_iounmap_regions - Unmap and release PCI BARs
  * @pdev: PCI device to map IO resources for
  * @mask: Mask of BARs to unmap and release
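
Note on the new devres helper: pcim_iomap_regions_request_all() requests the BARs not in @mask directly (request_mask covers all six BARs minus @mask) and lets pcim_iomap_regions() request and iomap the rest, so on success every BAR is claimed but only the masked ones are mapped. A rough usage sketch; the driver name "foo", the probe function, and the choice of BAR 0 are made up for illustration:

	/* Hypothetical probe: claim all BARs, iomap only BAR 0. */
	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		void __iomem *regs;
		int rc;

		rc = pcim_enable_device(pdev);	/* managed enable */
		if (rc)
			return rc;

		rc = pcim_iomap_regions_request_all(pdev, 1 << 0, "foo");
		if (rc)
			return rc;

		regs = pcim_iomap_table(pdev)[0];	/* BAR 0 mapping */
		return 0;
	}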
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 495575a59ca6..a3b8d4c3f77a 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -40,10 +40,12 @@ static inline void set_bit_area(unsigned long *map, unsigned long i,
 	}
 }
 
-static inline int is_span_boundary(unsigned int index, unsigned int nr,
-				   unsigned long shift,
-				   unsigned long boundary_size)
+int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+			   unsigned long shift,
+			   unsigned long boundary_size)
 {
+	BUG_ON(!is_power_of_2(boundary_size));
+
 	shift = (shift + index) & (boundary_size - 1);
 	return shift + nr > boundary_size;
 }
@@ -57,7 +59,7 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 again:
 	index = find_next_zero_area(map, size, start, nr, align_mask);
 	if (index != -1) {
-		if (is_span_boundary(index, nr, shift, boundary_size)) {
+		if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
			/* we could do more effectively */
 			start = index + 1;
 			goto again;
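
The renamed helper goes from static inline to a plain exported function, presumably so callers outside lib/iommu-helper.c (swiotlb carries an equivalent private copy below) can share the test. The check is plain modular arithmetic, which is why the new BUG_ON insists that boundary_size be a power of two. An illustrative computation with made-up values, mirroring the function body:

	unsigned int index = 14, nr = 4;		/* candidate slot, length */
	unsigned long shift = 0, boundary_size = 16;	/* must be a power of 2 */
	unsigned long offset = (shift + index) & (boundary_size - 1);
	int crosses = offset + nr > boundary_size;	/* 14 + 4 > 16: crosses */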
diff --git a/lib/kobject.c b/lib/kobject.c
index d784daeb8571..0d03252f87a8 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -153,6 +153,10 @@ static void kobject_init_internal(struct kobject *kobj)
 		return;
 	kref_init(&kobj->kref);
 	INIT_LIST_HEAD(&kobj->entry);
+	kobj->state_in_sysfs = 0;
+	kobj->state_add_uevent_sent = 0;
+	kobj->state_remove_uevent_sent = 0;
+	kobj->state_initialized = 1;
 }
 
 
@@ -289,13 +293,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
 		dump_stack();
 	}
 
-	kref_init(&kobj->kref);
-	INIT_LIST_HEAD(&kobj->entry);
+	kobject_init_internal(kobj);
 	kobj->ktype = ktype;
-	kobj->state_in_sysfs = 0;
-	kobj->state_add_uevent_sent = 0;
-	kobj->state_remove_uevent_sent = 0;
-	kobj->state_initialized = 1;
 	return;
 
 error:
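
As far as I can tell the kobject change is a pure consolidation: the four state flags move into kobject_init_internal(), and kobject_init() now delegates to it, so the two initialization paths cannot drift apart. Callers see no difference; a minimal, hypothetical example:

	static struct kobj_type foo_ktype;	/* .release etc. elided */
	static struct kobject foo_kobj;

	void foo_setup(void)
	{
		kobject_init(&foo_kobj, &foo_ktype);
		/* state_initialized is 1 and the sysfs/uevent state flags
		 * are cleared, the same result as before the refactor. */
	}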
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4bb5a11e18a2..025922807e6e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -310,7 +310,9 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	max_slots = mask + 1
+		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
+		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
 	 * For mappings greater than a page, we limit the stride (and
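
The ternary guards an overflow corner, as I read it: when the device's segment boundary mask is ~0UL (no boundary restriction at all), mask + 1 wraps to 0, so the old expression computed max_slots = 0 and every candidate slot appeared to span a boundary. Illustrative values (IO_TLB_SHIFT is 11 in this file, i.e. 2 KB slabs):

	unsigned long mask = ~0UL;	/* unrestricted segment boundary */
	/* old: ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
	 *      == ALIGN(0, ...) >> IO_TLB_SHIFT == 0, so nothing fits;
	 * new: mask + 1 == 0 selects the fallback, the slot count of
	 *      the whole address space: */
	unsigned long max_slots = 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);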
@@ -333,16 +335,18 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 		index = ALIGN(io_tlb_index, stride);
 		if (index >= io_tlb_nslabs)
 			index = 0;
-
-		while (is_span_boundary(index, nslots, offset_slots,
-					max_slots)) {
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		}
 		wrap = index;
 
 		do {
+			while (is_span_boundary(index, nslots, offset_slots,
+						max_slots)) {
+				index += stride;
+				if (index >= io_tlb_nslabs)
+					index = 0;
+				if (index == wrap)
+					goto not_found;
+			}
+
 			/*
 			 * If we find a slot that indicates we have 'nslots'
 			 * number of contiguous buffers, we allocate the
@@ -367,14 +371,12 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 
 				goto found;
 			}
-			do {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-			} while (is_span_boundary(index, nslots, offset_slots,
-						  max_slots));
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
 		} while (index != wrap);
 
+  not_found:
 		spin_unlock_irqrestore(&io_tlb_lock, flags);
 		return NULL;
 	}
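
Taken together, the two loop hunks fix termination of the slot search: previously the boundary check ran in a stand-alone while loop before wrap was recorded, so once every index appeared to span a boundary (e.g. the max_slots == 0 case above) the search spun forever. Moving the check inside the do/while and bailing out via not_found when index comes back around to wrap limits the search to a single pass over the table. A condensed sketch of the resulting control flow, not verbatim:

	wrap = index;
	do {
		/* skip candidates that would cross a segment boundary */
		while (is_span_boundary(index, nslots, offset_slots, max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;	/* scanned the whole table */
		}
		if (io_tlb_list[index] >= nslots)
			goto found;	/* enough contiguous free slots */
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);
  not_found:
	/* no usable region: drop the lock and return NULL */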