aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/iova.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r--drivers/pci/iova.c63
1 files changed, 50 insertions, 13 deletions
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 717fafaa7e02..a84571c29360 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -57,12 +57,28 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
57 iovad->cached32_node = rb_next(&free->node); 57 iovad->cached32_node = rb_next(&free->node);
58} 58}
59 59
60static int __alloc_iova_range(struct iova_domain *iovad, 60/* Computes the padding size required, to make the
 61 unsigned long size, unsigned long limit_pfn, struct iova *new) 61 * start address naturally aligned on its size
62 */
63static int
64iova_get_pad_size(int size, unsigned int limit_pfn)
65{
66 unsigned int pad_size = 0;
67 unsigned int order = ilog2(size);
68
69 if (order)
70 pad_size = (limit_pfn + 1) % (1 << order);
71
72 return pad_size;
73}
74
75static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
76 unsigned long limit_pfn, struct iova *new, bool size_aligned)
62{ 77{
63 struct rb_node *curr = NULL; 78 struct rb_node *curr = NULL;
64 unsigned long flags; 79 unsigned long flags;
65 unsigned long saved_pfn; 80 unsigned long saved_pfn;
81 unsigned int pad_size = 0;
66 82
67 /* Walk the tree backwards */ 83 /* Walk the tree backwards */
68 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); 84 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -72,22 +88,32 @@ static int __alloc_iova_range(struct iova_domain *iovad,
72 struct iova *curr_iova = container_of(curr, struct iova, node); 88 struct iova *curr_iova = container_of(curr, struct iova, node);
73 if (limit_pfn < curr_iova->pfn_lo) 89 if (limit_pfn < curr_iova->pfn_lo)
74 goto move_left; 90 goto move_left;
75 if (limit_pfn < curr_iova->pfn_hi) 91 else if (limit_pfn < curr_iova->pfn_hi)
76 goto adjust_limit_pfn; 92 goto adjust_limit_pfn;
77 if ((curr_iova->pfn_hi + size) <= limit_pfn) 93 else {
78 break; /* found a free slot */ 94 if (size_aligned)
95 pad_size = iova_get_pad_size(size, limit_pfn);
96 if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
97 break; /* found a free slot */
98 }
79adjust_limit_pfn: 99adjust_limit_pfn:
80 limit_pfn = curr_iova->pfn_lo - 1; 100 limit_pfn = curr_iova->pfn_lo - 1;
81move_left: 101move_left:
82 curr = rb_prev(curr); 102 curr = rb_prev(curr);
83 } 103 }
84 104
85 if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) { 105 if (!curr) {
86 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 106 if (size_aligned)
87 return -ENOMEM; 107 pad_size = iova_get_pad_size(size, limit_pfn);
108 if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
109 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
110 return -ENOMEM;
111 }
88 } 112 }
89 new->pfn_hi = limit_pfn; 113
90 new->pfn_lo = limit_pfn - size + 1; 114 /* pfn_lo will point to size aligned address if size_aligned is set */
115 new->pfn_lo = limit_pfn - (size + pad_size) + 1;
116 new->pfn_hi = new->pfn_lo + size - 1;
91 117
92 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 118 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
93 return 0; 119 return 0;
@@ -119,12 +145,16 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
119 * @iovad - iova domain in question 145 * @iovad - iova domain in question
120 * @size - size of page frames to allocate 146 * @size - size of page frames to allocate
121 * @limit_pfn - max limit address 147 * @limit_pfn - max limit address
148 * @size_aligned - set if size_aligned address range is required
122 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN 149 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
 123 * looking from limit_pfn instead of from IOVA_START_PFN. 150 * looking from limit_pfn instead of from IOVA_START_PFN. If the size_aligned
151 * flag is set then the allocated address iova->pfn_lo will be naturally
152 * aligned on roundup_power_of_two(size).
124 */ 153 */
125struct iova * 154struct iova *
126alloc_iova(struct iova_domain *iovad, unsigned long size, 155alloc_iova(struct iova_domain *iovad, unsigned long size,
127 unsigned long limit_pfn) 156 unsigned long limit_pfn,
157 bool size_aligned)
128{ 158{
129 unsigned long flags; 159 unsigned long flags;
130 struct iova *new_iova; 160 struct iova *new_iova;
@@ -134,8 +164,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
134 if (!new_iova) 164 if (!new_iova)
135 return NULL; 165 return NULL;
136 166
167 /* If size aligned is set then round the size to
 168 * next power of two.
169 */
170 if (size_aligned)
171 size = __roundup_pow_of_two(size);
172
137 spin_lock_irqsave(&iovad->iova_alloc_lock, flags); 173 spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
138 ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova); 174 ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
175 size_aligned);
139 176
140 if (ret) { 177 if (ret) {
141 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); 178 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);