/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
}

/* Pick the starting point for a backwards walk of the rbtree: the cached
 * 32-bit node when allocating below DMA_32BIT_PFN, else the last node.
 */
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != DMA_32BIT_PFN) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

/* Remember the most recent allocation below DMA_32BIT_PFN so the next
 * 32-bit allocation can resume the search from here.
 */
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != DMA_32BIT_PFN)
		return;
	iovad->cached32_node = &new->node;
}

/* If the node being freed sits at or above the cached node, point the
 * cache at the freed node's successor so it never references a stale node.
 */
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}

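/* Worked example (illustrative, not part of the original file): for a
 * size-aligned request of size = 8 pfns (order 3) with limit_pfn = 0x49,
 * pad_size = (0x49 + 1) % 8 = 2.  The allocator below then places the
 * range at pfn_lo = limit_pfn - (size + pad_size) + 1 = 0x40, which is
 * 8-aligned, leaving pfns 0x48-0x49 unused as padding above it.
 */
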
static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
	struct rb_node *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);
		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}

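/* Sketch of the walk above (illustrative, invented values): with existing
 * iovas [0x30,0x3f] and [0x50,0x5f] and limit_pfn = 0x5f, the search first
 * visits [0x50,0x5f], pulls limit_pfn down to 0x4f, moves left to
 * [0x30,0x3f], and a request of size 0x10 then fits at [0x40,0x4f],
 * immediately below the higher neighbour.
 */
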
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN,
 * searching downward from limit_pfn rather than upward from IOVA_START_PFN.
 * If the size_aligned flag is set then the allocated address iova->pfn_lo
 * will be naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
		size_aligned);

	if (ret) {
		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
		free_iova_mem(new_iova);
		return NULL;
	}

	/* Insert the new_iova into domain rbtree by holding writer lock */
	spin_lock(&iovad->iova_rbtree_lock);
	iova_insert_rbtree(&iovad->rbroot, new_iova);
	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
	spin_unlock(&iovad->iova_rbtree_lock);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	return new_iova;
}

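/* Illustrative usage sketch, guarded out since it is not part of the
 * original file: how a caller might drive the allocator end to end.
 * DMA_32BIT_PFN comes from iova.h; the domain variable and the request
 * size of 16 pfns are invented for the example.
 */
#if 0
static struct iova_domain example_domain;

static void example_alloc_and_free(void)
{
	struct iova *iova;

	init_iova_domain(&example_domain);
	/* 16 page frames, below the 32-bit boundary, naturally aligned */
	iova = alloc_iova(&example_domain, 16, DMA_32BIT_PFN, true);
	if (iova) {
		/* a mapping would use pfns iova->pfn_lo..iova->pfn_hi */
		__free_iova(&example_domain, iova);
	}
	put_iova_domain(&example_domain);
}
#endif
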
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can
			 * possibly reference a particular iova and hence no
			 * conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

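/* Illustrative sketch (not part of the original file): a caller that only
 * remembers a pfn inside an allocation can test whether it is still live.
 */
#if 0
static bool example_pfn_is_allocated(struct iova_domain *iovad,
	unsigned long pfn)
{
	return find_iova(iovad, pfn) != NULL;
}
#endif
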
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

/* Returns 1 if [pfn_lo, pfn_hi] intersects the range covered by @node. */
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

/* Grow an overlapping node downward to cover *pfn_lo if needed, and advance
 * *pfn_lo past the node when the requested range extends above it, so the
 * caller can keep reserving the remainder.
 */
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

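/* Worked example (illustrative, invented values): reserving [0x10,0x30]
 * against an existing node [0x18,0x20] first lowers the node to
 * [0x10,0x20], then advances *pfn_lo to 0x21 so the caller goes on to
 * reserve the remainder [0x21,0x30] beyond this node.
 */
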
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}

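/* Illustrative usage sketch (not part of the original file): carving a
 * fixed window out of the allocatable space so alloc_iova() never hands
 * it out.  The pfn bounds are invented for the example.
 */
#if 0
static void example_reserve(struct iova_domain *iovad)
{
	/* keep pfns 0xfee00-0xfeeff off limits for DMA allocations */
	if (!reserve_iova(iovad, 0xfee00, 0xfeeff))
		printk(KERN_ERR "iova: reservation failed\n");
}
#endif
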
/**
 * copy_reserved_iova - copies the reserved iova's between domains
 * @from: source domain from which to copy
 * @to: destination domain to which to copy
 * This function copies reserved iova's from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}