Diffstat (limited to 'fs/xfs/linux-2.6')
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h   |  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c    | 79
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 23
3 files changed, 22 insertions(+), 82 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 7b26f5ff9692..1dd528849755 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -21,8 +21,6 @@
 extern struct workqueue_struct *xfsdatad_workqueue;
 extern mempool_t *xfs_ioend_pool;
 
-typedef void (*xfs_ioend_func_t)(void *);
-
 /*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index cb329edc925b..d71dc44e21ed 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,75 +166,6 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable.  If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail.  This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
-/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -333,7 +264,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		free_address(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -455,10 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1743,8 +1672,6 @@ xfsbufd(
 			count++;
 		}
 
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 
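
Note: the xfs_buf.c hunks above drop XFS's private vunmap batching (free_address()/purge_addresses()) in favour of the core VM helpers, which do the lazy unmapping and TLB-flush batching internally. A minimal sketch of the resulting call pattern, assuming a caller that already holds the struct page array backing one buffer (the helper names here are illustrative, not from the patch):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map a multi-page buffer into contiguous kernel virtual space. */
static void *xb_map_pages_example(struct page **pages, unsigned int nr)
{
	/*
	 * vm_map_ram() stands in for the old vmap() plus the as_free_head
	 * batching list; -1 means no preferred NUMA node and PAGE_KERNEL
	 * matches the protection used by _xfs_buf_map_pages().
	 */
	return vm_map_ram(pages, nr, -1, PAGE_KERNEL);
}

/* Tear the mapping down; nr must match the vm_map_ram() call. */
static void xb_unmap_pages_example(void *addr, unsigned int nr)
{
	vm_unmap_ram(addr, nr);
}
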
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 595751f78350..87b8cbd23d4b 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -126,11 +126,26 @@ xfs_nfs_get_inode(
 	if (ino == 0)
 		return ERR_PTR(-ESTALE);
 
-	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
-	if (error)
+	/*
+	 * The XFS_IGET_BULKSTAT means that an invalid inode number is just
+	 * fine and not an indication of a corrupted filesystem. Because
+	 * clients can send any kind of invalid file handle, e.g. after
+	 * a restore on the server we have to deal with this case gracefully.
+	 */
+	error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
+			 XFS_ILOCK_SHARED, &ip, 0);
+	if (error) {
+		/*
+		 * EINVAL means the inode cluster doesn't exist anymore.
+		 * This implies the filehandle is stale, so we should
+		 * translate it here.
+		 * We don't use ESTALE directly down the chain to not
+		 * confuse applications using bulkstat that expect EINVAL.
+		 */
+		if (error == EINVAL)
+			error = ESTALE;
 		return ERR_PTR(-error);
-	if (!ip)
-		return ERR_PTR(-EIO);
+	}
 
 	if (ip->i_d.di_gen != generation) {
 		xfs_iput_new(ip, XFS_ILOCK_SHARED);
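
Note: the xfs_export.c hunk changes how stale NFS filehandles are reported. xfs_iget() is now called with XFS_IGET_BULKSTAT, so an unallocated inode number comes back as a plain EINVAL rather than being treated as on-disk corruption, and that EINVAL is translated to ESTALE only at this export boundary. A minimal sketch of the idea, with an illustrative helper name that is not part of the patch:

#include <linux/errno.h>

/*
 * Illustrative only: translate the lookup error at the NFS export
 * boundary.  Bulkstat-style callers probing arbitrary inode numbers
 * still see EINVAL; filehandle decoding reports ESTALE so the client
 * redoes the lookup instead of treating it as an I/O error.
 */
static int xfs_export_error_example(int error)
{
	if (error == EINVAL)	/* inode cluster no longer exists */
		error = ESTALE;	/* stale filehandle as seen by the client */
	return error;
}
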