author      Jeff Garzik <jeff@garzik.org>    2006-09-26 13:13:19 -0400
committer   Jeff Garzik <jeff@garzik.org>    2006-09-26 13:13:19 -0400
commit      c226951b93f7cd7c3a10b17384535b617bd43fd0 (patch)
tree        07b8796a5c99fbbf587b8d0dbcbc173cfe5e381e /fs
parent      b0df3bd1e553e901ec7297267611a5db88240b38 (diff)
parent      e8216dee838c09776680a6f1a2e54d81f3cdfa14 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'fs')
-rw-r--r--   fs/autofs4/expire.c     6
-rw-r--r--   fs/binfmt_elf.c        10
-rw-r--r--   fs/buffer.c             2
-rw-r--r--   fs/jbd/commit.c       182
-rw-r--r--   fs/proc/proc_misc.c    11
5 files changed, 132 insertions, 79 deletions
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 8dbd44f10e9d..d96e5c14a9ca 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -32,7 +32,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
 
         if (!do_now) {
                 /* Too young to die */
-                if (time_after(ino->last_used + timeout, now))
+                if (!timeout || time_after(ino->last_used + timeout, now))
                         return 0;
 
                 /* update last_used here :-
@@ -253,7 +253,7 @@ static struct dentry *autofs4_expire_direct(struct super_block *sb,
         struct dentry *root = dget(sb->s_root);
         int do_now = how & AUTOFS_EXP_IMMEDIATE;
 
-        if (!sbi->exp_timeout || !root)
+        if (!root)
                 return NULL;
 
         now = jiffies;
@@ -293,7 +293,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
         int do_now = how & AUTOFS_EXP_IMMEDIATE;
         int exp_leaves = how & AUTOFS_EXP_LEAVES;
 
-        if ( !sbi->exp_timeout || !root )
+        if (!root)
                 return NULL;
 
         now = jiffies;
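
The autofs4 hunks move the expiry-timeout check out of autofs4_expire_direct()/autofs4_expire_indirect() and into autofs4_can_expire(), so a timeout of 0 now simply means "never expire" at the point where each dentry is examined. For readers outside the kernel, the comparison relies on the time_after() idiom, which stays correct even when the jiffies counter wraps. A minimal userspace sketch of that check (hypothetical names, not the autofs4 API):

/*
 * Illustrative userspace sketch, not autofs4 code.  can_expire() mirrors
 * the check in the hunk above: a timeout of 0 means "never expire", and
 * the comparison uses the same wrap-safe idiom as the kernel's
 * time_after() macro.
 */
#include <stdio.h>

/* Signed difference of unsigned tick counters: stays correct across
 * counter wraparound. */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

static int can_expire(unsigned long last_used, unsigned long timeout,
                      unsigned long now)
{
        if (!timeout || time_after(last_used + timeout, now))
                return 0;       /* expiry disabled, or still too young to die */
        return 1;
}

int main(void)
{
        printf("%d\n", can_expire(100, 50, 200));       /* 1: past its timeout */
        printf("%d\n", can_expire(100, 500, 200));      /* 0: too young */
        printf("%d\n", can_expire(100, 0, 200));        /* 0: timeout disabled */
        return 0;
}

The subtraction is done on unsigned values and only the sign of the result matters, so the check keeps working when the counter wraps; and because the !timeout test comes first, a zero timeout short-circuits to "do not expire".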
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 672a3b90bc55..64802aabd1ac 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1262,7 +1262,7 @@ static void fill_elf_header(struct elfhdr *elf, int segs)
         return;
 }
 
-static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
 {
         phdr->p_type = PT_NOTE;
         phdr->p_offset = offset;
@@ -1428,7 +1428,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
         int i;
         struct vm_area_struct *vma;
         struct elfhdr *elf = NULL;
-        off_t offset = 0, dataoff;
+        loff_t offset = 0, dataoff;
         unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
         int numnote;
         struct memelfnote *notes = NULL;
@@ -1661,11 +1661,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
         ELF_CORE_WRITE_EXTRA_DATA;
 #endif
 
-        if ((off_t)file->f_pos != offset) {
+        if (file->f_pos != offset) {
                 /* Sanity check */
                 printk(KERN_WARNING
-                       "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
-                       (off_t)file->f_pos, offset);
+                       "elf_core_dump: file->f_pos (%Ld) != offset (%Ld)\n",
+                       file->f_pos, offset);
         }
 
 end_coredump:
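
The binfmt_elf hunks widen the core-dump file offset from off_t to loff_t (and switch the sanity-check printk to the matching %Ld format), so that offsets are not truncated on 32-bit kernels, where off_t is a 32-bit long. A standalone illustration of why the width matters, using fixed-size stand-ins rather than the kernel typedefs:

/*
 * Illustrative userspace sketch, not kernel code.  int32_t / int64_t
 * stand in for a 32-bit off_t and the 64-bit loff_t respectively.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t offset = (1LL << 31) + 4096;    /* just past 2 GiB */
        int32_t narrow = (int32_t)offset;       /* typically wraps negative */
        int64_t wide = offset;                  /* preserved */

        printf("narrow = %d\n", (int)narrow);
        printf("wide   = %lld\n", (long long)wide);
        return 0;
}

On a 64-bit build both types are 64 bits wide and the change makes no practical difference; it matters for 32-bit kernels writing core files larger than 2 GiB.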
diff --git a/fs/buffer.c b/fs/buffer.c
index 71649ef9b658..3b6d701073e7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *page)
 
         spin_lock(&mapping->private_lock);
         ret = drop_buffers(page, &buffers_to_free);
+        spin_unlock(&mapping->private_lock);
         if (ret) {
                 /*
                  * If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *page)
                  */
                 clear_page_dirty(page);
         }
-        spin_unlock(&mapping->private_lock);
 out:
         if (buffers_to_free) {
                 struct buffer_head *bh = buffers_to_free;
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 42da60784311..32a8caf0c41e 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -160,6 +160,117 @@ static int journal_write_commit_record(journal_t *journal,
         return (ret == -EIO);
 }
 
+static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
+{
+        int i;
+
+        for (i = 0; i < bufs; i++) {
+                wbuf[i]->b_end_io = end_buffer_write_sync;
+                /* We use-up our safety reference in submit_bh() */
+                submit_bh(WRITE, wbuf[i]);
+        }
+}
+
+/*
+ * Submit all the data buffers to disk
+ */
+static void journal_submit_data_buffers(journal_t *journal,
+                                transaction_t *commit_transaction)
+{
+        struct journal_head *jh;
+        struct buffer_head *bh;
+        int locked;
+        int bufs = 0;
+        struct buffer_head **wbuf = journal->j_wbuf;
+
+        /*
+         * Whenever we unlock the journal and sleep, things can get added
+         * onto ->t_sync_datalist, so we have to keep looping back to
+         * write_out_data until we *know* that the list is empty.
+         *
+         * Cleanup any flushed data buffers from the data list. Even in
+         * abort mode, we want to flush this out as soon as possible.
+         */
+write_out_data:
+        cond_resched();
+        spin_lock(&journal->j_list_lock);
+
+        while (commit_transaction->t_sync_datalist) {
+                jh = commit_transaction->t_sync_datalist;
+                bh = jh2bh(jh);
+                locked = 0;
+
+                /* Get reference just to make sure buffer does not disappear
+                 * when we are forced to drop various locks */
+                get_bh(bh);
+                /* If the buffer is dirty, we need to submit IO and hence
+                 * we need the buffer lock. We try to lock the buffer without
+                 * blocking. If we fail, we need to drop j_list_lock and do
+                 * blocking lock_buffer().
+                 */
+                if (buffer_dirty(bh)) {
+                        if (test_set_buffer_locked(bh)) {
+                                BUFFER_TRACE(bh, "needs blocking lock");
+                                spin_unlock(&journal->j_list_lock);
+                                /* Write out all data to prevent deadlocks */
+                                journal_do_submit_data(wbuf, bufs);
+                                bufs = 0;
+                                lock_buffer(bh);
+                                spin_lock(&journal->j_list_lock);
+                        }
+                        locked = 1;
+                }
+                /* We have to get bh_state lock. Again out of order, sigh. */
+                if (!inverted_lock(journal, bh)) {
+                        jbd_lock_bh_state(bh);
+                        spin_lock(&journal->j_list_lock);
+                }
+                /* Someone already cleaned up the buffer? */
+                if (!buffer_jbd(bh)
+                        || jh->b_transaction != commit_transaction
+                        || jh->b_jlist != BJ_SyncData) {
+                        jbd_unlock_bh_state(bh);
+                        if (locked)
+                                unlock_buffer(bh);
+                        BUFFER_TRACE(bh, "already cleaned up");
+                        put_bh(bh);
+                        continue;
+                }
+                if (locked && test_clear_buffer_dirty(bh)) {
+                        BUFFER_TRACE(bh, "needs writeout, adding to array");
+                        wbuf[bufs++] = bh;
+                        __journal_file_buffer(jh, commit_transaction,
+                                                BJ_Locked);
+                        jbd_unlock_bh_state(bh);
+                        if (bufs == journal->j_wbufsize) {
+                                spin_unlock(&journal->j_list_lock);
+                                journal_do_submit_data(wbuf, bufs);
+                                bufs = 0;
+                                goto write_out_data;
+                        }
+                }
+                else {
+                        BUFFER_TRACE(bh, "writeout complete: unfile");
+                        __journal_unfile_buffer(jh);
+                        jbd_unlock_bh_state(bh);
+                        if (locked)
+                                unlock_buffer(bh);
+                        journal_remove_journal_head(bh);
+                        /* Once for our safety reference, once for
+                         * journal_remove_journal_head() */
+                        put_bh(bh);
+                        put_bh(bh);
+                }
+
+                if (lock_need_resched(&journal->j_list_lock)) {
+                        spin_unlock(&journal->j_list_lock);
+                        goto write_out_data;
+                }
+        }
+        spin_unlock(&journal->j_list_lock);
+        journal_do_submit_data(wbuf, bufs);
+}
+
 /*
  * journal_commit_transaction
  *
@@ -313,80 +424,13 @@ void journal_commit_transaction(journal_t *journal)
          * Now start flushing things to disk, in the order they appear
          * on the transaction lists.  Data blocks go first.
          */
-
         err = 0;
-        /*
-         * Whenever we unlock the journal and sleep, things can get added
-         * onto ->t_sync_datalist, so we have to keep looping back to
-         * write_out_data until we *know* that the list is empty.
-         */
-        bufs = 0;
-        /*
-         * Cleanup any flushed data buffers from the data list. Even in
-         * abort mode, we want to flush this out as soon as possible.
-         */
-write_out_data:
-        cond_resched();
-        spin_lock(&journal->j_list_lock);
-
-        while (commit_transaction->t_sync_datalist) {
-                struct buffer_head *bh;
-
-                jh = commit_transaction->t_sync_datalist;
-                commit_transaction->t_sync_datalist = jh->b_tnext;
-                bh = jh2bh(jh);
-                if (buffer_locked(bh)) {
-                        BUFFER_TRACE(bh, "locked");
-                        if (!inverted_lock(journal, bh))
-                                goto write_out_data;
-                        __journal_temp_unlink_buffer(jh);
-                        __journal_file_buffer(jh, commit_transaction,
-                                                BJ_Locked);
-                        jbd_unlock_bh_state(bh);
-                        if (lock_need_resched(&journal->j_list_lock)) {
-                                spin_unlock(&journal->j_list_lock);
-                                goto write_out_data;
-                        }
-                } else {
-                        if (buffer_dirty(bh)) {
-                                BUFFER_TRACE(bh, "start journal writeout");
-                                get_bh(bh);
-                                wbuf[bufs++] = bh;
-                                if (bufs == journal->j_wbufsize) {
-                                        jbd_debug(2, "submit %d writes\n",
-                                                        bufs);
-                                        spin_unlock(&journal->j_list_lock);
-                                        ll_rw_block(SWRITE, bufs, wbuf);
-                                        journal_brelse_array(wbuf, bufs);
-                                        bufs = 0;
-                                        goto write_out_data;
-                                }
-                        } else {
-                                BUFFER_TRACE(bh, "writeout complete: unfile");
-                                if (!inverted_lock(journal, bh))
-                                        goto write_out_data;
-                                __journal_unfile_buffer(jh);
-                                jbd_unlock_bh_state(bh);
-                                journal_remove_journal_head(bh);
-                                put_bh(bh);
-                                if (lock_need_resched(&journal->j_list_lock)) {
-                                        spin_unlock(&journal->j_list_lock);
-                                        goto write_out_data;
-                                }
-                        }
-                }
-        }
-
-        if (bufs) {
-                spin_unlock(&journal->j_list_lock);
-                ll_rw_block(SWRITE, bufs, wbuf);
-                journal_brelse_array(wbuf, bufs);
-                spin_lock(&journal->j_list_lock);
-        }
+        journal_submit_data_buffers(journal, commit_transaction);
 
         /*
          * Wait for all previously submitted IO to complete.
          */
+        spin_lock(&journal->j_list_lock);
         while (commit_transaction->t_locked_list) {
                 struct buffer_head *bh;
 
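
The new journal_submit_data_buffers() above batches dirty data buffers into wbuf[] and, whenever it would otherwise block (on a buffer lock, or because the batch is full), drops j_list_lock, submits what it already has, and loops back. The following userspace sketch (pthreads, hypothetical names; it is not the JBD API and omits details such as the extra reference the real code takes with get_bh()) shows the general "try-lock, otherwise flush the batch and take the lock the slow way" shape:

/*
 * Illustrative userspace sketch only -- not JBD code.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_MAX 2

struct item {
        pthread_mutex_t lock;
        int dirty;
        struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *work_list;

/* "Submit" everything collected so far and release the per-item locks. */
static void submit_batch(struct item **batch, int n)
{
        for (int i = 0; i < n; i++) {
                printf("submitting item %p\n", (void *)batch[i]);
                batch[i]->dirty = 0;
                pthread_mutex_unlock(&batch[i]->lock);
        }
}

static void flush_dirty_items(void)
{
        struct item *batch[BATCH_MAX];
        int n = 0;

restart:
        pthread_mutex_lock(&list_lock);
        while (work_list) {
                struct item *it = work_list;

                if (pthread_mutex_trylock(&it->lock) != 0) {
                        /* Would block: drop the list lock first, flush what
                         * we already hold so nobody waits on our batch, wait
                         * for the item the slow way, then start over from
                         * the (possibly changed) head of the list. */
                        pthread_mutex_unlock(&list_lock);
                        submit_batch(batch, n);
                        n = 0;
                        pthread_mutex_lock(&it->lock);
                        pthread_mutex_unlock(&it->lock);
                        goto restart;
                }
                work_list = it->next;           /* unlink under the list lock */

                if (it->dirty) {
                        batch[n++] = it;        /* stays locked until submitted */
                        if (n == BATCH_MAX) {
                                pthread_mutex_unlock(&list_lock);
                                submit_batch(batch, n);
                                n = 0;
                                goto restart;
                        }
                } else {
                        pthread_mutex_unlock(&it->lock);
                }
        }
        pthread_mutex_unlock(&list_lock);
        submit_batch(batch, n);         /* whatever is left over */
}

int main(void)
{
        struct item items[6];

        for (int i = 5; i >= 0; i--) {
                pthread_mutex_init(&items[i].lock, NULL);
                items[i].dirty = (i % 2 == 0);
                items[i].next = work_list;
                work_list = &items[i];
        }
        flush_dirty_items();
        return 0;
}

The real function additionally re-checks, after re-taking its locks, that the buffer still belongs to the committing transaction ("Someone already cleaned up the buffer?"), since it may have been cleaned up while the locks were dropped.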
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 942156225447..5bbd60896050 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -157,10 +157,12 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                 "SwapCached: %8lu kB\n"
                 "Active: %8lu kB\n"
                 "Inactive: %8lu kB\n"
+#ifdef CONFIG_HIGHMEM
                 "HighTotal: %8lu kB\n"
                 "HighFree: %8lu kB\n"
                 "LowTotal: %8lu kB\n"
                 "LowFree: %8lu kB\n"
+#endif
                 "SwapTotal: %8lu kB\n"
                 "SwapFree: %8lu kB\n"
                 "Dirty: %8lu kB\n"
@@ -168,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                 "AnonPages: %8lu kB\n"
                 "Mapped: %8lu kB\n"
                 "Slab: %8lu kB\n"
+                "SReclaimable: %8lu kB\n"
+                "SUnreclaim: %8lu kB\n"
                 "PageTables: %8lu kB\n"
                 "NFS_Unstable: %8lu kB\n"
                 "Bounce: %8lu kB\n"
@@ -183,17 +187,22 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                 K(total_swapcache_pages),
                 K(active),
                 K(inactive),
+#ifdef CONFIG_HIGHMEM
                 K(i.totalhigh),
                 K(i.freehigh),
                 K(i.totalram-i.totalhigh),
                 K(i.freeram-i.freehigh),
+#endif
                 K(i.totalswap),
                 K(i.freeswap),
                 K(global_page_state(NR_FILE_DIRTY)),
                 K(global_page_state(NR_WRITEBACK)),
                 K(global_page_state(NR_ANON_PAGES)),
                 K(global_page_state(NR_FILE_MAPPED)),
-                K(global_page_state(NR_SLAB)),
+                K(global_page_state(NR_SLAB_RECLAIMABLE) +
+                                global_page_state(NR_SLAB_UNRECLAIMABLE)),
+                K(global_page_state(NR_SLAB_RECLAIMABLE)),
+                K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
                 K(global_page_state(NR_PAGETABLE)),
                 K(global_page_state(NR_UNSTABLE_NFS)),
                 K(global_page_state(NR_BOUNCE)),
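
The proc_misc.c hunks wrap the HighTotal/HighFree/LowTotal/LowFree lines of /proc/meminfo in #ifdef CONFIG_HIGHMEM and split slab accounting into reclaimable and unreclaimable parts, adding SReclaimable and SUnreclaim lines. The trick that makes the #ifdef work is that adjacent C string literals are concatenated at compile time, so rows can be dropped from the format string as long as the matching arguments are guarded by the same condition. A small self-contained example of that pattern (DEMO_HIGHMEM is a made-up stand-in for CONFIG_HIGHMEM, not a real kernel option):

/*
 * Illustrative userspace sketch, not the procfs code.
 */
#include <stdio.h>

#define DEMO_HIGHMEM    /* remove this line to drop the High/Low rows */

int main(void)
{
        unsigned long total = 1024, high = 256;

        printf("MemTotal:  %8lu kB\n"
#ifdef DEMO_HIGHMEM
               "HighTotal: %8lu kB\n"
               "LowTotal:  %8lu kB\n"
#endif
               "SwapTotal: %8lu kB\n",
               total,
#ifdef DEMO_HIGHMEM
               high,
               total - high,
#endif
               0UL);
        return 0;
}

With DEMO_HIGHMEM defined the output has four rows; with it removed, the two highmem rows and their arguments drop out together, which is how meminfo_read_proc() keeps the format string and argument list in step under CONFIG_HIGHMEM.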