/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 *  Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void invalidate_bh_lrus(void);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
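
/*
 * A minimal caller-side sketch (not a function in this file): submit the
 * read, block until the buffer comes unlocked, and only then trust the
 * uptodate bit:
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */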

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	page->private = 0;
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev) {
		int err;

		ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
		err = filemap_fdatawait(bdev->bd_inode->i_mapping);
		if (!ret)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);
	DQUOT_SYNC(sb);
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	sync_inodes_sb(sb, 1);

	return sync_blockdev(sb->s_bdev);
}

/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		sync_inodes_sb(sb, 0);
		DQUOT_SYNC(sb);

		lock_super(sb);
		if (sb->s_dirt && sb->s_op->write_super)
			sb->s_op->write_super(sb);
		unlock_super(sb);

		if (sb->s_op->sync_fs)
			sb->s_op->sync_fs(sb, 1);

		sync_blockdev(sb->s_bdev);
		sync_inodes_sb(sb, 1);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);
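
/*
 * Pairing sketch (hypothetical snapshot code; take_snapshot() is made up):
 * freeze_bdev() and thaw_bdev() bracket the window in which the on-disk
 * state must stay consistent:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 *
 * thaw_bdev() must be passed the superblock that freeze_bdev() returned,
 * which may be NULL if no filesystem was mounted on the device.
 */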

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
	wakeup_bdflush(0);
	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
	DQUOT_SYNC(NULL);
	sync_supers();		/* Write the superblocks */
	sync_filesystems(0);	/* Start syncing the filesystems */
	sync_filesystems(wait);	/* Waitingly sync the filesystems */
	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
	if (!wait)
		printk("Emergency Sync complete\n");
	if (unlikely(laptop_mode))
		laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
	do_sync(1);
	return 0;
}

void emergency_sync(void)
{
	pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
 
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode * inode = dentry->d_inode;
	struct super_block * sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
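
/*
 * Simple filesystems can plug this in directly as their ->fsync method
 * (sketch; example_fops is illustrative):
 *
 *	struct file_operations example_fops = {
 *		.read	= generic_file_read,
 *		.fsync	= file_fsync,
 *	};
 */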

asmlinkage long sys_fsync(unsigned int fd)
{
	struct file * file;
	struct address_space *mapping;
	int ret, err;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	mapping = file->f_mapping;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		goto out_putf;
	}

	current->flags |= PF_SYNCWRITE;
	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers,
	 * which could cause livelocks in fsync_buffers_list
	 */
	down(&mapping->host->i_sem);
	err = file->f_op->fsync(file, file->f_dentry, 0);
	if (!ret)
		ret = err;
	up(&mapping->host->i_sem);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
	current->flags &= ~PF_SYNCWRITE;

out_putf:
	fput(file);
out:
	return ret;
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
	struct file * file;
	struct address_space *mapping;
	int ret, err;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync)
		goto out_putf;

	mapping = file->f_mapping;

	current->flags |= PF_SYNCWRITE;
	ret = filemap_fdatawrite(mapping);
	down(&mapping->host->i_sem);
	err = file->f_op->fsync(file, file->f_dentry, 1);
	if (!ret)
		ret = err;
	up(&mapping->host->i_sem);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
	current->flags &= ~PF_SYNCWRITE;

out_putf:
	fput(file);
out:
	return ret;
}
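
/*
 * From userspace, the two entry points above are fsync(2) and fdatasync(2).
 * A minimal sketch of the calling pattern:
 *
 *	if (write(fd, buf, len) < 0 || fsync(fd) < 0)
 *		perror("write/fsync");
 *
 * The only in-kernel difference is the third argument handed to ->fsync():
 * sys_fsync() passes 0, sys_fdatasync() passes 1, which lets the filesystem
 * skip flushing metadata (such as timestamps) that is not needed to
 * retrieve the file's data.
 */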

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file I/O on the block device and getblk().  It gets dealt with
	 * elsewhere; don't treat it as an error if we had some unmapped
	 * buffers.
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to a user error), by not destroying the dirty buffers we could
   generate corruption also on the next medium inserted; thus a parameter is
   necessary to handle this case in the safest way possible (trying
   not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
	invalidate_bh_lrus();
	/*
	 * FIXME: what about destroy_dirty_buffers?
	 * We really want to use invalidate_inode_pages2() for
	 * that, but not until that's cleaned up.
	 */
	invalidate_inode_pages(bdev->bd_inode->i_mapping);
}
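
/*
 * Usage sketch following the rules laid out above (hypothetical driver
 * code): sync first, then drop the now-clean cached data.
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev, 0);
 *
 * Passing destroy_dirty_buffers != 0 is reserved for cases like ramdisk
 * teardown, where the "medium" itself is being released.
 */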

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_bdflush(1024);
	yield();

	for_each_pgdat(pgdat) {
		zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
		if (*zones)
			try_to_free_pages(zones, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	static DEFINE_SPINLOCK(page_uptodate_lock);
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&page_uptodate_lock, flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	static DEFINE_SPINLOCK(page_uptodate_lock);
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	end_page_writeback(page);
	return;

still_busy:
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space 
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
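
/*
 * The pattern described above, as a sketch: queue each write as the buffer
 * is dirtied, then use the osync pass purely as a barrier.
 * fsync_buffers_list() below uses exactly this as its final stage.
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);
 *	...
 *	err = osync_buffers_list(lock, list);
 */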

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
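
/*
 * Sketch of the intended caller: a filesystem's ->fsync method (modelled
 * on ext2; example_fsync and the metadata step are illustrative):
 *
 *	static int example_fsync(struct file *file, struct dentry *dentry,
 *				 int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err = sync_mapping_buffers(inode->i_mapping);
 *
 *		// ...write back the inode itself as needed...
 *		return err;
 *	}
 */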

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		if (mapping->assoc_mapping != buffer_mapping)
			BUG();
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
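
/*
 * Typical use (sketch, modelled on how ext2 handles indirect blocks;
 * `nr' is an illustrative block number): after dirtying a metadata buffer
 * that a later fsync() of `inode' must flush, file it on the inode's list:
 *
 *	bh = sb_getblk(inode->i_sb, nr);
 *	...modify bh->b_data...
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * sync_mapping_buffers() will then find it at fsync time.
 */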

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers, then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	if (!TestSetPageDirty(page)) {
		write_lock_irq(&mapping->tree_lock);
		if (page->mapping) {	/* Race with truncate? */
			if (mapping_cap_account_dirty(mapping))
				inc_page_state(nr_dirty);
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&mapping->tree_lock);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 * 
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				wait_on_buffer(bh);
				ll_rw_block(WRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		__remove_assoc_queue(bh);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}
	
	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		bh->b_end_io = NULL;
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with 
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are 
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
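
/*
 * Sketch of how callers attach the result to a page (grow_dev_page()
 * below effectively does this): the buffers come back as a NULL-terminated
 * chain on b_this_page, and the caller closes the ring:
 *
 *	head = alloc_page_buffers(page, size, 0);
 *	if (!head)
 *		return;
 *	link_dev_buffers(page, head);
 */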

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */ 
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page)
		return NULL;

	if (!PageLocked(page))
		BUG();

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;