/*
* mm/page-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* Contains functions related to writing back dirty pages at the
* address_space level.
*
* 10Apr2002 Andrew Morton
* Initial version
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <trace/events/writeback.h>
/*
* Sleep at most 200ms at a time in balance_dirty_pages().
*/
#define MAX_PAUSE max(HZ/5, 1)
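/*
 * For illustration: with HZ == 1000 this is max(200, 1) == 200 jiffies, and
 * with HZ == 100 it is 20 jiffies; either way the cap corresponds to roughly
 * 200ms of sleep per iteration.
 */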
/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when the interval falls below it.
*/
#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
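/*
 * For illustration: this expresses roughly 128KB as a page count. With 4KB
 * pages (PAGE_SHIFT == 12) it evaluates to 128 >> 2 == 32 pages, and with
 * 64KB pages to 2 pages, so the threshold stays at about 128KB of dirtied
 * data regardless of page size.
 */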
/*
* Estimate write bandwidth at 200ms intervals.
*/
#define BANDWIDTH_INTERVAL max(HZ/5, 1)
#define RATELIMIT_CALC_SHIFT 10
/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* will look to see if it needs to force writeback or throttling.
*/
static long ratelimit_pages = 32;
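/*
 * Illustrative sketch (the callers live in the generic write path, not in
 * this file): code that dirties pages calls the ratelimited helper once per
 * page, roughly
 *
 *	...copy data into the page cache and mark the page dirty...
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * and only about every ratelimit_pages calls does the expensive
 * balance_dirty_pages() work actually run.
 */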
/* The following parameters are exported via /proc/sys/vm */
/*
* Start background writeback (via writeback threads) at this percentage
*/
int dirty_background_ratio = 10;
/*
 * dirty_background_bytes starts at 0 (disabled) so that the background
 * threshold is a function of dirty_background_ratio * the amount of
 * dirtyable memory
*/
unsigned long dirty_background_bytes;
/*
 * free highmem will not be subtracted from the total free memory (and thus
 * counts as dirtyable memory) when calculating the dirty limits if
 * vm_highmem_is_dirtyable is true
*/
int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
int vm_dirty_ratio = 20;
/*
 * vm_dirty_bytes starts at 0 (disabled) so that the dirty threshold is a
 * function of vm_dirty_ratio * the amount of dirtyable memory
*/
unsigned long vm_dirty_bytes;
/*
* The interval between `kupdate'-style writebacks
*/
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
EXPORT_SYMBOL_GPL(dirty_writeback_interval);
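/*
 * For illustration: the default of 5 * 100 centiseconds means the flusher
 * threads wake up every 5 seconds; writing 0 to
 * /proc/sys/vm/dirty_writeback_interval disables the periodic wakeups
 * entirely.
 */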
/*
* The longest time for which data is allowed to remain dirty
*/
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
* Flag that makes the machine dump writes/reads and block dirtyings.
*/
int block_dump;
/*
* Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
* a full sync is triggered after this time elapses without any disk activity.
*/
int laptop_mode;
EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
unsigned long global_dirty_limit;
/*
 * Scale the writeback cache size in proportion to the relative writeout speeds.
*
* We do this by keeping a floating proportion between BDIs, based on page
* writeback completions [end_page_writeback()]. Those devices that write out
* pages fastest will get the larger share, while the slower will get a smaller
* share.
*
* We use page writeout completions because we are interested in getting rid of
* dirty pages. Having them written out is the primary goal.
*
* We introduce a concept of time, a period over which we measure these events,
* because demand can/will vary over time. The length of this period itself is
* measured in page writeback completions.
*
*/
static struct fprop_global writeout_completions;
static void writeout_period(unsigned long t);
/* Timer for aging of writeout_completions */
static struct timer_list writeout_period_timer =
TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
static unsigned long writeout_period_time = 0;
/*
 * Length of the period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the more slowly the
 * fractions will reflect changes in the current writeout rate.
*/
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
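/*
 * Rough sketch of how this proportion is used by the bdi accounting helpers
 * further down in this file:
 *
 *	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
 *			       bdi->max_prop_frac);
 *	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
 *			      &numerator, &denominator);
 *
 * The first call is made once per completed write on a bdi; the second reads
 * back that bdi's share of recent completions, which is then used to split
 * the global dirty threshold between devices.
 */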
/*
* Work out the current dirty-memory clamping and background writeout
* thresholds.
*
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages. It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
*
* We only allow 1/2 of the currently-unmapped memory to be dirtied.
*
* We don't permit the clamping level to fall below 5% - that is getting rather
* excessive.
*
* We make sure that the background writeout level is below the adjusted
* clamping level.
*/
/*
 * In a memory zone, there is a certain number of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value to which the
 * user-configurable dirty ratio is applied to get the effective number
 * of pages that are allowed to be actually dirtied, either per
 * individual zone or globally by using the sum of dirtyable pages over
 * all zones.
*
* Because the user is allowed to specify the dirty limit globally as
* absolute number of bytes, calculating the per-zone dirty limit can
* require translating the configured limit into a percentage of
* global dirtyable memory first.
*/
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
int node;
unsigned long x = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
struct zone *z =
&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
x += zone_page_state(z, NR_FREE_PAGES) +
zone_reclaimable_pages(z) - z->dirty_balance_reserve;
}
/*
* Unreclaimable memory (kernel memory or anonymous memory
* without swap) can bring down the dirtyable pages below
* the zone's dirty balance reserve and the above calculation
	 * will underflow. However, we still want to add in nodes
	 * which are below the threshold (negative values) to get a more
	 * accurate calculation, while making sure that the total never
	 * underflows.
*/
if ((long)x < 0)
x = 0;
/*
	 * Make sure that the number of highmem pages never exceeds the
	 * total amount of dirtyable memory. This can only happen in
	 * rather unusual VM situations, but we want to guard against it
	 * anyway.
*/
return min(x, total);
#else
return 0;
#endif
}
/**
* global_dirtyable_memory - number of globally dirtyable pages
*
* Returns the global number of pages potentially available for dirty
* page cache. This is the base value for the global dirty limits.
*/
static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
x -= min(x, dirty_balance_reserve);
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
	/* Subtract min_free_kbytes, converted from kilobytes to pages */
x -= min_t(unsigned long, x, min_free_kbytes >> (PAGE_SHIFT - 10));
return x + 1; /* Ensure that we never return 0 */
}
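/*
 * For illustration (made-up numbers): with 1,000,000 free + reclaimable
 * pages, a dirty_balance_reserve of 20,000 pages and min_free_kbytes worth
 * 16,000 pages, this returns 1,000,000 - 20,000 - 16,000 + 1 = 964,001
 * dirtyable pages (highmem having already been excluded when
 * !vm_highmem_is_dirtyable).
 */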
/*
* global_dirty_limits - background-writeback and dirty-throttling thresholds
*
* Calculate the dirty thresholds based on sysctl parameters
* - vm.dirty_background_ratio or vm.dirty_background_bytes
* - vm.dirty_ratio or vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
* real-time tasks.
*/
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
unsigned long background;
unsigned long dirty;
unsigned long uninitialized_var(available_memory);
struct task_struct *tsk;
if (!vm_dirty_bytes || !dirty_background_bytes)
available_memory = global_dirtyable_memory();
if (vm_dirty_bytes)
dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
else
dirty = (vm_dirty_ratio * available_memory) / 100;
if (dirty_background_bytes)
background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
else
background = (dirty_background_ratio * available_memory) / 100;
if (background >= dirty)
background = dirty / 2;
tsk = current;
if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
background += background / 4;
dirty += dirty / 4;
}
*pbackground = background;
*pdirty = dirty;
trace_global_dirty_state(background, dirty);
}
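/*
 * For illustration (made-up numbers): with 1,000,000 dirtyable pages,
 * vm_dirty_ratio = 20 and dirty_background_ratio = 10, this yields
 * dirty = 200,000 and background = 100,000 pages; a PF_LESS_THROTTLE or
 * real-time caller sees both lifted by 1/4 to 250,000 and 125,000. With
 * vm_dirty_bytes = 104857600 (100MB) and 4KB pages, dirty would instead be
 * a fixed 25,600 pages.
 */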
/**
* zone_dirtyable_memory - number of dirtyable pages in a zone
* @zone: the zone
*
* Returns the zone's number of pages potentially available for dirty
* page cache. This is the base value for the per-zone dirty limits.
*/
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
/*
* The effective global number of dirtyable pages may exclude
* highmem as a big-picture measure to keep the ratio between
* dirty memory and lowmem reasonable.
*
* But this function is purely about the individual zone and a
* highmem zone can hold its share of dirty pages, so we don't
* care about vm_highmem_is_dirtyable here.
*/
unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
zone_reclaimable_pages(zone);
/* don't allow this to underflow */
nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
return nr_pages;
}
/**
* zone_dirty_limit - maximum number of dirty pages allowed in a zone
* @zone: the zone
*
* Returns the maximum number of dirty pages allowed in a zone, based
* on the zone's dirtyable memory.
*/
static unsigned long zone_dirty_limit(struct zone *zone)
{
unsigned long zone_memory = zone_dirtyable_memory(zone);
struct task_struct *tsk = current;
unsigned long dirty;
if (vm_dirty_bytes)
dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
zone_memory / global_dirtyable_memory();
else
dirty = vm_dirty_ratio * zone_memory / 100;
if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
dirty += dirty / 4;
return dirty;
}
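/*
 * For illustration (made-up numbers): a zone with 100,000 dirtyable pages
 * and vm_dirty_ratio = 20 gets a limit of 20,000 dirty pages; with
 * vm_dirty_bytes set, the zone instead receives the global byte-derived
 * limit scaled by its share of global_dirtyable_memory().
 */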
/**
* zone_dirty_ok - tells whether a zone is within its dirty limits
* @zone: the zone to check
*
* Returns %true when the dirty pages in @zone are within the zone's
* dirty limit, %false if the limit is exceeded.
*/
bool zone_dirty_ok(struct zone *zone)
{
unsigned long limit = zone_dirty_limit(zone);
return zone_page_state(zone, NR_FILE_DIRTY) +
zone_page_state(zone, NR_UNSTABLE_NFS) +
zone_page_state(zone, NR_WRITEBACK) <= limit;
}
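/*
 * Illustrative sketch (the actual caller lives in mm/page_alloc.c, not
 * here): the page allocator consults zone_dirty_ok() when the new page may
 * be dirtied soon, roughly
 *
 *	if ((gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
 *		goto this_zone_full;
 *
 * so that dirty pages are spread across zones in proportion to each zone's
 * dirtyable memory instead of concentrating in the allocator's preferred
 * zones.
 */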
int dirty_background_ratio_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{