author    Bjorn Helgaas <bjorn.helgaas@hp.com>    2010-02-23 12:24:41 -0500
committer Jesse Barnes <jbarnes@virtuousgeek.org> 2010-02-23 12:43:42 -0500
commit    7bc5e3f2be32ae6fb0c74cd0f707f986b3a01a26
tree      25db7dccd13f5826a91389c25810425ce36ba2d4
parent    2fe2abf896c1e7a0ee65faaf3ef0ce654848abbd
x86/PCI: use host bridge _CRS info by default on 2008 and newer machines
The main benefit of using ACPI host bridge window information is that we can
do better resource allocation in systems with multiple host bridges, e.g.,
http://bugzilla.kernel.org/show_bug.cgi?id=14183

Sometimes we need _CRS information even if we only have one host bridge,
e.g., https://bugs.launchpad.net/ubuntu/+source/linux/+bug/341681

Most of these systems are relatively new, so this patch turns on
"pci=use_crs" only on machines with a BIOS date of 2008 or newer.

Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
-rw-r--r--	Documentation/kernel-parameters.txt	 8
-rw-r--r--	arch/ia64/include/asm/acpi.h	 1
-rw-r--r--	arch/x86/include/asm/pci_x86.h	 1
-rw-r--r--	arch/x86/pci/acpi.c	53
-rw-r--r--	arch/x86/pci/common.c	 3
-rw-r--r--	drivers/acpi/pci_root.c	 1
-rw-r--r--	include/acpi/acpi_drivers.h	 1
7 files changed, 60 insertions, 8 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 516225a864f9..3e69c1c4f508 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1948,8 +1948,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			IRQ routing is enabled.
 		noacpi		[X86] Do not use ACPI for IRQ routing
 				or for PCI scanning.
-		use_crs		[X86] Use _CRS for PCI resource
-				allocation.
+		use_crs		[X86] Use PCI host bridge window information
+				from ACPI. On BIOSes from 2008 or later, this
+				is enabled by default. If you need to use this,
+				please report a bug.
+		nocrs		[X86] Ignore PCI host bridge windows from ACPI.
+				If you need to use this, please report a bug.
 		routeirq	Do IRQ routing for all PCI devices.
 				This is normally done in pci_enable_device(),
 				so this option is a temporary workaround
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index e97b255d97bc..93997bd5edc3 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -98,6 +98,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #endif
 #define acpi_processor_cstate_check(x)	(x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
+static inline void pci_acpi_crs_quirks(void) { }
 
 const char *acpi_get_sysname (void);
 int acpi_request_vector (u32 int_type);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b4bf9a942ed0..05b58ccb2e82 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -29,6 +29,7 @@
 #define PCI_CHECK_ENABLE_AMD_MMCONF	0x20000
 #define PCI_HAS_IO_ECS		0x40000
 #define PCI_NOASSIGN_ROMS	0x80000
+#define PCI_ROOT_NO_CRS		0x100000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index a2f8cdb8c1d5..5f11ff6f5389 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -15,6 +15,51 @@ struct pci_root_info {
 	int busnum;
 };
 
+static bool pci_use_crs = true;
+
+static int __init set_use_crs(const struct dmi_system_id *id)
+{
+	pci_use_crs = true;
+	return 0;
+}
+
+static const struct dmi_system_id pci_use_crs_table[] __initconst = {
+	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
+	{
+		.callback = set_use_crs,
+		.ident = "IBM System x3800",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
+		},
+	},
+	{}
+};
+
+void __init pci_acpi_crs_quirks(void)
+{
+	int year;
+
+	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
+		pci_use_crs = false;
+
+	dmi_check_system(pci_use_crs_table);
+
+	/*
+	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
+	 * takes precedence over anything we figured out above.
+	 */
+	if (pci_probe & PCI_ROOT_NO_CRS)
+		pci_use_crs = false;
+	else if (pci_probe & PCI_USE__CRS)
+		pci_use_crs = true;
+
+	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
+	       "if necessary, use \"pci=%s\" and report a bug\n",
+	       pci_use_crs ? "Using" : "Ignoring",
+	       pci_use_crs ? "nocrs" : "use_crs");
+}
+
 static acpi_status
 resource_to_addr(struct acpi_resource *resource,
 			struct acpi_resource_address64 *addr)
@@ -106,7 +151,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	res->child = NULL;
 	align_resource(info->bridge, res);
 
-	if (!(pci_probe & PCI_USE__CRS)) {
+	if (!pci_use_crs) {
 		dev_printk(KERN_DEBUG, &info->bridge->dev,
 			   "host bridge window %pR (ignored)\n", res);
 		return AE_OK;
@@ -137,12 +182,8 @@ get_current_resources(struct acpi_device *device, int busnum,
 	struct pci_root_info info;
 	size_t size;
 
-	if (pci_probe & PCI_USE__CRS)
+	if (pci_use_crs)
 		pci_bus_remove_resources(bus);
-	else
-		dev_info(&device->dev,
-			 "ignoring host bridge windows from ACPI; "
-			 "boot with \"pci=use_crs\" to use them\n");
 
 	info.bridge = device;
 	info.bus = bus;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index d2552c68e94d..3736176acaab 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -520,6 +520,9 @@ char * __devinit pcibios_setup(char *str)
 	} else if (!strcmp(str, "use_crs")) {
 		pci_probe |= PCI_USE__CRS;
 		return NULL;
+	} else if (!strcmp(str, "nocrs")) {
+		pci_probe |= PCI_ROOT_NO_CRS;
+		return NULL;
 	} else if (!strcmp(str, "earlydump")) {
 		pci_early_dump_regs = 1;
 		return NULL;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 9cd8bedb1e5a..d724736d56c8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -566,6 +566,7 @@ static int __init acpi_pci_root_init(void)
 	if (acpi_pci_disabled)
 		return 0;
 
+	pci_acpi_crs_quirks();
 	if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
 		return -ENODEV;
 
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index f4906f6568d4..3a4767c01c5f 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -104,6 +104,7 @@ int acpi_pci_bind_root(struct acpi_device *device);
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
 				   int bus);
+void pci_acpi_crs_quirks(void);
 
 /* --------------------------------------------------------------------------
    Processor
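
As a usage note, the change above is easy to verify and to override: the new
pci_acpi_crs_quirks() logs one line at boot saying whether the kernel is
"Using" or "Ignoring" host bridge windows from ACPI and which parameter
(pci=nocrs or pci=use_crs) to pass, along with a request to report a bug, and
either behaviour can be forced from the kernel command line as described in
the kernel-parameters.txt hunk. A hypothetical boot entry forcing the old
behaviour might look like:

	linux /boot/vmlinuz root=/dev/sda1 ro pci=nocrs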
/*
 *  Anticipatory & deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 *                     Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-IO phase of operation - waiting on user input, for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)

/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation)	*/
	ANTIC_WAIT_REQ,		/* The last read has not yet completed  */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};

struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct as_rq *next_arq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
	struct list_head *hash;		/* request hash */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
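	/*
	 * Both exit_prob and exit_no_coop above are decaying averages kept
	 * in 8-bit fixed point: 256 represents a probability of 100%
	 * (see as_can_break_anticipation()).
	 */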
	unsigned long new_ttime_total; 	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */
	mempool_t *arq_pool;

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished; /* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

#define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};

struct as_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	struct io_context *io_context;	/* The submitting task */

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct list_head hash;
	unsigned int on_hash;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;

	unsigned int is_sync;
	enum arq_state state;
};

#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)

static kmem_cache_t *arq_pool;

static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
	}

	return ret;
}

/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}

static void as_put_io_context(struct as_rq *arq)
{
	struct as_io_context *aic;

	if (unlikely(!arq->io_context))
		return;

	aic = arq->io_context->aic;

	if (arq->is_sync == REQ_SYNC && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(arq->io_context);
}

/*
 * the back merge hash support functions
 */
static const int as_hash_shift = 6;
#define AS_HASH_BLOCK(sec)	((sec) >> 3)
#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES		(1 << as_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct as_rq, hash)
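
/*
 * Requests are hashed on their end sector (rq_hash_key), in 8-sector
 * buckets, so a new request can cheaply find an existing request that ends
 * exactly where it starts. For example, a request covering sectors 100-107
 * is keyed on 108 and is a back merge candidate for a request starting at
 * sector 108.
 */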

static inline void __as_del_arq_hash(struct as_rq *arq)
{
	arq->on_hash = 0;
	list_del_init(&arq->hash);
}

static inline void as_del_arq_hash(struct as_rq *arq)
{
	if (arq->on_hash)
		__as_del_arq_hash(arq);
}

static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;

	BUG_ON(arq->on_hash);

	arq->on_hash = 1;
	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];

	if (!arq->on_hash) {
		WARN_ON(1);
		return;
	}

	if (arq->hash.prev != head) {
		list_del(&arq->hash);
		list_add(&arq->hash, head);
	}
}

static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
	struct list_head *entry, *next = hash_list->next;

	while ((entry = next) != hash_list) {
		struct as_rq *arq = list_entry_hash(entry);
		struct request *__rq = arq->request;

		next = entry->next;

		BUG_ON(!arq->on_hash);

		if (!rq_mergeable(__rq)) {
			as_del_arq_hash(arq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

/*
 * rb tree support functions
 */
#define RB_NONE		(2)
#define RB_EMPTY(root)	((root)->rb_node == NULL)
#define ON_RB(node)	((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)	((node)->rb_color = RB_NONE)
#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq)		(rq)->sector

/*
 * as_find_first_arq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;

	if (n == NULL)
		return NULL;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_arq(n);

		n = n->rb_left;
	}
}

/*
 * Add the request to the rb tree if it is unique.  If there is an alias (an
 * existing request against the same sector), which can happen when using
 * direct IO, then return the alias.
 */
static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
	struct rb_node *parent = NULL;
	struct as_rq *__arq;
	struct request *rq = arq->request;

	arq->rb_key = rq_rb_key(rq);

	while (*p) {
		parent = *p;
		__arq = rb_entry_arq(parent);

		if (arq->rb_key < __arq->rb_key)
			p = &(*p)->rb_left;
		else if (arq->rb_key > __arq->rb_key)
			p = &(*p)->rb_right;
		else
			return __arq;
	}

	rb_link_node(&arq->rb_node, parent, p);
	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));

	return NULL;
}

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct as_rq *alias;

	while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}

static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	if (!ON_RB(&arq->rb_node)) {
		WARN_ON(1);
		return;
	}

	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
	RB_CLEAR(&arq->rb_node);
}

static struct request *
as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;
	struct as_rq *arq;

	while (n) {
		arq = rb_entry_arq(n);

		if (sector < arq->rb_key)
			n = n->rb_left;
		else if (sector > arq->rb_key)
			n = n->rb_right;
		else
			return arq->request;
	}

	return NULL;
}

/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request.
				 */

#define BACK_PENALTY	2
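
/*
 * A request up to MAXBACK sectors behind the current head position may
 * still be chosen, but its distance is charged at BACK_PENALTY times the
 * equivalent forward seek: a request 1000 sectors behind the head competes
 * as if it were 2000 sectors ahead (see as_choose_req below).
 */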

/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - e.g. timeouts, which is the job of as_dispatch_request
 */
static struct as_rq *
as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (arq1 == NULL || arq1 == arq2)
		return arq2;
	if (arq2 == NULL)
		return arq1;

	data_dir = arq1->is_sync;

	last = ad->last_sector[data_dir];
	s1 = arq1->request->sector;
	s2 = arq2->request->sector;

	BUG_ON(data_dir != arq2->is_sync);

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return arq1;
	else if (!r2_wrap && r1_wrap)
		return arq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return arq1;
		else
			return arq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return arq1;
	else if (d2 < d1)
		return arq2;
	else {
		if (s1 >= s2)
			return arq1;
		else
			return arq2;
	}
}

/*
 * as_find_next_arq finds the next request after @last in elevator order.
 * This, together with as_choose_req, forms the basis for how the scheduler
 * chooses what request to process next. Anticipation works on top of this.
 */
static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
{
	const int data_dir = last->is_sync;
	struct as_rq *ret;
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct as_rq *arq_next, *arq_prev;

	BUG_ON(!ON_RB(&last->rb_node));

	if (rbprev)
		arq_prev = rb_entry_arq(rbprev);
	else
		arq_prev = NULL;

	if (rbnext)
		arq_next = rb_entry_arq(rbnext);
	else {
		arq_next = as_find_first_arq(ad, data_dir);
		if (arq_next == last)
			arq_next = NULL;
	}

	ret = as_choose_req(ad,	arq_next, arq_prev);

	return ret;
}

/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}

/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}

/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are, hopefully, timing from when the candidate process wakes up.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}

/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}

/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
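	/*
	 * Both counters decay by 7/8 on every new sample: ttime_samples then
	 * gains one sample's weight (256 == 1.0) and ttime_total gains
	 * 256*ttime, so ttime_samples converges towards 256 and ttime_total
	 * towards 256 times the recent mean thinktime. Dividing the two
	 * below gives a mean biased towards the most recent requests.
	 */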
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}

static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4)	+ 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}

/*
 * as_update_iohist keeps decaying averages of IO thinktimes and seek
 * distances, and updates @aic->ttime_mean and @aic->seek_mean based on
 * them. It is called when a new request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir = arq->is_sync;
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}

/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
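
/*
 * The "close" window starts at 8192 sectors and grows exponentially with
 * the time already spent anticipating: after a 2ms wait, for example, a
 * request within 8192 << 2 sectors ahead of (or half that behind) the last
 * issued request is still considered close. Past 20ms, or past
 * antic_expire, everything is treated as close.
 */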
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
				struct as_rq *arq)
{
	unsigned long delay;	/* milliseconds */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = arq->request->sector;
	sector_t delta; /* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = ((jiffies - ad->antic_start) * 1000) / HZ;

	if (delay == 0)
		delta = 8192;
	else if (delay <= 20 && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}

	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}

/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);

	if (arq && ioc == arq->io_context) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. This is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, arq->request);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128)
			return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	return 0;
}

/*
 * as_can_anticipate indicates whether we should either run arq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, arq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}

/*
 * as_update_arq must be called whenever a request (arq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_arq(struct as_data *ad, struct as_rq *arq)
{
	const int data_dir = arq->is_sync;

	/* keep the next_arq cache up to date */
	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, arq))
			as_antic_stop(ad);
	}
}

/*
 * Gathers timings and resizes the write batch automatically
 */
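/*
 * If the previous write batch overran its target time
 * (batch_expire[REQ_ASYNC]) without going idle, the number of requests
 * allowed in the next batch shrinks (halving if it took more than three
 * times the target); if it finished early with every request used, the
 * count grows. It never drops below one request per batch.
 */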
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}

/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(!list_empty(&rq->queuelist));

	if (arq->state != AS_RQ_REMOVED) {