Diffstat (limited to 'Documentation')
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-usb | 2
-rw-r--r--  Documentation/DMA-API-HOWTO.txt (renamed from Documentation/PCI/PCI-DMA-mapping.txt) | 352
-rw-r--r--  Documentation/DMA-API.txt | 122
-rw-r--r--  Documentation/DocBook/libata.tmpl | 49
-rw-r--r--  Documentation/DocBook/mtdnand.tmpl | 6
-rw-r--r--  Documentation/DocBook/tracepoint.tmpl | 13
-rw-r--r--  Documentation/DocBook/v4l/common.xml | 2
-rw-r--r--  Documentation/DocBook/v4l/vidioc-g-parm.xml | 2
-rw-r--r--  Documentation/HOWTO | 2
-rw-r--r--  Documentation/IPMI.txt | 12
-rw-r--r--  Documentation/Makefile | 4
-rw-r--r--  Documentation/RCU/NMI-RCU.txt | 39
-rw-r--r--  Documentation/RCU/checklist.txt | 7
-rw-r--r--  Documentation/RCU/lockdep.txt | 28
-rw-r--r--  Documentation/RCU/stallwarn.txt | 94
-rw-r--r--  Documentation/RCU/torture.txt | 10
-rw-r--r--  Documentation/RCU/trace.txt | 35
-rw-r--r--  Documentation/RCU/whatisRCU.txt | 6
-rw-r--r--  Documentation/SubmitChecklist | 8
-rw-r--r--  Documentation/arm/Samsung-S3C24XX/CPUfreq.txt | 4
-rw-r--r--  Documentation/arm/Samsung/Overview.txt | 86
-rwxr-xr-x  Documentation/arm/Samsung/clksrc-change-registers.awk | 167
-rw-r--r--  Documentation/block/biodoc.txt | 4
-rw-r--r--  Documentation/cgroups/cgroup_event_listener.c | 110
-rw-r--r--  Documentation/cgroups/cgroups.txt | 42
-rw-r--r--  Documentation/cgroups/cpusets.txt | 127
-rw-r--r--  Documentation/cgroups/memcg_test.txt | 47
-rw-r--r--  Documentation/cgroups/memory.txt | 82
-rw-r--r--  Documentation/circular-buffers.txt | 234
-rw-r--r--  Documentation/connector/cn_test.c | 1
-rw-r--r--  Documentation/console/console.txt | 2
-rw-r--r--  Documentation/driver-model/platform.txt | 2
-rw-r--r--  Documentation/eisa.txt | 2
-rw-r--r--  Documentation/email-clients.txt | 30
-rw-r--r--  Documentation/fb/efifb.txt (renamed from Documentation/fb/imacfb.txt) | 14
-rw-r--r--  Documentation/feature-removal-schedule.txt | 30
-rw-r--r--  Documentation/filesystems/00-INDEX | 4
-rw-r--r--  Documentation/filesystems/9p.txt | 18
-rw-r--r--  Documentation/filesystems/Makefile | 8
-rw-r--r--  Documentation/filesystems/ceph.txt | 140
-rw-r--r--  Documentation/filesystems/dnotify.txt | 39
-rw-r--r--  Documentation/filesystems/dnotify_test.c | 34
-rw-r--r--  Documentation/filesystems/proc.txt | 5
-rw-r--r--  Documentation/filesystems/tmpfs.txt | 6
-rw-r--r--  Documentation/hwmon/abituguru | 2
-rw-r--r--  Documentation/i2c/writing-clients | 5
-rw-r--r--  Documentation/input/elantech.txt | 8
-rw-r--r--  Documentation/input/multi-touch-protocol.txt | 23
-rw-r--r--  Documentation/input/rotary-encoder.txt | 2
-rw-r--r--  Documentation/intel_txt.txt | 16
-rw-r--r--  Documentation/ioctl/ioctl-number.txt | 1
-rw-r--r--  Documentation/kernel-parameters.txt | 19
-rw-r--r--  Documentation/kobject.txt | 60
-rw-r--r--  Documentation/kprobes.txt | 10
-rw-r--r--  Documentation/laptops/00-INDEX | 6
-rw-r--r--  Documentation/laptops/Makefile | 8
-rw-r--r--  Documentation/laptops/dslm.c | 166
-rw-r--r--  Documentation/laptops/laptop-mode.txt | 170
-rw-r--r--  Documentation/memory-barriers.txt | 20
-rw-r--r--  Documentation/networking/Makefile | 2
-rw-r--r--  Documentation/networking/skfp.txt | 2
-rw-r--r--  Documentation/networking/stmmac.txt | 143
-rw-r--r--  Documentation/networking/timestamping.txt | 76
-rw-r--r--  Documentation/networking/timestamping/Makefile | 11
-rw-r--r--  Documentation/networking/timestamping/timestamping.c | 12
-rw-r--r--  Documentation/pnp.txt | 13
-rw-r--r--  Documentation/power/runtime_pm.txt | 2
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt | 54
-rw-r--r--  Documentation/rbtree.txt | 58
-rw-r--r--  Documentation/s390/kvm.txt | 2
-rw-r--r--  Documentation/scheduler/sched-design-CFS.txt | 54
-rw-r--r--  Documentation/scheduler/sched-rt-group.txt | 20
-rw-r--r--  Documentation/scsi/ChangeLog.lpfc | 10
-rw-r--r--  Documentation/serial/tty.txt | 4
-rw-r--r--  Documentation/sound/alsa/HD-Audio.txt | 16
-rw-r--r--  Documentation/spi/spidev_test.c | 2
-rw-r--r--  Documentation/stable_kernel_rules.txt | 9
-rw-r--r--  Documentation/sysctl/vm.txt | 5
-rw-r--r--  Documentation/timers/00-INDEX | 2
-rw-r--r--  Documentation/timers/Makefile | 8
-rw-r--r--  Documentation/timers/hpet.txt | 273
-rw-r--r--  Documentation/timers/hpet_example.c | 269
-rw-r--r--  Documentation/trace/events.txt | 3
-rw-r--r--  Documentation/trace/ftrace.txt | 52
-rw-r--r--  Documentation/trace/kprobetrace.txt | 4
-rw-r--r--  Documentation/vm/00-INDEX | 16
-rw-r--r--  Documentation/vm/Makefile | 2
-rw-r--r--  Documentation/vm/hugepage-mmap.c | 91
-rw-r--r--  Documentation/vm/hugepage-shm.c | 98
-rw-r--r--  Documentation/vm/hugetlbpage.txt | 169
-rw-r--r--  Documentation/vm/map_hugetlb.c | 6
-rw-r--r--  Documentation/volatile-considered-harmful.txt | 6
-rw-r--r--  Documentation/voyager.txt | 95
-rw-r--r--  Documentation/watchdog/src/watchdog-simple.c | 3
-rw-r--r--  Documentation/watchdog/src/watchdog-test.c | 8
-rw-r--r--  Documentation/watchdog/watchdog-api.txt | 5
96 files changed, 2710 insertions(+), 1442 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index a986e9bbba3d..bcebb9eaedce 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -160,7 +160,7 @@ Description:
 		match the driver to the device. For example:
 		# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id

-What:		/sys/bus/usb/device/.../avoid_reset
+What:		/sys/bus/usb/device/.../avoid_reset_quirk
 Date:		December 2009
 Contact:	Oliver Neukum <oliver@neukum.org>
 Description:
diff --git a/Documentation/PCI/PCI-DMA-mapping.txt b/Documentation/DMA-API-HOWTO.txt
index ecad88d9fe59..52618ab069ad 100644
--- a/Documentation/PCI/PCI-DMA-mapping.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -1,12 +1,12 @@
-	Dynamic DMA mapping
-	===================
+	Dynamic DMA mapping Guide
+	=========================

 	David S. Miller <davem@redhat.com>
 	Richard Henderson <rth@cygnus.com>
 	Jakub Jelinek <jakub@redhat.com>

-This document describes the DMA mapping system in terms of the pci_
-API. For a similar API that works for generic devices, see
+This is a guide to device driver writers on how to use the DMA API
+with example pseudo-code. For a concise description of the API, see
 DMA-API.txt.

 Most of the 64bit platforms have special hardware that translates bus
@@ -26,12 +26,15 @@ mapped only for the time they are actually used and unmapped after the DMA
 transfer.

 The following API will work of course even on platforms where no such
-hardware exists, see e.g. arch/x86/include/asm/pci.h for how it is implemented on
-top of the virt_to_bus interface.
+hardware exists.
+
+Note that the DMA API works with any bus independent of the underlying
+microprocessor architecture. You should use the DMA API rather than
+the bus specific DMA API (e.g. pci_dma_*).

 First of all, you should make sure

-#include <linux/pci.h>
+#include <linux/dma-mapping.h>

 is in your driver. This file will obtain for you the definition of the
 dma_addr_t (which can hold any valid DMA address for the platform)
78 DMA addressing limitations 81 DMA addressing limitations
79 82
80Does your device have any DMA addressing limitations? For example, is 83Does your device have any DMA addressing limitations? For example, is
81your device only capable of driving the low order 24-bits of address 84your device only capable of driving the low order 24-bits of address?
82on the PCI bus for SAC DMA transfers? If so, you need to inform the 85If so, you need to inform the kernel of this fact.
83PCI layer of this fact.
84 86
85By default, the kernel assumes that your device can address the full 87By default, the kernel assumes that your device can address the full
8632-bits in a SAC cycle. For a 64-bit DAC capable device, this needs 8832-bits. For a 64-bit capable device, this needs to be increased.
87to be increased. And for a device with limitations, as discussed in 89And for a device with limitations, as discussed in the previous
88the previous paragraph, it needs to be decreased. 90paragraph, it needs to be decreased.
89 91
90pci_alloc_consistent() by default will return 32-bit DMA addresses. 92Special note about PCI: PCI-X specification requires PCI-X devices to
91PCI-X specification requires PCI-X devices to support 64-bit 93support 64-bit addressing (DAC) for all transactions. And at least
92addressing (DAC) for all transactions. And at least one platform (SGI 94one platform (SGI SN2) requires 64-bit consistent allocations to
93SN2) requires 64-bit consistent allocations to operate correctly when 95operate correctly when the IO bus is in PCI-X mode.
94the IO bus is in PCI-X mode. Therefore, like with pci_set_dma_mask(), 96
95it's good practice to call pci_set_consistent_dma_mask() to set the 97For correct operation, you must interrogate the kernel in your device
96appropriate mask even if your device only supports 32-bit DMA 98probe routine to see if the DMA controller on the machine can properly
97(default) and especially if it's a PCI-X device. 99support the DMA addressing limitation your device has. It is good
98 100style to do this even if your device holds the default setting,
99For correct operation, you must interrogate the PCI layer in your
100device probe routine to see if the PCI controller on the machine can
101properly support the DMA addressing limitation your device has. It is
102good style to do this even if your device holds the default setting,
103because this shows that you did think about these issues wrt. your 101because this shows that you did think about these issues wrt. your
104device. 102device.
105 103
106The query is performed via a call to pci_set_dma_mask(): 104The query is performed via a call to dma_set_mask():
107 105
108 int pci_set_dma_mask(struct pci_dev *pdev, u64 device_mask); 106 int dma_set_mask(struct device *dev, u64 mask);
109 107
110The query for consistent allocations is performed via a call to 108The query for consistent allocations is performed via a call to
111pci_set_consistent_dma_mask(): 109dma_set_coherent_mask():
112 110
113 int pci_set_consistent_dma_mask(struct pci_dev *pdev, u64 device_mask); 111 int dma_set_coherent_mask(struct device *dev, u64 mask);
114 112
115Here, pdev is a pointer to the PCI device struct of your device, and 113Here, dev is a pointer to the device struct of your device, and mask
116device_mask is a bit mask describing which bits of a PCI address your 114is a bit mask describing which bits of an address your device
117device supports. It returns zero if your card can perform DMA 115supports. It returns zero if your card can perform DMA properly on
118properly on the machine given the address mask you provided. 116the machine given the address mask you provided. In general, the
117device struct of your device is embedded in the bus specific device
118struct of your device. For example, a pointer to the device struct of
119your PCI device is pdev->dev (pdev is a pointer to the PCI device
120struct of your device).
119 121
120If it returns non-zero, your device cannot perform DMA properly on 122If it returns non-zero, your device cannot perform DMA properly on
121this platform, and attempting to do so will result in undefined 123this platform, and attempting to do so will result in undefined
@@ -133,31 +135,30 @@ of your driver reports that performance is bad or that the device is not
 even detected, you can ask them for the kernel messages to find out
 exactly why.

-The standard 32-bit addressing PCI device would do something like
-this:
+The standard 32-bit addressing device would do something like this:

-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING
 		       "mydev: No suitable DMA available.\n");
 		goto ignore_this_device;
 	}

-Another common scenario is a 64-bit capable device. The approach
-here is to try for 64-bit DAC addressing, but back down to a
-32-bit mask should that fail. The PCI platform code may fail the
-64-bit mask not because the platform is not capable of 64-bit
-addressing. Rather, it may fail in this case simply because
-32-bit SAC addressing is done more efficiently than DAC addressing.
-Sparc64 is one platform which behaves in this way.
+Another common scenario is a 64-bit capable device. The approach here
+is to try for 64-bit addressing, but back down to a 32-bit mask that
+should not fail. The kernel may fail the 64-bit mask not because the
+platform is not capable of 64-bit addressing. Rather, it may fail in
+this case simply because 32-bit addressing is done more efficiently
+than 64-bit addressing. For example, Sparc64 PCI SAC addressing is
+more efficient than DAC addressing.

 Here is how you would handle a 64-bit capable device which can drive
 all 64-bits when accessing streaming DMA:

 	int using_dac;

-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
 		using_dac = 1;
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	} else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
 		using_dac = 0;
 	} else {
 		printk(KERN_WARNING
@@ -170,36 +171,36 @@ the case would look like this:

 	int using_dac, consistent_using_dac;

-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
 		using_dac = 1;
 		consistent_using_dac = 1;
-		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+	} else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
 		using_dac = 0;
 		consistent_using_dac = 0;
-		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 	} else {
 		printk(KERN_WARNING
 		       "mydev: No suitable DMA available.\n");
 		goto ignore_this_device;
 	}

-pci_set_consistent_dma_mask() will always be able to set the same or a
-smaller mask as pci_set_dma_mask(). However for the rare case that a
+dma_set_coherent_mask() will always be able to set the same or a
+smaller mask as dma_set_mask(). However for the rare case that a
 device driver only uses consistent allocations, one would have to
-check the return value from pci_set_consistent_dma_mask().
+check the return value from dma_set_coherent_mask().

 Finally, if your device can only drive the low 24-bits of
-address during PCI bus mastering you might do something like:
+address you might do something like:

-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(24))) {
+	if (dma_set_mask(dev, DMA_BIT_MASK(24))) {
 		printk(KERN_WARNING
 		       "mydev: 24-bit DMA addressing not available.\n");
 		goto ignore_this_device;
 	}

-When pci_set_dma_mask() is successful, and returns zero, the PCI layer
-saves away this mask you have provided. The PCI layer will use this
+When dma_set_mask() is successful, and returns zero, the kernel saves
+away this mask you have provided. The kernel will use this
 information later when you make DMA mappings.

 There is a case which we are aware of at this time, which is worth
@@ -208,7 +209,7 @@ functions (for example a sound card provides playback and record
 functions) and the various different functions have _different_
 DMA addressing limitations, you may wish to probe each mask and
 only provide the functionality which the machine can handle. It
-is important that the last call to pci_set_dma_mask() be for the
+is important that the last call to dma_set_mask() be for the
 most specific mask.

 Here is pseudo-code showing how this might be done:
@@ -217,17 +218,17 @@ Here is pseudo-code showing how this might be done:
 	#define RECORD_ADDRESS_BITS DMA_BIT_MASK(24)

 	struct my_sound_card *card;
-	struct pci_dev *pdev;
+	struct device *dev;

 	...
-	if (!pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
+	if (!dma_set_mask(dev, PLAYBACK_ADDRESS_BITS)) {
 		card->playback_enabled = 1;
 	} else {
 		card->playback_enabled = 0;
 		printk(KERN_WARNING "%s: Playback disabled due to DMA limitations.\n",
 		       card->name);
 	}
-	if (!pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
+	if (!dma_set_mask(dev, RECORD_ADDRESS_BITS)) {
 		card->record_enabled = 1;
 	} else {
 		card->record_enabled = 0;
@@ -252,8 +253,8 @@ There are two types of DMA mappings:
  Think of "consistent" as "synchronous" or "coherent".

  The current default is to return consistent memory in the low 32
- bits of the PCI bus space. However, for future compatibility you
- should set the consistent mask even if this default is fine for your
+ bits of the bus space. However, for future compatibility you should
+ set the consistent mask even if this default is fine for your
  driver.

  Good examples of what to use consistent mappings for are:
@@ -285,9 +286,9 @@ There are two types of DMA mappings:
  found in PCI bridges (such as by reading a register's value
  after writing it).

-- Streaming DMA mappings which are usually mapped for one DMA transfer,
- unmapped right after it (unless you use pci_dma_sync_* below) and for which
- hardware can optimize for sequential accesses.
+- Streaming DMA mappings which are usually mapped for one DMA
+ transfer, unmapped right after it (unless you use dma_sync_* below)
+ and for which hardware can optimize for sequential accesses.

  This of "streaming" as "asynchronous" or "outside the coherency
  domain".
@@ -302,8 +303,8 @@ There are two types of DMA mappings:
  optimizations the hardware allows. To this end, when using
  such mappings you must be explicit about what you want to happen.

-Neither type of DMA mapping has alignment restrictions that come
-from PCI, although some devices may have such restrictions.
+Neither type of DMA mapping has alignment restrictions that come from
+the underlying bus, although some devices may have such restrictions.
 Also, systems with caches that aren't DMA-coherent will work better
 when the underlying buffers don't share cache lines with other data.

@@ -315,33 +316,27 @@ you should do:

 	dma_addr_t dma_handle;

-	cpu_addr = pci_alloc_consistent(pdev, size, &dma_handle);
-
-where pdev is a struct pci_dev *. This may be called in interrupt context.
-You should use dma_alloc_coherent (see DMA-API.txt) for buses
-where devices don't have struct pci_dev (like ISA, EISA).
+	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, gfp);

-This argument is needed because the DMA translations may be bus
-specific (and often is private to the bus which the device is attached
-to).
+where device is a struct device *. This may be called in interrupt
+context with the GFP_ATOMIC flag.

 Size is the length of the region you want to allocate, in bytes.

 This routine will allocate RAM for that region, so it acts similarly to
 __get_free_pages (but takes size instead of a page order). If your
 driver needs regions sized smaller than a page, you may prefer using
-the pci_pool interface, described below.
+the dma_pool interface, described below.

-The consistent DMA mapping interfaces, for non-NULL pdev, will by
-default return a DMA address which is SAC (Single Address Cycle)
-addressable. Even if the device indicates (via PCI dma mask) that it
-may address the upper 32-bits and thus perform DAC cycles, consistent
-allocation will only return > 32-bit PCI addresses for DMA if the
-consistent dma mask has been explicitly changed via
-pci_set_consistent_dma_mask(). This is true of the pci_pool interface
-as well.
-
-pci_alloc_consistent returns two values: the virtual address which you
+The consistent DMA mapping interfaces, for non-NULL dev, will by
+default return a DMA address which is 32-bit addressable. Even if the
+device indicates (via DMA mask) that it may address the upper 32-bits,
+consistent allocation will only return > 32-bit addresses for DMA if
+the consistent DMA mask has been explicitly changed via
+dma_set_coherent_mask(). This is true of the dma_pool interface as
+well.
+
+dma_alloc_coherent returns two values: the virtual address which you
 can use to access it from the CPU and dma_handle which you pass to the
 card.

@@ -354,54 +349,54 @@ buffer you receive will not cross a 64K boundary.

 To unmap and free such a DMA region, you call:

-	pci_free_consistent(pdev, size, cpu_addr, dma_handle);
+	dma_free_coherent(dev, size, cpu_addr, dma_handle);

-where pdev, size are the same as in the above call and cpu_addr and
-dma_handle are the values pci_alloc_consistent returned to you.
+where dev, size are the same as in the above call and cpu_addr and
+dma_handle are the values dma_alloc_coherent returned to you.
 This function may not be called in interrupt context.

 If your driver needs lots of smaller memory regions, you can write
-custom code to subdivide pages returned by pci_alloc_consistent,
-or you can use the pci_pool API to do that. A pci_pool is like
-a kmem_cache, but it uses pci_alloc_consistent not __get_free_pages.
+custom code to subdivide pages returned by dma_alloc_coherent,
+or you can use the dma_pool API to do that. A dma_pool is like
+a kmem_cache, but it uses dma_alloc_coherent not __get_free_pages.
 Also, it understands common hardware constraints for alignment,
 like queue heads needing to be aligned on N byte boundaries.

-Create a pci_pool like this:
+Create a dma_pool like this:

-	struct pci_pool *pool;
+	struct dma_pool *pool;

-	pool = pci_pool_create(name, pdev, size, align, alloc);
+	pool = dma_pool_create(name, dev, size, align, alloc);

-The "name" is for diagnostics (like a kmem_cache name); pdev and size
+The "name" is for diagnostics (like a kmem_cache name); dev and size
 are as above. The device's hardware alignment requirement for this
 type of data is "align" (which is expressed in bytes, and must be a
 power of two). If your device has no boundary crossing restrictions,
 pass 0 for alloc; passing 4096 says memory allocated from this pool
 must not cross 4KByte boundaries (but at that time it may be better to
-go for pci_alloc_consistent directly instead).
+go for dma_alloc_coherent directly instead).

-Allocate memory from a pci pool like this:
+Allocate memory from a dma pool like this:

-	cpu_addr = pci_pool_alloc(pool, flags, &dma_handle);
+	cpu_addr = dma_pool_alloc(pool, flags, &dma_handle);

 flags are SLAB_KERNEL if blocking is permitted (not in_interrupt nor
-holding SMP locks), SLAB_ATOMIC otherwise. Like pci_alloc_consistent,
+holding SMP locks), SLAB_ATOMIC otherwise. Like dma_alloc_coherent,
 this returns two values, cpu_addr and dma_handle.

-Free memory that was allocated from a pci_pool like this:
+Free memory that was allocated from a dma_pool like this:

-	pci_pool_free(pool, cpu_addr, dma_handle);
+	dma_pool_free(pool, cpu_addr, dma_handle);

-where pool is what you passed to pci_pool_alloc, and cpu_addr and
-dma_handle are the values pci_pool_alloc returned. This function
+where pool is what you passed to dma_pool_alloc, and cpu_addr and
+dma_handle are the values dma_pool_alloc returned. This function
 may be called in interrupt context.

-Destroy a pci_pool by calling:
+Destroy a dma_pool by calling:

-	pci_pool_destroy(pool);
+	dma_pool_destroy(pool);

-Make sure you've called pci_pool_free for all memory allocated
+Make sure you've called dma_pool_free for all memory allocated
 from a pool before you destroy the pool. This function may not
 be called in interrupt context.

@@ -411,15 +406,15 @@ The interfaces described in subsequent portions of this document
 take a DMA direction argument, which is an integer and takes on
 one of the following values:

-	PCI_DMA_BIDIRECTIONAL
-	PCI_DMA_TODEVICE
-	PCI_DMA_FROMDEVICE
-	PCI_DMA_NONE
+	DMA_BIDIRECTIONAL
+	DMA_TO_DEVICE
+	DMA_FROM_DEVICE
+	DMA_NONE

 One should provide the exact DMA direction if you know it.

-PCI_DMA_TODEVICE means "from main memory to the PCI device"
-PCI_DMA_FROMDEVICE means "from the PCI device to main memory"
+DMA_TO_DEVICE means "from main memory to the device"
+DMA_FROM_DEVICE means "from the device to main memory"
 It is the direction in which the data moves during the DMA
 transfer.

@@ -427,12 +422,12 @@ You are _strongly_ encouraged to specify this as precisely
 as you possibly can.

 If you absolutely cannot know the direction of the DMA transfer,
-specify PCI_DMA_BIDIRECTIONAL. It means that the DMA can go in
+specify DMA_BIDIRECTIONAL. It means that the DMA can go in
 either direction. The platform guarantees that you may legally
 specify this, and that it will work, but this may be at the
 cost of performance for example.

-The value PCI_DMA_NONE is to be used for debugging. One can
+The value DMA_NONE is to be used for debugging. One can
 hold this in a data structure before you come to know the
 precise direction, and this will help catch cases where your
 direction tracking logic has failed to set things up properly.
@@ -442,21 +437,21 @@ potential platform-specific optimizations of such) is for debugging.
 Some platforms actually have a write permission boolean which DMA
 mappings can be marked with, much like page protections in the user
 program address space. Such platforms can and do report errors in the
-kernel logs when the PCI controller hardware detects violation of the
+kernel logs when the DMA controller hardware detects violation of the
 permission setting.

 Only streaming mappings specify a direction, consistent mappings
 implicitly have a direction attribute setting of
-PCI_DMA_BIDIRECTIONAL.
+DMA_BIDIRECTIONAL.

 The SCSI subsystem tells you the direction to use in the
 'sc_data_direction' member of the SCSI command your driver is
 working on.

 For Networking drivers, it's a rather simple affair. For transmit
-packets, map/unmap them with the PCI_DMA_TODEVICE direction
+packets, map/unmap them with the DMA_TO_DEVICE direction
 specifier. For receive packets, just the opposite, map/unmap them
-with the PCI_DMA_FROMDEVICE direction specifier.
+with the DMA_FROM_DEVICE direction specifier.

 	Using Streaming DMA mappings

@@ -467,43 +462,43 @@ scatterlist.

 To map a single region, you do:

-	struct pci_dev *pdev = mydev->pdev;
+	struct device *dev = &my_dev->dev;
 	dma_addr_t dma_handle;
 	void *addr = buffer->ptr;
 	size_t size = buffer->len;

-	dma_handle = pci_map_single(pdev, addr, size, direction);
+	dma_handle = dma_map_single(dev, addr, size, direction);

 and to unmap it:

-	pci_unmap_single(pdev, dma_handle, size, direction);
+	dma_unmap_single(dev, dma_handle, size, direction);

-You should call pci_unmap_single when the DMA activity is finished, e.g.
+You should call dma_unmap_single when the DMA activity is finished, e.g.
 from the interrupt which told you that the DMA transfer is done.

 Using cpu pointers like this for single mappings has a disadvantage,
 you cannot reference HIGHMEM memory in this way. Thus, there is a
-map/unmap interface pair akin to pci_{map,unmap}_single. These
+map/unmap interface pair akin to dma_{map,unmap}_single. These
 interfaces deal with page/offset pairs instead of cpu pointers.
 Specifically:

-	struct pci_dev *pdev = mydev->pdev;
+	struct device *dev = &my_dev->dev;
 	dma_addr_t dma_handle;
 	struct page *page = buffer->page;
 	unsigned long offset = buffer->offset;
 	size_t size = buffer->len;

-	dma_handle = pci_map_page(pdev, page, offset, size, direction);
+	dma_handle = dma_map_page(dev, page, offset, size, direction);

 	...

-	pci_unmap_page(pdev, dma_handle, size, direction);
+	dma_unmap_page(dev, dma_handle, size, direction);

 Here, "offset" means byte offset within the given page.

 With scatterlists, you map a region gathered from several regions by:

-	int i, count = pci_map_sg(pdev, sglist, nents, direction);
+	int i, count = dma_map_sg(dev, sglist, nents, direction);
 	struct scatterlist *sg;

 	for_each_sg(sglist, sg, count, i) {
@@ -527,16 +522,16 @@ accessed sg->address and sg->length as shown above.

 To unmap a scatterlist, just call:

-	pci_unmap_sg(pdev, sglist, nents, direction);
+	dma_unmap_sg(dev, sglist, nents, direction);

 Again, make sure DMA activity has already finished.

-PLEASE NOTE: The 'nents' argument to the pci_unmap_sg call must be
-	     the _same_ one you passed into the pci_map_sg call,
+PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be
+	     the _same_ one you passed into the dma_map_sg call,
 	     it should _NOT_ be the 'count' value _returned_ from the
-	     pci_map_sg call.
+	     dma_map_sg call.

-Every pci_map_{single,sg} call should have its pci_unmap_{single,sg}
+Every dma_map_{single,sg} call should have its dma_unmap_{single,sg}
 counterpart, because the bus address space is a shared resource (although
 in some ports the mapping is per each BUS so less devices contend for the
 same bus address space) and you could render the machine unusable by eating
@@ -547,14 +542,14 @@ the data in between the DMA transfers, the buffer needs to be synced
 properly in order for the cpu and device to see the most uptodate and
 correct copy of the DMA buffer.

-So, firstly, just map it with pci_map_{single,sg}, and after each DMA
+So, firstly, just map it with dma_map_{single,sg}, and after each DMA
 transfer call either:

-	pci_dma_sync_single_for_cpu(pdev, dma_handle, size, direction);
+	dma_sync_single_for_cpu(dev, dma_handle, size, direction);

 or:

-	pci_dma_sync_sg_for_cpu(pdev, sglist, nents, direction);
+	dma_sync_sg_for_cpu(dev, sglist, nents, direction);

 as appropriate.

@@ -562,27 +557,27 @@ Then, if you wish to let the device get at the DMA area again,
 finish accessing the data with the cpu, and then before actually
 giving the buffer to the hardware call either:

-	pci_dma_sync_single_for_device(pdev, dma_handle, size, direction);
+	dma_sync_single_for_device(dev, dma_handle, size, direction);

 or:

-	pci_dma_sync_sg_for_device(dev, sglist, nents, direction);
+	dma_sync_sg_for_device(dev, sglist, nents, direction);

 as appropriate.

 After the last DMA transfer call one of the DMA unmap routines
-pci_unmap_{single,sg}. If you don't touch the data from the first pci_map_*
-call till pci_unmap_*, then you don't have to call the pci_dma_sync_*
+dma_unmap_{single,sg}. If you don't touch the data from the first dma_map_*
+call till dma_unmap_*, then you don't have to call the dma_sync_*
 routines at all.

 Here is pseudo code which shows a situation in which you would need
-to use the pci_dma_sync_*() interfaces.
+to use the dma_sync_*() interfaces.

 	my_card_setup_receive_buffer(struct my_card *cp, char *buffer, int len)
 	{
 		dma_addr_t mapping;

-		mapping = pci_map_single(cp->pdev, buffer, len, PCI_DMA_FROMDEVICE);
+		mapping = dma_map_single(cp->dev, buffer, len, DMA_FROM_DEVICE);

 		cp->rx_buf = buffer;
 		cp->rx_len = len;
@@ -606,25 +601,25 @@ to use the pci_dma_sync_*() interfaces.
 		 * the DMA transfer with the CPU first
 		 * so that we see updated contents.
 		 */
-		pci_dma_sync_single_for_cpu(cp->pdev, cp->rx_dma,
+		dma_sync_single_for_cpu(&cp->dev, cp->rx_dma,
 				cp->rx_len,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);

 		/* Now it is safe to examine the buffer. */
 		hp = (struct my_card_header *) cp->rx_buf;
 		if (header_is_ok(hp)) {
-			pci_unmap_single(cp->pdev, cp->rx_dma, cp->rx_len,
-					PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&cp->dev, cp->rx_dma, cp->rx_len,
+					DMA_FROM_DEVICE);
 			pass_to_upper_layers(cp->rx_buf);
 			make_and_setup_new_rx_buf(cp);
 		} else {
 			/* Just sync the buffer and give it back
 			 * to the card.
 			 */
-			pci_dma_sync_single_for_device(cp->pdev,
+			dma_sync_single_for_device(&cp->dev,
 					cp->rx_dma,
 					cp->rx_len,
-					PCI_DMA_FROMDEVICE);
+					DMA_FROM_DEVICE);
 			give_rx_buf_to_card(cp);
 		}
 	}
@@ -634,19 +629,19 @@ Drivers converted fully to this interface should not use virt_to_bus any
 longer, nor should they use bus_to_virt. Some drivers have to be changed a
 little bit, because there is no longer an equivalent to bus_to_virt in the
 dynamic DMA mapping scheme - you have to always store the DMA addresses
-returned by the pci_alloc_consistent, pci_pool_alloc, and pci_map_single
-calls (pci_map_sg stores them in the scatterlist itself if the platform
+returned by the dma_alloc_coherent, dma_pool_alloc, and dma_map_single
+calls (dma_map_sg stores them in the scatterlist itself if the platform
 supports dynamic DMA mapping in hardware) in your driver structures and/or
 in the card registers.

-All PCI drivers should be using these interfaces with no exceptions.
-It is planned to completely remove virt_to_bus() and bus_to_virt() as
+All drivers should be using these interfaces with no exceptions. It
+is planned to completely remove virt_to_bus() and bus_to_virt() as
 they are entirely deprecated. Some ports already do not provide these
 as it is impossible to correctly support them.

 	Optimizing Unmap State Space Consumption

-On many platforms, pci_unmap_{single,page}() is simply a nop.
+On many platforms, dma_unmap_{single,page}() is simply a nop.
 Therefore, keeping track of the mapping address and length is a waste
 of space. Instead of filling your drivers up with ifdefs and the like
 to "work around" this (which would defeat the whole purpose of a
@@ -655,7 +650,7 @@ portable API) the following facilities are provided.
 Actually, instead of describing the macros one by one, we'll
 transform some example code.

-1) Use DECLARE_PCI_UNMAP_{ADDR,LEN} in state saving structures.
+1) Use DEFINE_DMA_UNMAP_{ADDR,LEN} in state saving structures.
    Example, before:

 	struct ring_state {
@@ -668,14 +663,11 @@ transform some example code.

 	struct ring_state {
 		struct sk_buff *skb;
-		DECLARE_PCI_UNMAP_ADDR(mapping)
-		DECLARE_PCI_UNMAP_LEN(len)
+		DEFINE_DMA_UNMAP_ADDR(mapping);
+		DEFINE_DMA_UNMAP_LEN(len);
 	};

-   NOTE: DO NOT put a semicolon at the end of the DECLARE_*()
-         macro.
-
-2) Use pci_unmap_{addr,len}_set to set these values.
+2) Use dma_unmap_{addr,len}_set to set these values.
    Example, before:

 	ringp->mapping = FOO;
@@ -683,21 +675,21 @@ transform some example code.

    after:

-	pci_unmap_addr_set(ringp, mapping, FOO);
-	pci_unmap_len_set(ringp, len, BAR);
+	dma_unmap_addr_set(ringp, mapping, FOO);
+	dma_unmap_len_set(ringp, len, BAR);

-3) Use pci_unmap_{addr,len} to access these values.
+3) Use dma_unmap_{addr,len} to access these values.
    Example, before:

-	pci_unmap_single(pdev, ringp->mapping, ringp->len,
-			 PCI_DMA_FROMDEVICE);
+	dma_unmap_single(dev, ringp->mapping, ringp->len,
+			 DMA_FROM_DEVICE);

    after:

-	pci_unmap_single(pdev,
-			 pci_unmap_addr(ringp, mapping),
-			 pci_unmap_len(ringp, len),
-			 PCI_DMA_FROMDEVICE);
+	dma_unmap_single(dev,
+			 dma_unmap_addr(ringp, mapping),
+			 dma_unmap_len(ringp, len),
+			 DMA_FROM_DEVICE);

 It really should be self-explanatory. We treat the ADDR and LEN
 separately, because it is possible for an implementation to only
@@ -732,15 +724,15 @@ to "Closing".
 DMA address space is limited on some architectures and an allocation
 failure can be determined by:

-- checking if pci_alloc_consistent returns NULL or pci_map_sg returns 0
+- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0

-- checking the returned dma_addr_t of pci_map_single and pci_map_page
-  by using pci_dma_mapping_error():
+- checking the returned dma_addr_t of dma_map_single and dma_map_page
+  by using dma_mapping_error():

 	dma_addr_t dma_handle;

-	dma_handle = pci_map_single(pdev, addr, size, direction);
-	if (pci_dma_mapping_error(pdev, dma_handle)) {
+	dma_handle = dma_map_single(dev, addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
 		/*
 		 * reduce current DMA mapping usage,
 		 * delay and try again later or
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 5aceb88b3f8b..05e2ae236865 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -4,20 +4,18 @@
 	James E.J. Bottomley <James.Bottomley@HansenPartnership.com>

 This document describes the DMA API. For a more gentle introduction
-phrased in terms of the pci_ equivalents (and actual examples) see
-Documentation/PCI/PCI-DMA-mapping.txt.
+of the API (and actual examples) see
+Documentation/DMA-API-HOWTO.txt.

-This API is split into two pieces. Part I describes the API and the
-corresponding pci_ API. Part II describes the extensions to the API
-for supporting non-consistent memory machines. Unless you know that
-your driver absolutely has to support non-consistent platforms (this
-is usually only legacy platforms) you should only use the API
-described in part I.
+This API is split into two pieces. Part I describes the API. Part II
+describes the extensions to the API for supporting non-consistent
+memory machines. Unless you know that your driver absolutely has to
+support non-consistent platforms (this is usually only legacy
+platforms) you should only use the API described in part I.

-Part I - pci_ and dma_ Equivalent API
+Part I - dma_ API
 -------------------------------------

-To get the pci_ API, you must #include <linux/pci.h>
 To get the dma_ API, you must #include <linux/dma-mapping.h>


@@ -27,9 +25,6 @@ Part Ia - Using large dma-coherent buffers
 void *
 dma_alloc_coherent(struct device *dev, size_t size,
 		   dma_addr_t *dma_handle, gfp_t flag)
-void *
-pci_alloc_consistent(struct pci_dev *dev, size_t size,
-		     dma_addr_t *dma_handle)

 Consistent memory is memory for which a write by either the device or
 the processor can immediately be read by the processor or device
@@ -53,15 +48,11 @@ The simplest way to do that is to use the dma_pool calls (see below).
 The flag parameter (dma_alloc_coherent only) allows the caller to
 specify the GFP_ flags (see kmalloc) for the allocation (the
 implementation may choose to ignore flags that affect the location of
-the returned memory, like GFP_DMA). For pci_alloc_consistent, you
-must assume GFP_ATOMIC behaviour.
+the returned memory, like GFP_DMA).

 void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		  dma_addr_t dma_handle)
-void
-pci_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
-		    dma_addr_t dma_handle)

 Free the region of consistent memory you previously allocated. dev,
 size and dma_handle must all be the same as those passed into the
@@ -89,10 +80,6 @@ for alignment, like queue heads needing to be aligned on N-byte boundaries.
 	dma_pool_create(const char *name, struct device *dev,
 			size_t size, size_t align, size_t alloc);

-	struct pci_pool *
-	pci_pool_create(const char *name, struct pci_device *dev,
-			size_t size, size_t align, size_t alloc);
-
 The pool create() routines initialize a pool of dma-coherent buffers
 for use with a given device. It must be called in a context which
 can sleep.
@@ -108,9 +95,6 @@ from this pool must not cross 4KByte boundaries.
 	void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
 			dma_addr_t *dma_handle);

-	void *pci_pool_alloc(struct pci_pool *pool, gfp_t gfp_flags,
-			dma_addr_t *dma_handle);
-
 This allocates memory from the pool; the returned memory will meet the size
 and alignment requirements specified at creation time. Pass GFP_ATOMIC to
 prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks),
@@ -122,9 +106,6 @@ pool's device.
 	void dma_pool_free(struct dma_pool *pool, void *vaddr,
 			dma_addr_t addr);

-	void pci_pool_free(struct pci_pool *pool, void *vaddr,
-			dma_addr_t addr);
-
 This puts memory back into the pool. The pool is what was passed to
 the pool allocation routine; the cpu (vaddr) and dma addresses are what
 were returned when that routine allocated the memory being freed.
@@ -132,8 +113,6 @@ were returned when that routine allocated the memory being freed.

 	void dma_pool_destroy(struct dma_pool *pool);

-	void pci_pool_destroy(struct pci_pool *pool);
-
 The pool destroy() routines free the resources of the pool. They must be
 called in a context which can sleep. Make sure you've freed all allocated
 memory back to the pool before you destroy it.
@@ -144,8 +123,6 @@ Part Ic - DMA addressing limitations

 int
 dma_supported(struct device *dev, u64 mask)
-int
-pci_dma_supported(struct pci_dev *hwdev, u64 mask)

 Checks to see if the device can support DMA to the memory described by
 mask.
@@ -159,8 +136,14 @@ driver writers.

 int
 dma_set_mask(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
 int
-pci_set_dma_mask(struct pci_device *dev, u64 mask)
+dma_set_coherent_mask(struct device *dev, u64 mask)

 Checks to see if the mask is possible and updates the device
 parameters if it is.
@@ -187,9 +170,6 @@ Part Id - Streaming DMA mappings
 dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		enum dma_data_direction direction)
-dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *cpu_addr, size_t size,
-		int direction)

 Maps a piece of processor virtual memory so it can be accessed by the
 device and returns the physical handle of the memory.
@@ -198,14 +178,10 @@ The direction for both api's may be converted freely by casting.
 However the dma_ API uses a strongly typed enumerator for its
 direction:

-DMA_NONE = PCI_DMA_NONE			no direction (used for
-					debugging)
-DMA_TO_DEVICE = PCI_DMA_TODEVICE	data is going from the
-					memory to the device
-DMA_FROM_DEVICE = PCI_DMA_FROMDEVICE	data is coming from
-					the device to the
-					memory
-DMA_BIDIRECTIONAL = PCI_DMA_BIDIRECTIONAL	direction isn't known
+DMA_NONE		no direction (used for debugging)
+DMA_TO_DEVICE		data is going from the memory to the device
+DMA_FROM_DEVICE		data is coming from the device to the memory
+DMA_BIDIRECTIONAL	direction isn't known

 Notes: Not all memory regions in a machine can be mapped by this
 API. Further, regions that appear to be physically contiguous in
@@ -268,9 +244,6 @@ cache lines are updated with data that the device may have changed).
 void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction direction)
-void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-		size_t size, int direction)

 Unmaps the region previously mapped. All the parameters passed in
 must be identical to those passed in (and returned) by the mapping
@@ -280,15 +253,9 @@ dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction direction)
-dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
-		unsigned long offset, size_t size, int direction)
 void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 		enum dma_data_direction direction)
-void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
-		size_t size, int direction)

 API for mapping and unmapping for pages. All the notes and warnings
 for the other mapping APIs apply here. Also, although the <offset>
@@ -299,9 +266,6 @@ cache width is.
299int 266int
300dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 267dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
301 268
302int
303pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
304
305In some circumstances dma_map_single and dma_map_page will fail to create 269In some circumstances dma_map_single and dma_map_page will fail to create
306a mapping. A driver can check for these errors by testing the returned 270a mapping. A driver can check for these errors by testing the returned
307dma address with dma_mapping_error(). A non-zero return value means the mapping 271dma address with dma_mapping_error(). A non-zero return value means the mapping
@@ -311,9 +275,6 @@ reduce current DMA mapping usage or delay and try again later).
311 int 275 int
312 dma_map_sg(struct device *dev, struct scatterlist *sg, 276 dma_map_sg(struct device *dev, struct scatterlist *sg,
313 int nents, enum dma_data_direction direction) 277 int nents, enum dma_data_direction direction)
314 int
315 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
316 int nents, int direction)
317 278
318Returns: the number of physical segments mapped (this may be shorter 279Returns: the number of physical segments mapped (this may be shorter
319than <nents> passed in if some elements of the scatter/gather list are 280than <nents> passed in if some elements of the scatter/gather list are
@@ -353,9 +314,6 @@ accessed sg->address and sg->length as shown above.
353 void 314 void
354 dma_unmap_sg(struct device *dev, struct scatterlist *sg, 315 dma_unmap_sg(struct device *dev, struct scatterlist *sg,
355 int nhwentries, enum dma_data_direction direction) 316 int nhwentries, enum dma_data_direction direction)
356 void
357 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
358 int nents, int direction)
359 317
360Unmap the previously mapped scatter/gather list. All the parameters 318Unmap the previously mapped scatter/gather list. All the parameters
361must be the same as those and passed in to the scatter/gather mapping 319must be the same as those and passed in to the scatter/gather mapping
@@ -365,21 +323,23 @@ Note: <nents> must be the number you passed in, *not* the number of
365physical entries returned. 323physical entries returned.
366 324
367void 325void
368dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size, 326dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
369 enum dma_data_direction direction) 327 enum dma_data_direction direction)
370void 328void
371pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, 329dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
372 size_t size, int direction) 330 enum dma_data_direction direction)
373void 331void
374dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, 332dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
375 enum dma_data_direction direction) 333 enum dma_data_direction direction)
376void 334void
377pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, 335dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
378 int nelems, int direction) 336 enum dma_data_direction direction)
379 337
380Synchronise a single contiguous or scatter/gather mapping. All the 338Synchronise a single contiguous or scatter/gather mapping for the cpu
381parameters must be the same as those passed into the single mapping 339and device. With the sync_sg API, all the parameters must be the same
382API. 340as those passed into the scatter/gather mapping API. With the sync_single API,
341you can use dma_handle and size parameters that aren't identical to
342those passed into the single mapping API to do a partial sync.
383 343
384Notes: You must do this: 344Notes: You must do this:
385 345
@@ -461,9 +421,9 @@ void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
461Part II - Advanced dma_ usage 421Part II - Advanced dma_ usage
462----------------------------- 422-----------------------------
463 423
464Warning: These pieces of the DMA API have no PCI equivalent. They 424Warning: These pieces of the DMA API should not be used in the
465should also not be used in the majority of cases, since they cater for 425majority of cases, since they cater for unlikely corner cases that
466unlikely corner cases that don't belong in usual drivers. 426don't belong in usual drivers.
467 427
468If you don't understand how cache line coherency works between a 428If you don't understand how cache line coherency works between a
469processor and an I/O device, you should not be using this part of the 429processor and an I/O device, you should not be using this part of the
@@ -514,16 +474,6 @@ into the width returned by this call. It will also always be a power
514of two for easy alignment. 474of two for easy alignment.
515 475
516void 476void
517dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
518 unsigned long offset, size_t size,
519 enum dma_data_direction direction)
520
521Does a partial sync, starting at offset and continuing for size. You
522must be careful to observe the cache alignment and width when doing
523anything like this. You must also be extra careful about accessing
524memory you intend to sync partially.
525
526void
527dma_cache_sync(struct device *dev, void *vaddr, size_t size, 477dma_cache_sync(struct device *dev, void *vaddr, size_t size,
528 enum dma_data_direction direction) 478 enum dma_data_direction direction)
529 479
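
For illustration only (not part of this patch), a hedged sketch of a driver
fragment using the streaming calls documented in the hunks above; the buffer
handling and the whizco_* name (echoing the document's own hypothetical
vendor) are made up:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Map a buffer for a device read, check for mapping failure, and
	 * unmap once the hardware has finished with it.
	 */
	static int whizco_send_buf(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;	/* back off and retry later */

		/* ... hand 'handle' to the hardware, wait for completion ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}

If the CPU had to look at the buffer while it was still mapped, the access
would be bracketed with dma_sync_single_for_cpu() and
dma_sync_single_for_device() as described above.
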
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index ba9975771503..ff3e5bec1c24 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -107,10 +107,6 @@ void (*dev_config) (struct ata_port *, struct ata_device *);
107 issue of SET FEATURES - XFER MODE, and prior to operation. 107 issue of SET FEATURES - XFER MODE, and prior to operation.
108 </para> 108 </para>
109 <para> 109 <para>
110 Called by ata_device_add() after ata_dev_identify() determines
111 a device is present.
112 </para>
113 <para>
114 This entry may be specified as NULL in ata_port_operations. 110 This entry may be specified as NULL in ata_port_operations.
115 </para> 111 </para>
116 112
@@ -154,8 +150,8 @@ unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned in
154 150
155 <sect2><title>Taskfile read/write</title> 151 <sect2><title>Taskfile read/write</title>
156 <programlisting> 152 <programlisting>
157void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf); 153void (*sff_tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
158void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); 154void (*sff_tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
159 </programlisting> 155 </programlisting>
160 156
161 <para> 157 <para>
@@ -164,36 +160,35 @@ void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
164 hardware registers / DMA buffers, to obtain the current set of 160 hardware registers / DMA buffers, to obtain the current set of
165 taskfile register values. 161 taskfile register values.
166 Most drivers for taskfile-based hardware (PIO or MMIO) use 162 Most drivers for taskfile-based hardware (PIO or MMIO) use
167 ata_tf_load() and ata_tf_read() for these hooks. 163 ata_sff_tf_load() and ata_sff_tf_read() for these hooks.
168 </para> 164 </para>
169 165
170 </sect2> 166 </sect2>
171 167
172 <sect2><title>PIO data read/write</title> 168 <sect2><title>PIO data read/write</title>
173 <programlisting> 169 <programlisting>
174void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int); 170void (*sff_data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
175 </programlisting> 171 </programlisting>
176 172
177 <para> 173 <para>
178All bmdma-style drivers must implement this hook. This is the low-level 174All bmdma-style drivers must implement this hook. This is the low-level
179operation that actually copies the data bytes during a PIO data 175operation that actually copies the data bytes during a PIO data
180transfer. 176transfer.
181Typically the driver 177Typically the driver will choose one of ata_sff_data_xfer_noirq(),
182will choose one of ata_pio_data_xfer_noirq(), ata_pio_data_xfer(), or 178ata_sff_data_xfer(), or ata_sff_data_xfer32().
183ata_mmio_data_xfer().
184 </para> 179 </para>
185 180
186 </sect2> 181 </sect2>
187 182
188 <sect2><title>ATA command execute</title> 183 <sect2><title>ATA command execute</title>
189 <programlisting> 184 <programlisting>
190void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf); 185void (*sff_exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
191 </programlisting> 186 </programlisting>
192 187
193 <para> 188 <para>
194 causes an ATA command, previously loaded with 189 causes an ATA command, previously loaded with
195 ->tf_load(), to be initiated in hardware. 190 ->tf_load(), to be initiated in hardware.
196 Most drivers for taskfile-based hardware use ata_exec_command() 191 Most drivers for taskfile-based hardware use ata_sff_exec_command()
197 for this hook. 192 for this hook.
198 </para> 193 </para>
199 194
@@ -218,8 +213,8 @@ command.
218 213
219 <sect2><title>Read specific ATA shadow registers</title> 214 <sect2><title>Read specific ATA shadow registers</title>
220 <programlisting> 215 <programlisting>
221u8 (*check_status)(struct ata_port *ap); 216u8 (*sff_check_status)(struct ata_port *ap);
222u8 (*check_altstatus)(struct ata_port *ap); 217u8 (*sff_check_altstatus)(struct ata_port *ap);
223 </programlisting> 218 </programlisting>
224 219
225 <para> 220 <para>
@@ -227,20 +222,14 @@ u8 (*check_altstatus)(struct ata_port *ap);
227 hardware. On some hardware, reading the Status register has 222 hardware. On some hardware, reading the Status register has
228 the side effect of clearing the interrupt condition. 223 the side effect of clearing the interrupt condition.
229 Most drivers for taskfile-based hardware use 224 Most drivers for taskfile-based hardware use
230 ata_check_status() for this hook. 225 ata_sff_check_status() for this hook.
231 </para>
232 <para>
233 Note that because this is called from ata_device_add(), at
234 least a dummy function that clears device interrupts must be
235 provided for all drivers, even if the controller doesn't
236 actually have a taskfile status register.
237 </para> 226 </para>
238 227
239 </sect2> 228 </sect2>
240 229
241 <sect2><title>Select ATA device on bus</title> 230 <sect2><title>Select ATA device on bus</title>
242 <programlisting> 231 <programlisting>
243void (*dev_select)(struct ata_port *ap, unsigned int device); 232void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
244 </programlisting> 233 </programlisting>
245 234
246 <para> 235 <para>
@@ -251,9 +240,7 @@ void (*dev_select)(struct ata_port *ap, unsigned int device);
251 </para> 240 </para>
252 <para> 241 <para>
253 Most drivers for taskfile-based hardware use 242 Most drivers for taskfile-based hardware use
254 ata_std_dev_select() for this hook. Controllers which do not 243 ata_sff_dev_select() for this hook.
255 support second drives on a port (such as SATA contollers) will
256 use ata_noop_dev_select().
257 </para> 244 </para>
258 245
259 </sect2> 246 </sect2>
@@ -441,13 +428,13 @@ void (*irq_clear) (struct ata_port *);
441 to struct ata_host_set. 428 to struct ata_host_set.
442 </para> 429 </para>
443 <para> 430 <para>
444 Most legacy IDE drivers use ata_interrupt() for the 431 Most legacy IDE drivers use ata_sff_interrupt() for the
445 irq_handler hook, which scans all ports in the host_set, 432 irq_handler hook, which scans all ports in the host_set,
446 determines which queued command was active (if any), and calls 433 determines which queued command was active (if any), and calls
447 ata_host_intr(ap,qc). 434 ata_sff_host_intr(ap,qc).
448 </para> 435 </para>
449 <para> 436 <para>
450 Most legacy IDE drivers use ata_bmdma_irq_clear() for the 437 Most legacy IDE drivers use ata_sff_irq_clear() for the
451 irq_clear() hook, which simply clears the interrupt and error 438 irq_clear() hook, which simply clears the interrupt and error
452 flags in the DMA status register. 439 flags in the DMA status register.
453 </para> 440 </para>
@@ -496,10 +483,6 @@ void (*host_stop) (struct ata_host_set *host_set);
496 data from port at this time. 483 data from port at this time.
497 </para> 484 </para>
498 <para> 485 <para>
499 Many drivers use ata_port_stop() as this hook, which frees the
500 PRD table.
501 </para>
502 <para>
503 ->host_stop() is called after all ->port_stop() calls 486 ->host_stop() is called after all ->port_stop() calls
504have completed. The hook must finalize hardware shutdown, release DMA 487have completed. The hook must finalize hardware shutdown, release DMA
505and other resources, etc. 488and other resources, etc.
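
As a hedged illustration of the sff_-prefixed hooks named above (not taken
from this patch, and assuming the ->inherits mechanism of current libata), a
driver for simple taskfile hardware typically inherits the standard
operations and overrides only what differs; "whizco" is a made-up name:

	#include <linux/libata.h>

	static struct ata_port_operations whizco_port_ops = {
		/* ata_sff_port_ops supplies ata_sff_tf_load(),
		 * ata_sff_exec_command(), ata_sff_check_status(),
		 * ata_sff_dev_select() and friends.
		 */
		.inherits	= &ata_sff_port_ops,
		.sff_data_xfer	= ata_sff_data_xfer32,	/* 32-bit PIO transfers */
	};
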
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 5e7d84b48505..133cd6c3f3c1 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -488,7 +488,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
488 The ECC bytes must be placed immidiately after the data 488 The ECC bytes must be placed immidiately after the data
489 bytes in order to make the syndrome generator work. This 489 bytes in order to make the syndrome generator work. This
490 is contrary to the usual layout used by software ECC. The 490 is contrary to the usual layout used by software ECC. The
491 seperation of data and out of band area is not longer 491 separation of data and out of band area is no longer
492 possible. The nand driver code handles this layout and 492 possible. The nand driver code handles this layout and
493 the remaining free bytes in the oob area are managed by 493 the remaining free bytes in the oob area are managed by
494 the autoplacement code. Provide a matching oob-layout 494 the autoplacement code. Provide a matching oob-layout
@@ -560,7 +560,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
560 bad blocks. They have factory marked good blocks. The marker pattern 560 bad blocks. They have factory marked good blocks. The marker pattern
561 is erased when the block is erased to be reused. So in case of 561 is erased when the block is erased to be reused. So in case of
562 powerloss before writing the pattern back to the chip this block 562 powerloss before writing the pattern back to the chip this block
563 would be lost and added to the bad blocks. Therefor we scan the 563 would be lost and added to the bad blocks. Therefore we scan the
564 chip(s) when we detect them the first time for good blocks and 564 chip(s) when we detect them the first time for good blocks and
565 store this information in a bad block table before erasing any 565 store this information in a bad block table before erasing any
566 of the blocks. 566 of the blocks.
@@ -1094,7 +1094,7 @@ in this page</entry>
1094 manufacturers specifications. This applies similar to the spare area. 1094 manufacturers specifications. This applies similar to the spare area.
1095 </para> 1095 </para>
1096 <para> 1096 <para>
1097 Therefor NAND aware filesystems must either write in page size chunks 1097 Therefore NAND aware filesystems must either write in page size chunks
1098 or hold a writebuffer to collect smaller writes until they sum up to 1098 or hold a writebuffer to collect smaller writes until they sum up to
1099 pagesize. Available NAND aware filesystems: JFFS2, YAFFS. 1099 pagesize. Available NAND aware filesystems: JFFS2, YAFFS.
1100 </para> 1100 </para>
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
index 8bca1d5cec09..e8473eae2a20 100644
--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -16,6 +16,15 @@
16 </address> 16 </address>
17 </affiliation> 17 </affiliation>
18 </author> 18 </author>
19 <author>
20 <firstname>William</firstname>
21 <surname>Cohen</surname>
22 <affiliation>
23 <address>
24 <email>wcohen@redhat.com</email>
25 </address>
26 </affiliation>
27 </author>
19 </authorgroup> 28 </authorgroup>
20 29
21 <legalnotice> 30 <legalnotice>
@@ -91,4 +100,8 @@
91!Iinclude/trace/events/signal.h 100!Iinclude/trace/events/signal.h
92 </chapter> 101 </chapter>
93 102
103 <chapter id="block">
104 <title>Block IO</title>
105!Iinclude/trace/events/block.h
106 </chapter>
94</book> 107</book>
diff --git a/Documentation/DocBook/v4l/common.xml b/Documentation/DocBook/v4l/common.xml
index c65f0ac9b6ee..cea23e1c4fc6 100644
--- a/Documentation/DocBook/v4l/common.xml
+++ b/Documentation/DocBook/v4l/common.xml
@@ -1170,7 +1170,7 @@ frames per second. If less than this number of frames is to be
1170captured or output, applications can request frame skipping or 1170captured or output, applications can request frame skipping or
1171duplicating on the driver side. This is especially useful when using 1171duplicating on the driver side. This is especially useful when using
1172the &func-read; or &func-write;, which are not augmented by timestamps 1172the &func-read; or &func-write;, which are not augmented by timestamps
1173or sequence counters, and to avoid unneccessary data copying.</para> 1173or sequence counters, and to avoid unnecessary data copying.</para>
1174 1174
1175 <para>Finally these ioctls can be used to determine the number of 1175 <para>Finally these ioctls can be used to determine the number of
1176buffers used internally by a driver in read/write mode. For 1176buffers used internally by a driver in read/write mode. For
diff --git a/Documentation/DocBook/v4l/vidioc-g-parm.xml b/Documentation/DocBook/v4l/vidioc-g-parm.xml
index 78332d365ce9..392aa9e5571e 100644
--- a/Documentation/DocBook/v4l/vidioc-g-parm.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-parm.xml
@@ -55,7 +55,7 @@ captured or output, applications can request frame skipping or
55duplicating on the driver side. This is especially useful when using 55duplicating on the driver side. This is especially useful when using
56the <function>read()</function> or <function>write()</function>, which 56the <function>read()</function> or <function>write()</function>, which
57are not augmented by timestamps or sequence counters, and to avoid 57are not augmented by timestamps or sequence counters, and to avoid
58unneccessary data copying.</para> 58unnecessary data copying.</para>
59 59
60 <para>Further these ioctls can be used to determine the number of 60 <para>Further these ioctls can be used to determine the number of
61buffers used internally by a driver in read/write mode. For 61buffers used internally by a driver in read/write mode. For
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index f5395af88a41..40ada93b820a 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -234,7 +234,7 @@ process is as follows:
234 Linus, usually the patches that have already been included in the 234 Linus, usually the patches that have already been included in the
235 -next kernel for a few weeks. The preferred way to submit big changes 235 -next kernel for a few weeks. The preferred way to submit big changes
236 is using git (the kernel's source management tool, more information 236 is using git (the kernel's source management tool, more information
237 can be found at http://git.or.cz/) but plain patches are also just 237 can be found at http://git-scm.com/) but plain patches are also just
238 fine. 238 fine.
239 - After two weeks a -rc1 kernel is released it is now possible to push 239 - After two weeks a -rc1 kernel is released it is now possible to push
240 only patches that do not include new features that could affect the 240 only patches that do not include new features that could affect the
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index bc38283379f0..69dd29ed824e 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -365,6 +365,7 @@ You can change this at module load time (for a module) with:
365 regshifts=<shift1>,<shift2>,... 365 regshifts=<shift1>,<shift2>,...
366 slave_addrs=<addr1>,<addr2>,... 366 slave_addrs=<addr1>,<addr2>,...
367 force_kipmid=<enable1>,<enable2>,... 367 force_kipmid=<enable1>,<enable2>,...
368 kipmid_max_busy_us=<ustime1>,<ustime2>,...
368 unload_when_empty=[0|1] 369 unload_when_empty=[0|1]
369 370
370Each of these except si_trydefaults is a list, the first item for the 371Each of these except si_trydefaults is a list, the first item for the
@@ -433,6 +434,7 @@ kernel command line as:
433 ipmi_si.regshifts=<shift1>,<shift2>,... 434 ipmi_si.regshifts=<shift1>,<shift2>,...
434 ipmi_si.slave_addrs=<addr1>,<addr2>,... 435 ipmi_si.slave_addrs=<addr1>,<addr2>,...
435 ipmi_si.force_kipmid=<enable1>,<enable2>,... 436 ipmi_si.force_kipmid=<enable1>,<enable2>,...
437 ipmi_si.kipmid_max_busy_us=<ustime1>,<ustime2>,...
436 438
437It works the same as the module parameters of the same names. 439It works the same as the module parameters of the same names.
438 440
@@ -450,6 +452,16 @@ force this thread on or off. If you force it off and don't have
450interrupts, the driver will run VERY slowly. Don't blame me, 452interrupts, the driver will run VERY slowly. Don't blame me,
451these interfaces suck. 453these interfaces suck.
452 454
455Unfortunately, this thread can use a lot of CPU depending on the
456interface's performance. This can waste a lot of CPU and cause
457various issues with detecting idle CPUs and using extra power. To
458avoid this, the kipmid_max_busy_us sets the maximum amount of time, in
459microseconds, that kipmid will spin before sleeping for a tick. This
460value sets a balance between performance and CPU waste and needs to be
461tuned to your needs. Maybe, someday, auto-tuning will be added, but
462that's not a simple thing and even the auto-tuning would need to be
463tuned to the user's desired performance.
464
453The driver supports a hot add and remove of interfaces. This way, 465The driver supports a hot add and remove of interfaces. This way,
454interfaces can be added or removed after the kernel is up and running. 466interfaces can be added or removed after the kernel is up and running.
455This is done using /sys/modules/ipmi_si/parameters/hotmod, which is a 467This is done using /sys/modules/ipmi_si/parameters/hotmod, which is a
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 94b945733534..6fc7ea1d1f9d 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1,3 +1,3 @@
1obj-m := DocBook/ accounting/ auxdisplay/ connector/ \ 1obj-m := DocBook/ accounting/ auxdisplay/ connector/ \
2 filesystems/configfs/ ia64/ networking/ \ 2 filesystems/ filesystems/configfs/ ia64/ laptops/ networking/ \
3 pcmcia/ spi/ video4linux/ vm/ watchdog/src/ 3 pcmcia/ spi/ timers/ video4linux/ vm/ watchdog/src/
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index a6d32e65d222..a8536cb88091 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -34,7 +34,7 @@ NMI handler.
34 cpu = smp_processor_id(); 34 cpu = smp_processor_id();
35 ++nmi_count(cpu); 35 ++nmi_count(cpu);
36 36
37 if (!rcu_dereference(nmi_callback)(regs, cpu)) 37 if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
38 default_do_nmi(regs); 38 default_do_nmi(regs);
39 39
40 nmi_exit(); 40 nmi_exit();
@@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the
47default_do_nmi() function to handle a machine-specific NMI. Finally, 47default_do_nmi() function to handle a machine-specific NMI. Finally,
48preemption is restored. 48preemption is restored.
49 49
50Strictly speaking, rcu_dereference() is not needed, since this code runs 50In theory, rcu_dereference_sched() is not needed, since this code runs
51only on i386, which does not need rcu_dereference() anyway. However, 51only on i386, which in theory does not need rcu_dereference_sched()
52it is a good documentation aid, particularly for anyone attempting to 52anyway. However, in practice it is a good documentation aid, particularly
53do something similar on Alpha. 53for anyone attempting to do something similar on Alpha or on systems
54with aggressive optimizing compilers.
54 55
55Quick Quiz: Why might the rcu_dereference() be necessary on Alpha, 56Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha,
56 given that the code referenced by the pointer is read-only? 57 given that the code referenced by the pointer is read-only?
57 58
58 59
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
99 100
100Answer to Quick Quiz 101Answer to Quick Quiz
101 102
102 Why might the rcu_dereference() be necessary on Alpha, given 103 Why might the rcu_dereference_sched() be necessary on Alpha, given
103 that the code referenced by the pointer is read-only? 104 that the code referenced by the pointer is read-only?
104 105
105 Answer: The caller to set_nmi_callback() might well have 106 Answer: The caller to set_nmi_callback() might well have
106 initialized some data that is to be used by the 107 initialized some data that is to be used by the new NMI
107 new NMI handler. In this case, the rcu_dereference() 108 handler. In this case, the rcu_dereference_sched() would
108 would be needed, because otherwise a CPU that received 109 be needed, because otherwise a CPU that received an NMI
109 an NMI just after the new handler was set might see 110 just after the new handler was set might see the pointer
110 the pointer to the new NMI handler, but the old 111 to the new NMI handler, but the old pre-initialized
111 pre-initialized version of the handler's data. 112 version of the handler's data.
112 113
113 More important, the rcu_dereference() makes it clear 114 This same sad story can happen on other CPUs when using
114 to someone reading the code that the pointer is being 115 a compiler with aggressive pointer-value speculation
115 protected by RCU. 116 optimizations.
117
118 More important, the rcu_dereference_sched() makes it
119 clear to someone reading the code that the pointer is
120 being protected by RCU-sched.
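
For completeness, a hedged sketch of the matching update side, adapted from
the example this document walks through (nmi_callback_t, dummy_nmi_callback()
and set_nmi_callback() are the document's illustrative names):

	static nmi_callback_t nmi_callback = dummy_nmi_callback;

	void set_nmi_callback(nmi_callback_t callback)
	{
		/* Any data the new handler depends on must be initialized
		 * before this point; rcu_assign_pointer() provides the
		 * ordering that publishes that data before the pointer itself.
		 */
		rcu_assign_pointer(nmi_callback, callback);
	}
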
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cbc180f90194..790d1a812376 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome!
260 The reason that it is permissible to use RCU list-traversal 260 The reason that it is permissible to use RCU list-traversal
261 primitives when the update-side lock is held is that doing so 261 primitives when the update-side lock is held is that doing so
262 can be quite helpful in reducing code bloat when common code is 262 can be quite helpful in reducing code bloat when common code is
263 shared between readers and updaters. 263 shared between readers and updaters. Additional primitives
264 are provided for this case, as discussed in lockdep.txt.
264 265
26510. Conversely, if you are in an RCU read-side critical section, 26610. Conversely, if you are in an RCU read-side critical section,
266 and you don't hold the appropriate update-side lock, you -must- 267 and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome!
344 requiring SRCU's read-side deadlock immunity or low read-side 345 requiring SRCU's read-side deadlock immunity or low read-side
345 realtime latency. 346 realtime latency.
346 347
347 Note that, rcu_assign_pointer() and rcu_dereference() relate to 348 Note that rcu_assign_pointer() relates to SRCU just as it does
348 SRCU just as they do to other forms of RCU. 349 to other forms of RCU.
349 350
35015. The whole point of call_rcu(), synchronize_rcu(), and friends 35115. The whole point of call_rcu(), synchronize_rcu(), and friends
351 is to wait until all pre-existing readers have finished before 352 is to wait until all pre-existing readers have finished before
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index fe24b58627bd..d7a49b2f6994 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives:
32 srcu_dereference(p, sp): 32 srcu_dereference(p, sp):
33 Check for SRCU read-side critical section. 33 Check for SRCU read-side critical section.
34 rcu_dereference_check(p, c): 34 rcu_dereference_check(p, c):
35 Use explicit check expression "c". 35 Use explicit check expression "c". This is useful in
36 code that is invoked by both readers and updaters.
36 rcu_dereference_raw(p) 37 rcu_dereference_raw(p)
37 Don't check. (Use sparingly, if at all.) 38 Don't check. (Use sparingly, if at all.)
39 rcu_dereference_protected(p, c):
40 Use explicit check expression "c", and omit all barriers
41 and compiler constraints. This is useful when the data
42 structure cannot change, for example, in code that is
43 invoked only by updaters.
44 rcu_access_pointer(p):
45 Return the value of the pointer and omit all barriers,
46 but retain the compiler constraints that prevent duplicating
47 or coalescing. This is useful when testing the
48 value of the pointer itself, for example, against NULL.
38 49
39The rcu_dereference_check() check expression can be any boolean 50The rcu_dereference_check() check expression can be any boolean
40expression, but would normally include one of the rcu_read_lock_held() 51expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla
59RCU read-side critical sections, in case (2) the ->file_lock prevents 70RCU read-side critical sections, in case (2) the ->file_lock prevents
60any change from taking place, and finally, in case (3) the current task 71any change from taking place, and finally, in case (3) the current task
61is the only task accessing the file_struct, again preventing any change 72is the only task accessing the file_struct, again preventing any change
62from taking place. 73from taking place. If the above statement was invoked only from updater
74code, it could instead be written as follows:
75
76 file = rcu_dereference_protected(fdt->fd[fd],
77 lockdep_is_held(&files->file_lock) ||
78 atomic_read(&files->count) == 1);
79
80This would verify cases #2 and #3 above, and furthermore lockdep would
81complain if this was used in an RCU read-side critical section unless one
82of these two cases held. Because rcu_dereference_protected() omits all
83barriers and compiler constraints, it generates better code than do the
84other flavors of rcu_dereference(). On the other hand, it is illegal
85to use rcu_dereference_protected() if either the RCU-protected pointer
86or the RCU-protected data that it points to can change concurrently.
63 87
64There are currently only "universal" versions of the rcu_assign_pointer() 88There are currently only "universal" versions of the rcu_assign_pointer()
65and RCU list-/tree-traversal primitives, which do not (yet) check for 89and RCU list-/tree-traversal primitives, which do not (yet) check for
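
A hedged sketch (not from this patch) of the rcu_access_pointer() case
described above: testing a hypothetical RCU-protected pointer against NULL
without paying for rcu_dereference()'s barriers:

	#include <linux/rcupdate.h>
	#include <linux/types.h>

	struct whizco_state;
	static struct whizco_state *gp;	/* RCU-protected, set via rcu_assign_pointer() */

	static bool gp_is_initialized(void)
	{
		/* Only the pointer value is examined; the pointed-to data is
		 * never dereferenced, so rcu_access_pointer() suffices here.
		 */
		return rcu_access_pointer(gp) != NULL;
	}
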
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 1423d2570d78..44c6dcc93d6d 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector
3The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables 3The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
4RCU's CPU stall detector, which detects conditions that unduly delay 4RCU's CPU stall detector, which detects conditions that unduly delay
5RCU grace periods. The stall detector's idea of what constitutes 5RCU grace periods. The stall detector's idea of what constitutes
6"unduly delayed" is controlled by a pair of C preprocessor macros: 6"unduly delayed" is controlled by a set of C preprocessor macros:
7 7
8RCU_SECONDS_TILL_STALL_CHECK 8RCU_SECONDS_TILL_STALL_CHECK
9 9
10 This macro defines the period of time that RCU will wait from 10 This macro defines the period of time that RCU will wait from
11 the beginning of a grace period until it issues an RCU CPU 11 the beginning of a grace period until it issues an RCU CPU
12 stall warning. It is normally ten seconds. 12 stall warning. This time period is normally ten seconds.
13 13
14RCU_SECONDS_TILL_STALL_RECHECK 14RCU_SECONDS_TILL_STALL_RECHECK
15 15
16 This macro defines the period of time that RCU will wait after 16 This macro defines the period of time that RCU will wait after
17 issuing a stall warning until it issues another stall warning. 17 issuing a stall warning until it issues another stall warning
18 It is normally set to thirty seconds. 18 for the same stall. This time period is normally set to thirty
19 seconds.
19 20
20RCU_STALL_RAT_DELAY 21RCU_STALL_RAT_DELAY
21 22
22 The CPU stall detector tries to make the offending CPU rat on itself, 23 The CPU stall detector tries to make the offending CPU print its
23 as this often gives better-quality stack traces. However, if 24 own warnings, as this often gives better-quality stack traces.
24 the offending CPU does not detect its own stall in the number 25 However, if the offending CPU does not detect its own stall in
25 of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will 26 the number of jiffies specified by RCU_STALL_RAT_DELAY, then
26 complain. This is normally set to two jiffies. 27 some other CPU will complain. This delay is normally set to
28 two jiffies.
27 29
28The following problems can result in an RCU CPU stall warning: 30When a CPU detects that it is stalling, it will print a message similar
31to the following:
32
33INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies)
34
35This message indicates that CPU 5 detected that it was causing a stall,
36and that the stall was affecting RCU-sched. This message will normally be
37followed by a stack dump of the offending CPU. On TREE_RCU kernel builds,
38RCU and RCU-sched are implemented by the same underlying mechanism,
39while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented
40by rcu_preempt_state.
41
42On the other hand, if the offending CPU fails to print out a stall-warning
43message quickly enough, some other CPU will print a message similar to
44the following:
45
46INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies)
47
48This message indicates that CPU 2 detected that CPUs 3 and 5 were both
49causing stalls, and that the stall was affecting RCU-bh. This message
50will normally be followed by stack dumps for each CPU. Please note that
51TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs,
52and that the tasks will be indicated by PID, for example, "P3421".
53It is even possible for a rcu_preempt_state stall to be caused by both
54CPUs -and- tasks, in which case the offending CPUs and tasks will all
55be called out in the list.
56
57Finally, if the grace period ends just as the stall warning starts
58printing, there will be a spurious stall-warning message:
59
60INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies)
61
62This is rare, but does happen from time to time in real life.
63
64So your kernel printed an RCU CPU stall warning. The next question is
65"What caused it?" The following problems can result in RCU CPU stall
66warnings:
29 67
30o A CPU looping in an RCU read-side critical section. 68o A CPU looping in an RCU read-side critical section.
31 69
32o A CPU looping with interrupts disabled. 70o A CPU looping with interrupts disabled. This condition can
71 result in RCU-sched and RCU-bh stalls.
33 72
34o A CPU looping with preemption disabled. 73o A CPU looping with preemption disabled. This condition can
74 result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
75 stalls.
76
77o A CPU looping with bottom halves disabled. This condition can
78 result in RCU-sched and RCU-bh stalls.
35 79
36o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel 80o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
37 without invoking schedule(). 81 without invoking schedule().
@@ -39,20 +83,24 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
39o A bug in the RCU implementation. 83o A bug in the RCU implementation.
40 84
41o A hardware failure. This is quite unlikely, but has occurred 85o A hardware failure. This is quite unlikely, but has occurred
42 at least once in a former life. A CPU failed in a running system, 86 at least once in real life. A CPU failed in a running system,
43 becoming unresponsive, but not causing an immediate crash. 87 becoming unresponsive, but not causing an immediate crash.
44 This resulted in a series of RCU CPU stall warnings, eventually 88 This resulted in a series of RCU CPU stall warnings, eventually
45 leading the realization that the CPU had failed. 89 leading the realization that the CPU had failed.
46 90
47The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning. 91The RCU, RCU-sched, and RCU-bh implementations have CPU stall
48SRCU does not do so directly, but its calls to synchronize_sched() will 92warning. SRCU does not have its own CPU stall warnings, but its
49result in RCU-sched detecting any CPU stalls that might be occurring. 93calls to synchronize_sched() will result in RCU-sched detecting
50 94RCU-sched-related CPU stalls. Please note that RCU only detects
51To diagnose the cause of the stall, inspect the stack traces. The offending 95CPU stalls when there is a grace period in progress. No grace period,
52function will usually be near the top of the stack. If you have a series 96no CPU stall warnings.
53of stall warnings from a single extended stall, comparing the stack traces 97
54can often help determine where the stall is occurring, which will usually 98To diagnose the cause of the stall, inspect the stack traces.
55be in the function nearest the top of the stack that stays the same from 99The offending function will usually be near the top of the stack.
56trace to trace. 100If you have a series of stall warnings from a single extended stall,
101comparing the stack traces can often help determine where the stall
102is occurring, which will usually be in the function nearest the top of
103that portion of the stack which remains the same from trace to trace.
104If you can reliably trigger the stall, ftrace can be quite helpful.
57 105
58RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. 106RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE.
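
To make the first item in the list above concrete, a hedged sketch of the
kind of code that produces these warnings; device_ready() is a hypothetical
helper, and the pattern is shown as a bug, not as something to copy:

	static void whizco_wait_for_device(void)	/* BUGGY example */
	{
		rcu_read_lock();
		while (!device_ready())	/* spins forever inside the read-side    */
			cpu_relax();	/* critical section, so the grace period */
		rcu_read_unlock();	/* never ends and a stall is reported     */
	}
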
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 0e50bc2aa1e2..5d9016795fd8 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -182,16 +182,6 @@ Similarly, sched_expedited RCU provides the following:
182 sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0 182 sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
183 sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0 183 sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
184 sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0 184 sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
185 state: -1 / 0:0 3:0 4:0
186
187As before, the first four lines are similar to those for RCU.
188The last line shows the task-migration state. The first number is
189-1 if synchronize_sched_expedited() is idle, -2 if in the process of
190posting wakeups to the migration kthreads, and N when waiting on CPU N.
191Each of the colon-separated fields following the "/" is a CPU:state pair.
192Valid states are "0" for idle, "1" for waiting for quiescent state,
193"2" for passed through quiescent state, and "3" when a race with a
194CPU-hotplug event forces use of the synchronize_sched() primitive.
195 185
196 186
197USAGE 187USAGE
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 8608fd85e921..efd8cc95c06b 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -256,23 +256,23 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
256The output of "cat rcu/rcu_pending" looks as follows: 256The output of "cat rcu/rcu_pending" looks as follows:
257 257
258rcu_sched: 258rcu_sched:
259 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 259 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
260 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 260 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
261 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 261 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
262 3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 262 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
263 4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 263 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
264 5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 264 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
265 6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 265 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
266 7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 266 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
267rcu_bh: 267rcu_bh:
268 0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 268 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
269 1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 269 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
270 2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 270 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
271 3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 271 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
272 4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 272 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
273 5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 273 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
274 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 274 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
275 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 275 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
276 276
277As always, this is once again split into "rcu_sched" and "rcu_bh" 277As always, this is once again split into "rcu_sched" and "rcu_bh"
278portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional 278portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional
@@ -284,6 +284,9 @@ o "np" is the number of times that __rcu_pending() has been invoked
284o "qsp" is the number of times that the RCU was waiting for a 284o "qsp" is the number of times that the RCU was waiting for a
285 quiescent state from this CPU. 285 quiescent state from this CPU.
286 286
287o "rpq" is the number of times that the CPU had passed through
288 a quiescent state, but not yet reported it to RCU.
289
287o "cbr" is the number of times that this CPU had RCU callbacks 290o "cbr" is the number of times that this CPU had RCU callbacks
288 that had passed through a grace period, and were thus ready 291 that had passed through a grace period, and were thus ready
289 to be invoked. 292 to be invoked.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1dc00ee97163..cfaac34c4557 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -840,6 +840,12 @@ SRCU: Initialization/cleanup
840 init_srcu_struct 840 init_srcu_struct
841 cleanup_srcu_struct 841 cleanup_srcu_struct
842 842
843All: lockdep-checked RCU-protected pointer access
844
845 rcu_dereference_check
846 rcu_dereference_protected
847 rcu_access_pointer
848
843See the comment headers in the source code (or the docbook generated 849See the comment headers in the source code (or the docbook generated
844from them) for more information. 850from them) for more information.
845 851
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 1053a56be3b1..8916ca48bc95 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -9,10 +9,14 @@ Documentation/SubmittingPatches and elsewhere regarding submitting Linux
9kernel patches. 9kernel patches.
10 10
11 11
121: Builds cleanly with applicable or modified CONFIG options =y, =m, and 121: If you use a facility then #include the file that defines/declares
13 that facility. Don't depend on other header files pulling in ones
14 that you use.
15
162: Builds cleanly with applicable or modified CONFIG options =y, =m, and
13 =n. No gcc warnings/errors, no linker warnings/errors. 17 =n. No gcc warnings/errors, no linker warnings/errors.
14 18
152: Passes allnoconfig, allmodconfig 192b: Passes allnoconfig, allmodconfig
16 20
173: Builds on multiple CPU architectures by using local cross-compile tools 213: Builds on multiple CPU architectures by using local cross-compile tools
18 or some other build farm. 22 or some other build farm.
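
As a hedged illustration of new item 1 (not part of this patch): a file that
calls kmalloc() should include <linux/slab.h> itself rather than relying on
some other header to pull it in; the whizco_* name is made up:

	#include <linux/slab.h>	/* declares kmalloc(), which this file uses */

	static void *whizco_alloc_buf(size_t len)
	{
		return kmalloc(len, GFP_KERNEL);
	}
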
diff --git a/Documentation/arm/Samsung-S3C24XX/CPUfreq.txt b/Documentation/arm/Samsung-S3C24XX/CPUfreq.txt
index 76b3a11e90be..fa968aa99d67 100644
--- a/Documentation/arm/Samsung-S3C24XX/CPUfreq.txt
+++ b/Documentation/arm/Samsung-S3C24XX/CPUfreq.txt
@@ -14,8 +14,8 @@ Introduction
14 how the clocks are arranged. The first implementation used as single 14 how the clocks are arranged. The first implementation used as single
15 PLL to feed the ARM, memory and peripherals via a series of dividers 15 PLL to feed the ARM, memory and peripherals via a series of dividers
16 and muxes and this is the implementation that is documented here. A 16 and muxes and this is the implementation that is documented here. A
17 newer version where there is a seperate PLL and clock divider for the 17 newer version where there is a separate PLL and clock divider for the
18 ARM core is available as a seperate driver. 18 ARM core is available as a separate driver.
19 19
20 20
21Layout 21Layout
diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt
new file mode 100644
index 000000000000..7cced1fea9c3
--- /dev/null
+++ b/Documentation/arm/Samsung/Overview.txt
@@ -0,0 +1,86 @@
1 Samsung ARM Linux Overview
2 ==========================
3
4Introduction
5------------
6
7 The Samsung range of ARM SoCs spans many similar devices, from the initial
8 ARM9 through to the newest ARM cores. This document shows an overview of
9 the current kernel support, how to use it and where to find the code
10 that supports this.
11
12 The currently supported SoCs are:
13
14 - S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list
15 - S3C64XX: S3C6400 and S3C6410
16 - S5PC6440
17
18 S5PC100 and S5PC110 support is currently being merged
19
20
21S3C24XX Systems
22---------------
23
24 There is still documentation in Documentation/arm/Samsung-S3C24XX/ which
25 deals with the architecture and drivers specific to these devices.
26
27 See Documentation/arm/Samsung-S3C24XX/Overview.txt for more information
28 on the implementation details and specific support.
29
30
31Configuration
32-------------
33
34 A number of configurations are supplied, as there is no current way of
35 unifying all the SoCs into one kernel.
36
37 s5p6440_defconfig - S5P6440 specific default configuration
38 s5pc100_defconfig - S5PC100 specific default configuration
39
40
41Layout
42------
43
44 The directory layout is currently being restructured, and consists of
45 several platform directories and then the machine specific directories
46 of the CPUs being built for.
47
48 plat-samsung provides the base for all the implementations, and is the
49 last in the line of include directories that are processed for the build
50 specific information. It contains the base clock, GPIO and device definitions
51 to get the system running.
52
53 plat-s3c is the s3c24xx/s3c64xx platform directory; although it is currently
54 involved in other builds, it will be phased out once the relevant code is
55 moved elsewhere.
56
57 plat-s3c24xx is for s3c24xx specific builds, see the S3C24XX docs.
58
59 plat-s3c64xx is for the s3c64xx specific bits, see the S3C24XX docs.
60
61 plat-s5p is for s5p specific builds, more to be added.
62
63
64 [ to finish ]
65
66
67Port Contributors
68-----------------
69
70 Ben Dooks (BJD)
71 Vincent Sanders
72 Herbert Potzl
73 Arnaud Patard (RTP)
74 Roc Wu
75 Klaus Fetscher
76 Dimitry Andric
77 Shannon Holland
78 Guillaume Gourat (NexVision)
79 Christer Weinigel (wingel) (Acer N30)
80 Lucas Correia Villa Real (S3C2400 port)
81
82
83Document Author
84---------------
85
86Copyright 2009-2010 Ben Dooks <ben-linux@fluff.org>
diff --git a/Documentation/arm/Samsung/clksrc-change-registers.awk b/Documentation/arm/Samsung/clksrc-change-registers.awk
new file mode 100755
index 000000000000..0c50220851fb
--- /dev/null
+++ b/Documentation/arm/Samsung/clksrc-change-registers.awk
@@ -0,0 +1,167 @@
1#!/usr/bin/awk -f
2#
3# Copyright 2010 Ben Dooks <ben-linux@fluff.org>
4#
5# Released under GPLv2
6
7# example usage
8# ./clksrc-change-registers.awk arch/arm/plat-s5pc1xx/include/plat/regs-clock.h < src > dst
9
10function extract_value(s)
11{
12 eqat = index(s, "=")
13 comat = index(s, ",")
14 return substr(s, eqat+2, (comat-eqat)-2)
15}
16
17function remove_brackets(b)
18{
19 return substr(b, 2, length(b)-2)
20}
21
22function splitdefine(l, p)
23{
24 r = split(l, tp)
25
26 p[0] = tp[2]
27 p[1] = remove_brackets(tp[3])
28}
29
30function find_length(f)
31{
32 if (0)
33 printf "find_length " f "\n" > "/dev/stderr"
34
35 if (f ~ /0x1/)
36 return 1
37 else if (f ~ /0x3/)
38 return 2
39 else if (f ~ /0x7/)
40 return 3
41 else if (f ~ /0xf/)
42 return 4
43
44	printf "unknown length " f "\n" > "/dev/stderr"
45 exit
46}
47
48function find_shift(s)
49{
50 id = index(s, "<")
51 if (id <= 0) {
52 printf "cannot find shift " s "\n" > "/dev/stderr"
53 exit
54 }
55
56 return substr(s, id+2)
57}
58
59
60BEGIN {
61 if (ARGC < 2) {
62 print "too few arguments" > "/dev/stderr"
63 exit
64 }
65
66# read the header file and find the mask values that we will need
67# to replace and create an associative array of values
68
69 while (getline line < ARGV[1] > 0) {
70 if (line ~ /\#define.*_MASK/ &&
71 !(line ~ /S5PC100_EPLL_MASK/) &&
72 !(line ~ /USB_SIG_MASK/)) {
73 splitdefine(line, fields)
74 name = fields[0]
75 if (0)
76 printf "MASK " line "\n" > "/dev/stderr"
77 dmask[name,0] = find_length(fields[1])
78 dmask[name,1] = find_shift(fields[1])
79 if (0)
80 printf "=> '" name "' LENGTH=" dmask[name,0] " SHIFT=" dmask[name,1] "\n" > "/dev/stderr"
81 } else {
82 }
83 }
84
85 delete ARGV[1]
86}
87
88/clksrc_clk.*=.*{/ {
89 shift=""
90 mask=""
91 divshift=""
92 reg_div=""
93 reg_src=""
94 indent=1
95
96 print $0
97
98 for(; indent >= 1;) {
99 if ((getline line) <= 0) {
100 printf "unexpected end of file" > "/dev/stderr"
101 exit 1;
102 }
103
104 if (line ~ /\.shift/) {
105 shift = extract_value(line)
106 } else if (line ~ /\.mask/) {
107 mask = extract_value(line)
108 } else if (line ~ /\.reg_divider/) {
109 reg_div = extract_value(line)
110 } else if (line ~ /\.reg_source/) {
111 reg_src = extract_value(line)
112 } else if (line ~ /\.divider_shift/) {
113 divshift = extract_value(line)
114 } else if (line ~ /{/) {
115 indent++
116 print line
117 } else if (line ~ /}/) {
118 indent--
119
120 if (indent == 0) {
121 if (0) {
122 printf "shift '" shift "' ='" dmask[shift,0] "'\n" > "/dev/stderr"
123 printf "mask '" mask "'\n" > "/dev/stderr"
124 printf "dshft '" divshift "'\n" > "/dev/stderr"
125 printf "rdiv '" reg_div "'\n" > "/dev/stderr"
126 printf "rsrc '" reg_src "'\n" > "/dev/stderr"
127 }
128
129 generated = mask
130 sub(reg_src, reg_div, generated)
131
132 if (0) {
133 printf "/* rsrc " reg_src " */\n"
134 printf "/* rdiv " reg_div " */\n"
135 printf "/* shift " shift " */\n"
136 printf "/* mask " mask " */\n"
137 printf "/* generated " generated " */\n"
138 }
139
140 if (reg_div != "") {
141 printf "\t.reg_div = { "
142 printf ".reg = " reg_div ", "
143 printf ".shift = " dmask[generated,1] ", "
144 printf ".size = " dmask[generated,0] ", "
145 printf "},\n"
146 }
147
148 printf "\t.reg_src = { "
149 printf ".reg = " reg_src ", "
150 printf ".shift = " dmask[mask,1] ", "
151 printf ".size = " dmask[mask,0] ", "
152
153 printf "},\n"
154
155 }
156
157 print line
158 } else {
159 print line
160 }
161
162 if (0)
163 printf indent ":" line "\n" > "/dev/stderr"
164 }
165}
166
167// && ! /clksrc_clk.*=.*{/ { print $0 }
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6fab97ea7e6b..508b5b2b0289 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1162,8 +1162,8 @@ where a driver received a request ala this before:
1162 1162
1163As mentioned, there is no virtual mapping of a bio. For DMA, this is 1163As mentioned, there is no virtual mapping of a bio. For DMA, this is
1164not a problem as the driver probably never will need a virtual mapping. 1164not a problem as the driver probably never will need a virtual mapping.
1165Instead it needs a bus mapping (pci_map_page for a single segment or 1165Instead it needs a bus mapping (dma_map_page for a single segment or
1166use blk_rq_map_sg for scatter gather) to be able to ship it to the driver. For 1166use dma_map_sg for scatter gather) to be able to ship it to the driver. For
1167PIO drivers (or drivers that need to revert to PIO transfer once in a 1167PIO drivers (or drivers that need to revert to PIO transfer once in a
1168while (IDE for example)), where the CPU is doing the actual data 1168while (IDE for example)), where the CPU is doing the actual data
1169transfer a virtual mapping is needed. If the driver supports highmem I/O, 1169transfer a virtual mapping is needed. If the driver supports highmem I/O,
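
A hedged sketch (not from this patch) of the bus-mapping step mentioned
above, applied to a scatterlist built from a request; the whizco_* name and
the descriptor-programming step are hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	static int whizco_map_request(struct device *dev, struct scatterlist *sgl,
				      int nents)
	{
		int count;

		count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
		if (count == 0)
			return -ENOMEM;	/* nothing could be mapped */

		/* Program 'count' hardware descriptors from sg_dma_address()
		 * and sg_dma_len(); call dma_unmap_sg(dev, sgl, nents,
		 * DMA_FROM_DEVICE) when the transfer completes.
		 */
		return count;
	}
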
diff --git a/Documentation/cgroups/cgroup_event_listener.c b/Documentation/cgroups/cgroup_event_listener.c
new file mode 100644
index 000000000000..8c2bfc4a6358
--- /dev/null
+++ b/Documentation/cgroups/cgroup_event_listener.c
@@ -0,0 +1,110 @@
1/*
2 * cgroup_event_listener.c - Simple listener of cgroup events
3 *
4 * Copyright (C) Kirill A. Shutemov <kirill@shutemov.name>
5 */
6
7#include <assert.h>
8#include <errno.h>
9#include <fcntl.h>
10#include <libgen.h>
11#include <limits.h>
12#include <stdio.h>
13#include <string.h>
14#include <unistd.h>
15
16#include <sys/eventfd.h>
17
18#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>\n"
19
20int main(int argc, char **argv)
21{
22 int efd = -1;
23 int cfd = -1;
24 int event_control = -1;
25 char event_control_path[PATH_MAX];
26 char line[LINE_MAX];
27	int ret = -1;	/* error by default for early failure paths */
28
29 if (argc != 3) {
30 fputs(USAGE_STR, stderr);
31 return 1;
32 }
33
34 cfd = open(argv[1], O_RDONLY);
35 if (cfd == -1) {
36 fprintf(stderr, "Cannot open %s: %s\n", argv[1],
37 strerror(errno));
38 goto out;
39 }
40
41 ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control",
42 dirname(argv[1]));
43 if (ret >= PATH_MAX) {
44 fputs("Path to cgroup.event_control is too long\n", stderr);
45 goto out;
46 }
47
48 event_control = open(event_control_path, O_WRONLY);
49 if (event_control == -1) {
50 fprintf(stderr, "Cannot open %s: %s\n", event_control_path,
51 strerror(errno));
52 goto out;
53 }
54
55 efd = eventfd(0, 0);
56 if (efd == -1) {
57 perror("eventfd() failed");
58 goto out;
59 }
60
61 ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]);
62 if (ret >= LINE_MAX) {
63 fputs("Arguments string is too long\n", stderr);
64 goto out;
65 }
66
67 ret = write(event_control, line, strlen(line) + 1);
68 if (ret == -1) {
69 perror("Cannot write to cgroup.event_control");
70 goto out;
71 }
72
73 while (1) {
74 uint64_t result;
75
76 ret = read(efd, &result, sizeof(result));
77 if (ret == -1) {
78 if (errno == EINTR)
79 continue;
80 perror("Cannot read from eventfd");
81 break;
82 }
83 assert(ret == sizeof(result));
84
85 ret = access(event_control_path, W_OK);
86 if ((ret == -1) && (errno == ENOENT)) {
87			puts("The cgroup seems to have been removed.");
88 ret = 0;
89 break;
90 }
91
92 if (ret == -1) {
93 perror("cgroup.event_control "
94				"is not accessible any more");
95 break;
96 }
97
98 printf("%s %s: crossed\n", argv[1], argv[2]);
99 }
100
101out:
102 if (efd >= 0)
103 close(efd);
104 if (event_control >= 0)
105 close(event_control);
106 if (cfd >= 0)
107 close(cfd);
108
109 return (ret != 0);
110}
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 0b33bfe7dde9..a1ca5924faff 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -22,6 +22,8 @@ CONTENTS:
222. Usage Examples and Syntax 222. Usage Examples and Syntax
23 2.1 Basic Usage 23 2.1 Basic Usage
24 2.2 Attaching processes 24 2.2 Attaching processes
25 2.3 Mounting hierarchies by name
26 2.4 Notification API
253. Kernel API 273. Kernel API
26 3.1 Overview 28 3.1 Overview
27 3.2 Synchronization 29 3.2 Synchronization
@@ -233,8 +235,7 @@ containing the following files describing that cgroup:
233 - cgroup.procs: list of tgids in the cgroup. This list is not 235 - cgroup.procs: list of tgids in the cgroup. This list is not
234 guaranteed to be sorted or free of duplicate tgids, and userspace 236 guaranteed to be sorted or free of duplicate tgids, and userspace
235 should sort/uniquify the list if this property is required. 237 should sort/uniquify the list if this property is required.
236 Writing a tgid into this file moves all threads with that tgid into 238 This is a read-only file, for now.
237 this cgroup.
238 - notify_on_release flag: run the release agent on exit? 239 - notify_on_release flag: run the release agent on exit?
239 - release_agent: the path to use for release notifications (this file 240 - release_agent: the path to use for release notifications (this file
240 exists in the top cgroup only) 241 exists in the top cgroup only)
@@ -434,6 +435,25 @@ you give a subsystem a name.
434The name of the subsystem appears as part of the hierarchy description 435The name of the subsystem appears as part of the hierarchy description
435in /proc/mounts and /proc/<pid>/cgroups. 436in /proc/mounts and /proc/<pid>/cgroups.
436 437
4382.4 Notification API
439--------------------
440
441There is a mechanism which allows getting notifications about the changing
442status of a cgroup.
443
444To register a new notification handler you need to:
445 - create a file descriptor for event notification using eventfd(2);
446 - open a control file to be monitored (e.g. memory.usage_in_bytes);
447 - write "<event_fd> <control_fd> <args>" to cgroup.event_control.
448   Interpretation of args is defined by the control file implementation.
449
450The eventfd will be woken up by the control file implementation or when the
451cgroup is removed.
452
453To unregister a notification handler, just close the eventfd.
454
455NOTE: Support for notifications has to be implemented by the control
456file. See the documentation for the relevant subsystem.
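
These steps are what the example program Documentation/cgroups/cgroup_event_listener.c
performs; a condensed sketch of them is shown below (error handling is omitted, and
the /cgroup/A paths and the "5M" argument are only illustrative, they depend on how
the hierarchy is mounted and on the control file being watched):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    static int wait_for_event(void)
    {
            char line[64];
            uint64_t count;

            int efd = eventfd(0, 0);                                      /* step 1 */
            int cfd = open("/cgroup/A/memory.usage_in_bytes", O_RDONLY);  /* step 2 */
            int ecd = open("/cgroup/A/cgroup.event_control", O_WRONLY);

            /* step 3: "<event_fd> <control_fd> <args>" */
            snprintf(line, sizeof(line), "%d %d %s", efd, cfd, "5M");
            write(ecd, line, strlen(line) + 1);

            /* blocks until the control file implementation fires the event */
            read(efd, &count, sizeof(count));
            return 0;
    }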
437 457
4383. Kernel API 4583. Kernel API
439============= 459=============
@@ -488,6 +508,11 @@ Each subsystem should:
488- add an entry in linux/cgroup_subsys.h 508- add an entry in linux/cgroup_subsys.h
489- define a cgroup_subsys object called <name>_subsys 509- define a cgroup_subsys object called <name>_subsys
490 510
511If a subsystem can be compiled as a module, it should also have in its
512module initcall a call to cgroup_load_subsys(), and in its exitcall a
513call to cgroup_unload_subsys(). It should also set its_subsys.module =
514THIS_MODULE in its .c file.
515
491Each subsystem may export the following methods. The only mandatory 516Each subsystem may export the following methods. The only mandatory
492methods are create/destroy. Any others that are null are presumed to 517methods are create/destroy. Any others that are null are presumed to
493be successful no-ops. 518be successful no-ops.
@@ -536,10 +561,21 @@ returns an error, this will abort the attach operation. If a NULL
536task is passed, then a successful result indicates that *any* 561task is passed, then a successful result indicates that *any*
537unspecified task can be moved into the cgroup. Note that this isn't 562unspecified task can be moved into the cgroup. Note that this isn't
538called on a fork. If this method returns 0 (success) then this should 563called on a fork. If this method returns 0 (success) then this should
539remain valid while the caller holds cgroup_mutex. If threadgroup is 564remain valid while the caller holds cgroup_mutex and it is ensured that either
565attach() or cancel_attach() will be called in the future. If threadgroup is
540true, then a successful result indicates that all threads in the given 566true, then a successful result indicates that all threads in the given
541thread's threadgroup can be moved together. 567thread's threadgroup can be moved together.
542 568
569void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
570 struct task_struct *task, bool threadgroup)
571(cgroup_mutex held by caller)
572
573Called when a task attach operation has failed after can_attach() has succeeded.
574A subsystem whose can_attach() has some side-effects should provide this
575function, so that the subsystem can implement a rollback. If it has no such
576side-effects, this callback is not necessary. This will be called only for
577subsystems whose can_attach() operation has succeeded.
578
543void attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 579void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
544 struct cgroup *old_cgrp, struct task_struct *task, 580 struct cgroup *old_cgrp, struct task_struct *task,
545 bool threadgroup) 581 bool threadgroup)
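
As a rough illustration of the can_attach()/cancel_attach() pairing described
above, a hypothetical subsystem that takes a reservation as a side-effect of
can_attach() might roll it back like this (the foo_* names are invented for the
sketch; only the callback signatures follow the descriptions in this file):

    #include <linux/cgroup.h>

    static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct task_struct *tsk, bool threadgroup)
    {
            /* side-effect: reserve room for the incoming task */
            if (!foo_reserve(cgrp, tsk))
                    return -ENOMEM;
            return 0;
    }

    static void foo_cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                                  struct task_struct *tsk, bool threadgroup)
    {
            /* the attach was aborted after can_attach() succeeded: undo it */
            foo_unreserve(cgrp, tsk);
    }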
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 1d7e9784439a..4160df82b3f5 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -168,20 +168,20 @@ Each cpuset is represented by a directory in the cgroup file system
168containing (on top of the standard cgroup files) the following 168containing (on top of the standard cgroup files) the following
169files describing that cpuset: 169files describing that cpuset:
170 170
171 - cpus: list of CPUs in that cpuset 171 - cpuset.cpus: list of CPUs in that cpuset
172 - mems: list of Memory Nodes in that cpuset 172 - cpuset.mems: list of Memory Nodes in that cpuset
173 - memory_migrate flag: if set, move pages to cpusets nodes 173 - cpuset.memory_migrate flag: if set, move pages to cpusets nodes
174 - cpu_exclusive flag: is cpu placement exclusive? 174 - cpuset.cpu_exclusive flag: is cpu placement exclusive?
175 - mem_exclusive flag: is memory placement exclusive? 175 - cpuset.mem_exclusive flag: is memory placement exclusive?
176 - mem_hardwall flag: is memory allocation hardwalled 176 - cpuset.mem_hardwall flag: is memory allocation hardwalled
177 - memory_pressure: measure of how much paging pressure in cpuset 177 - cpuset.memory_pressure: measure of how much paging pressure in cpuset
178 - memory_spread_page flag: if set, spread page cache evenly on allowed nodes 178 - cpuset.memory_spread_page flag: if set, spread page cache evenly on allowed nodes
179 - memory_spread_slab flag: if set, spread slab cache evenly on allowed nodes 179 - cpuset.memory_spread_slab flag: if set, spread slab cache evenly on allowed nodes
180 - sched_load_balance flag: if set, load balance within CPUs on that cpuset 180 - cpuset.sched_load_balance flag: if set, load balance within CPUs on that cpuset
181 - sched_relax_domain_level: the searching range when migrating tasks 181 - cpuset.sched_relax_domain_level: the searching range when migrating tasks
182 182
183In addition, the root cpuset only has the following file: 183In addition, the root cpuset only has the following file:
184 - memory_pressure_enabled flag: compute memory_pressure? 184 - cpuset.memory_pressure_enabled flag: compute memory_pressure?
185 185
186New cpusets are created using the mkdir system call or shell 186New cpusets are created using the mkdir system call or shell
187command. The properties of a cpuset, such as its flags, allowed 187command. The properties of a cpuset, such as its flags, allowed
@@ -229,7 +229,7 @@ If a cpuset is cpu or mem exclusive, no other cpuset, other than
229a direct ancestor or descendant, may share any of the same CPUs or 229a direct ancestor or descendant, may share any of the same CPUs or
230Memory Nodes. 230Memory Nodes.
231 231
232A cpuset that is mem_exclusive *or* mem_hardwall is "hardwalled", 232A cpuset that is cpuset.mem_exclusive *or* cpuset.mem_hardwall is "hardwalled",
233i.e. it restricts kernel allocations for page, buffer and other data 233i.e. it restricts kernel allocations for page, buffer and other data
234commonly shared by the kernel across multiple users. All cpusets, 234commonly shared by the kernel across multiple users. All cpusets,
235whether hardwalled or not, restrict allocations of memory for user 235whether hardwalled or not, restrict allocations of memory for user
@@ -304,15 +304,15 @@ times 1000.
304--------------------------- 304---------------------------
305There are two boolean flag files per cpuset that control where the 305There are two boolean flag files per cpuset that control where the
306kernel allocates pages for the file system buffers and related in 306kernel allocates pages for the file system buffers and related in
307kernel data structures. They are called 'memory_spread_page' and 307kernel data structures. They are called 'cpuset.memory_spread_page' and
308'memory_spread_slab'. 308'cpuset.memory_spread_slab'.
309 309
310If the per-cpuset boolean flag file 'memory_spread_page' is set, then 310If the per-cpuset boolean flag file 'cpuset.memory_spread_page' is set, then
311the kernel will spread the file system buffers (page cache) evenly 311the kernel will spread the file system buffers (page cache) evenly
312over all the nodes that the faulting task is allowed to use, instead 312over all the nodes that the faulting task is allowed to use, instead
313of preferring to put those pages on the node where the task is running. 313of preferring to put those pages on the node where the task is running.
314 314
315If the per-cpuset boolean flag file 'memory_spread_slab' is set, 315If the per-cpuset boolean flag file 'cpuset.memory_spread_slab' is set,
316then the kernel will spread some file system related slab caches, 316then the kernel will spread some file system related slab caches,
317such as for inodes and dentries evenly over all the nodes that the 317such as for inodes and dentries evenly over all the nodes that the
318faulting task is allowed to use, instead of preferring to put those 318faulting task is allowed to use, instead of preferring to put those
@@ -337,21 +337,21 @@ their containing tasks memory spread settings. If memory spreading
337is turned off, then the currently specified NUMA mempolicy once again 337is turned off, then the currently specified NUMA mempolicy once again
338applies to memory page allocations. 338applies to memory page allocations.
339 339
340Both 'memory_spread_page' and 'memory_spread_slab' are boolean flag 340Both 'cpuset.memory_spread_page' and 'cpuset.memory_spread_slab' are boolean flag
341files. By default they contain "0", meaning that the feature is off 341files. By default they contain "0", meaning that the feature is off
342for that cpuset. If a "1" is written to that file, then that turns 342for that cpuset. If a "1" is written to that file, then that turns
343the named feature on. 343the named feature on.
344 344
345The implementation is simple. 345The implementation is simple.
346 346
347Setting the flag 'memory_spread_page' turns on a per-process flag 347Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag
348PF_SPREAD_PAGE for each task that is in that cpuset or subsequently 348PF_SPREAD_PAGE for each task that is in that cpuset or subsequently
349joins that cpuset. The page allocation calls for the page cache 349joins that cpuset. The page allocation calls for the page cache
350is modified to perform an inline check for this PF_SPREAD_PAGE task 350is modified to perform an inline check for this PF_SPREAD_PAGE task
351flag, and if set, a call to a new routine cpuset_mem_spread_node() 351flag, and if set, a call to a new routine cpuset_mem_spread_node()
352returns the node to prefer for the allocation. 352returns the node to prefer for the allocation.
353 353
354Similarly, setting 'memory_spread_slab' turns on the flag 354Similarly, setting 'cpuset.memory_spread_slab' turns on the flag
355PF_SPREAD_SLAB, and appropriately marked slab caches will allocate 355PF_SPREAD_SLAB, and appropriately marked slab caches will allocate
356pages from the node returned by cpuset_mem_spread_node(). 356pages from the node returned by cpuset_mem_spread_node().
357 357
@@ -404,24 +404,24 @@ the following two situations:
404 system overhead on those CPUs, including avoiding task load 404 system overhead on those CPUs, including avoiding task load
405 balancing if that is not needed. 405 balancing if that is not needed.
406 406
407When the per-cpuset flag "sched_load_balance" is enabled (the default 407When the per-cpuset flag "cpuset.sched_load_balance" is enabled (the default
408setting), it requests that all the CPUs in that cpusets allowed 'cpus' 408setting), it requests that all the CPUs in that cpusets allowed 'cpuset.cpus'
409be contained in a single sched domain, ensuring that load balancing 409be contained in a single sched domain, ensuring that load balancing
410can move a task (not otherwised pinned, as by sched_setaffinity) 410can move a task (not otherwised pinned, as by sched_setaffinity)
411from any CPU in that cpuset to any other. 411from any CPU in that cpuset to any other.
412 412
413When the per-cpuset flag "sched_load_balance" is disabled, then the 413When the per-cpuset flag "cpuset.sched_load_balance" is disabled, then the
414scheduler will avoid load balancing across the CPUs in that cpuset, 414scheduler will avoid load balancing across the CPUs in that cpuset,
415--except-- in so far as is necessary because some overlapping cpuset 415--except-- in so far as is necessary because some overlapping cpuset
416has "sched_load_balance" enabled. 416has "sched_load_balance" enabled.
417 417
418So, for example, if the top cpuset has the flag "sched_load_balance" 418So, for example, if the top cpuset has the flag "cpuset.sched_load_balance"
419enabled, then the scheduler will have one sched domain covering all 419enabled, then the scheduler will have one sched domain covering all
420CPUs, and the setting of the "sched_load_balance" flag in any other 420CPUs, and the setting of the "cpuset.sched_load_balance" flag in any other
421cpusets won't matter, as we're already fully load balancing. 421cpusets won't matter, as we're already fully load balancing.
422 422
423Therefore in the above two situations, the top cpuset flag 423Therefore in the above two situations, the top cpuset flag
424"sched_load_balance" should be disabled, and only some of the smaller, 424"cpuset.sched_load_balance" should be disabled, and only some of the smaller,
425child cpusets have this flag enabled. 425child cpusets have this flag enabled.
426 426
427When doing this, you don't usually want to leave any unpinned tasks in 427When doing this, you don't usually want to leave any unpinned tasks in
@@ -433,7 +433,7 @@ scheduler might not consider the possibility of load balancing that
433task to that underused CPU. 433task to that underused CPU.
434 434
435Of course, tasks pinned to a particular CPU can be left in a cpuset 435Of course, tasks pinned to a particular CPU can be left in a cpuset
436that disables "sched_load_balance" as those tasks aren't going anywhere 436that disables "cpuset.sched_load_balance" as those tasks aren't going anywhere
437else anyway. 437else anyway.
438 438
439There is an impedance mismatch here, between cpusets and sched domains. 439There is an impedance mismatch here, between cpusets and sched domains.
@@ -443,19 +443,19 @@ overlap and each CPU is in at most one sched domain.
443It is necessary for sched domains to be flat because load balancing 443It is necessary for sched domains to be flat because load balancing
444across partially overlapping sets of CPUs would risk unstable dynamics 444across partially overlapping sets of CPUs would risk unstable dynamics
445that would be beyond our understanding. So if each of two partially 445that would be beyond our understanding. So if each of two partially
446overlapping cpusets enables the flag 'sched_load_balance', then we 446overlapping cpusets enables the flag 'cpuset.sched_load_balance', then we
447form a single sched domain that is a superset of both. We won't move 447form a single sched domain that is a superset of both. We won't move
448a task to a CPU outside it cpuset, but the scheduler load balancing 448a task to a CPU outside it cpuset, but the scheduler load balancing
449code might waste some compute cycles considering that possibility. 449code might waste some compute cycles considering that possibility.
450 450
451This mismatch is why there is not a simple one-to-one relation 451This mismatch is why there is not a simple one-to-one relation
452between which cpusets have the flag "sched_load_balance" enabled, 452between which cpusets have the flag "cpuset.sched_load_balance" enabled,
453and the sched domain configuration. If a cpuset enables the flag, it 453and the sched domain configuration. If a cpuset enables the flag, it
454will get balancing across all its CPUs, but if it disables the flag, 454will get balancing across all its CPUs, but if it disables the flag,
455it will only be assured of no load balancing if no other overlapping 455it will only be assured of no load balancing if no other overlapping
456cpuset enables the flag. 456cpuset enables the flag.
457 457
458If two cpusets have partially overlapping 'cpus' allowed, and only 458If two cpusets have partially overlapping 'cpuset.cpus' allowed, and only
459one of them has this flag enabled, then the other may find its 459one of them has this flag enabled, then the other may find its
460tasks only partially load balanced, just on the overlapping CPUs. 460tasks only partially load balanced, just on the overlapping CPUs.
461This is just the general case of the top_cpuset example given a few 461This is just the general case of the top_cpuset example given a few
@@ -468,23 +468,23 @@ load balancing to the other CPUs.
4681.7.1 sched_load_balance implementation details. 4681.7.1 sched_load_balance implementation details.
469------------------------------------------------ 469------------------------------------------------
470 470
471The per-cpuset flag 'sched_load_balance' defaults to enabled (contrary 471The per-cpuset flag 'cpuset.sched_load_balance' defaults to enabled (contrary
472to most cpuset flags.) When enabled for a cpuset, the kernel will 472to most cpuset flags.) When enabled for a cpuset, the kernel will
473ensure that it can load balance across all the CPUs in that cpuset 473ensure that it can load balance across all the CPUs in that cpuset
474(makes sure that all the CPUs in the cpus_allowed of that cpuset are 474(makes sure that all the CPUs in the cpus_allowed of that cpuset are
475in the same sched domain.) 475in the same sched domain.)
476 476
477If two overlapping cpusets both have 'sched_load_balance' enabled, 477If two overlapping cpusets both have 'cpuset.sched_load_balance' enabled,
478then they will be (must be) both in the same sched domain. 478then they will be (must be) both in the same sched domain.
479 479
480If, as is the default, the top cpuset has 'sched_load_balance' enabled, 480If, as is the default, the top cpuset has 'cpuset.sched_load_balance' enabled,
481then by the above that means there is a single sched domain covering 481then by the above that means there is a single sched domain covering
482the whole system, regardless of any other cpuset settings. 482the whole system, regardless of any other cpuset settings.
483 483
484The kernel commits to user space that it will avoid load balancing 484The kernel commits to user space that it will avoid load balancing
485where it can. It will pick as fine a granularity partition of sched 485where it can. It will pick as fine a granularity partition of sched
486domains as it can while still providing load balancing for any set 486domains as it can while still providing load balancing for any set
487of CPUs allowed to a cpuset having 'sched_load_balance' enabled. 487of CPUs allowed to a cpuset having 'cpuset.sched_load_balance' enabled.
488 488
489The internal kernel cpuset to scheduler interface passes from the 489The internal kernel cpuset to scheduler interface passes from the
490cpuset code to the scheduler code a partition of the load balanced 490cpuset code to the scheduler code a partition of the load balanced
@@ -495,9 +495,9 @@ all the CPUs that must be load balanced.
495The cpuset code builds a new such partition and passes it to the 495The cpuset code builds a new such partition and passes it to the
496scheduler sched domain setup code, to have the sched domains rebuilt 496scheduler sched domain setup code, to have the sched domains rebuilt
497as necessary, whenever: 497as necessary, whenever:
498 - the 'sched_load_balance' flag of a cpuset with non-empty CPUs changes, 498 - the 'cpuset.sched_load_balance' flag of a cpuset with non-empty CPUs changes,
499 - or CPUs come or go from a cpuset with this flag enabled, 499 - or CPUs come or go from a cpuset with this flag enabled,
500 - or 'sched_relax_domain_level' value of a cpuset with non-empty CPUs 500 - or 'cpuset.sched_relax_domain_level' value of a cpuset with non-empty CPUs
501 and with this flag enabled changes, 501 and with this flag enabled changes,
502 - or a cpuset with non-empty CPUs and with this flag enabled is removed, 502 - or a cpuset with non-empty CPUs and with this flag enabled is removed,
503 - or a cpu is offlined/onlined. 503 - or a cpu is offlined/onlined.
@@ -542,7 +542,7 @@ As the result, task B on CPU X need to wait task A or wait load balance
542on the next tick. For some applications in special situation, waiting 542on the next tick. For some applications in special situation, waiting
5431 tick may be too long. 5431 tick may be too long.
544 544
545The 'sched_relax_domain_level' file allows you to request changing 545The 'cpuset.sched_relax_domain_level' file allows you to request changing
546this searching range as you like. This file takes int value which 546this searching range as you like. This file takes int value which
547indicates size of searching range in levels ideally as follows, 547indicates size of searching range in levels ideally as follows,
548otherwise initial value -1 that indicates the cpuset has no request. 548otherwise initial value -1 that indicates the cpuset has no request.
@@ -559,8 +559,8 @@ The system default is architecture dependent. The system default
559can be changed using the relax_domain_level= boot parameter. 559can be changed using the relax_domain_level= boot parameter.
560 560
561This file is per-cpuset and affect the sched domain where the cpuset 561This file is per-cpuset and affect the sched domain where the cpuset
562belongs to. Therefore if the flag 'sched_load_balance' of a cpuset 562belongs to. Therefore if the flag 'cpuset.sched_load_balance' of a cpuset
563is disabled, then 'sched_relax_domain_level' have no effect since 563is disabled, then 'cpuset.sched_relax_domain_level' have no effect since
564there is no sched domain belonging the cpuset. 564there is no sched domain belonging the cpuset.
565 565
566If multiple cpusets are overlapping and hence they form a single sched 566If multiple cpusets are overlapping and hence they form a single sched
@@ -607,9 +607,9 @@ from one cpuset to another, then the kernel will adjust the tasks
607memory placement, as above, the next time that the kernel attempts 607memory placement, as above, the next time that the kernel attempts
608to allocate a page of memory for that task. 608to allocate a page of memory for that task.
609 609
610If a cpuset has its 'cpus' modified, then each task in that cpuset 610If a cpuset has its 'cpuset.cpus' modified, then each task in that cpuset
611will have its allowed CPU placement changed immediately. Similarly, 611will have its allowed CPU placement changed immediately. Similarly,
612if a tasks pid is written to another cpusets 'tasks' file, then its 612if a tasks pid is written to another cpusets 'cpuset.tasks' file, then its
613allowed CPU placement is changed immediately. If such a task had been 613allowed CPU placement is changed immediately. If such a task had been
614bound to some subset of its cpuset using the sched_setaffinity() call, 614bound to some subset of its cpuset using the sched_setaffinity() call,
615the task will be allowed to run on any CPU allowed in its new cpuset, 615the task will be allowed to run on any CPU allowed in its new cpuset,
@@ -622,8 +622,8 @@ and the processor placement is updated immediately.
622Normally, once a page is allocated (given a physical page 622Normally, once a page is allocated (given a physical page
623of main memory) then that page stays on whatever node it 623of main memory) then that page stays on whatever node it
624was allocated, so long as it remains allocated, even if the 624was allocated, so long as it remains allocated, even if the
625cpusets memory placement policy 'mems' subsequently changes. 625cpusets memory placement policy 'cpuset.mems' subsequently changes.
626If the cpuset flag file 'memory_migrate' is set true, then when 626If the cpuset flag file 'cpuset.memory_migrate' is set true, then when
627tasks are attached to that cpuset, any pages that task had 627tasks are attached to that cpuset, any pages that task had
628allocated to it on nodes in its previous cpuset are migrated 628allocated to it on nodes in its previous cpuset are migrated
629to the tasks new cpuset. The relative placement of the page within 629to the tasks new cpuset. The relative placement of the page within
@@ -631,12 +631,12 @@ the cpuset is preserved during these migration operations if possible.
631For example if the page was on the second valid node of the prior cpuset 631For example if the page was on the second valid node of the prior cpuset
632then the page will be placed on the second valid node of the new cpuset. 632then the page will be placed on the second valid node of the new cpuset.
633 633
634Also if 'memory_migrate' is set true, then if that cpusets 634Also if 'cpuset.memory_migrate' is set true, then if that cpusets
635'mems' file is modified, pages allocated to tasks in that 635'cpuset.mems' file is modified, pages allocated to tasks in that
636cpuset, that were on nodes in the previous setting of 'mems', 636cpuset, that were on nodes in the previous setting of 'cpuset.mems',
637will be moved to nodes in the new setting of 'mems.' 637will be moved to nodes in the new setting of 'mems.'
638Pages that were not in the tasks prior cpuset, or in the cpusets 638Pages that were not in the tasks prior cpuset, or in the cpusets
639prior 'mems' setting, will not be moved. 639prior 'cpuset.mems' setting, will not be moved.
640 640
641There is an exception to the above. If hotplug functionality is used 641There is an exception to the above. If hotplug functionality is used
642to remove all the CPUs that are currently assigned to a cpuset, 642to remove all the CPUs that are currently assigned to a cpuset,
@@ -678,8 +678,8 @@ and then start a subshell 'sh' in that cpuset:
678 cd /dev/cpuset 678 cd /dev/cpuset
679 mkdir Charlie 679 mkdir Charlie
680 cd Charlie 680 cd Charlie
681 /bin/echo 2-3 > cpus 681 /bin/echo 2-3 > cpuset.cpus
682 /bin/echo 1 > mems 682 /bin/echo 1 > cpuset.mems
683 /bin/echo $$ > tasks 683 /bin/echo $$ > tasks
684 sh 684 sh
685 # The subshell 'sh' is now running in cpuset Charlie 685 # The subshell 'sh' is now running in cpuset Charlie
@@ -725,10 +725,13 @@ Now you want to do something with this cpuset.
725 725
726In this directory you can find several files: 726In this directory you can find several files:
727# ls 727# ls
728cpu_exclusive memory_migrate mems tasks 728cpuset.cpu_exclusive cpuset.memory_spread_slab
729cpus memory_pressure notify_on_release 729cpuset.cpus cpuset.mems
730mem_exclusive memory_spread_page sched_load_balance 730cpuset.mem_exclusive cpuset.sched_load_balance
731mem_hardwall memory_spread_slab sched_relax_domain_level 731cpuset.mem_hardwall cpuset.sched_relax_domain_level
732cpuset.memory_migrate notify_on_release
733cpuset.memory_pressure tasks
734cpuset.memory_spread_page
732 735
733Reading them will give you information about the state of this cpuset: 736Reading them will give you information about the state of this cpuset:
734the CPUs and Memory Nodes it can use, the processes that are using 737the CPUs and Memory Nodes it can use, the processes that are using
@@ -736,13 +739,13 @@ it, its properties. By writing to these files you can manipulate
736the cpuset. 739the cpuset.
737 740
738Set some flags: 741Set some flags:
739# /bin/echo 1 > cpu_exclusive 742# /bin/echo 1 > cpuset.cpu_exclusive
740 743
741Add some cpus: 744Add some cpus:
742# /bin/echo 0-7 > cpus 745# /bin/echo 0-7 > cpuset.cpus
743 746
744Add some mems: 747Add some mems:
745# /bin/echo 0-7 > mems 748# /bin/echo 0-7 > cpuset.mems
746 749
747Now attach your shell to this cpuset: 750Now attach your shell to this cpuset:
748# /bin/echo $$ > tasks 751# /bin/echo $$ > tasks
@@ -774,28 +777,28 @@ echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent
774This is the syntax to use when writing in the cpus or mems files 777This is the syntax to use when writing in the cpus or mems files
775in cpuset directories: 778in cpuset directories:
776 779
777# /bin/echo 1-4 > cpus -> set cpus list to cpus 1,2,3,4 780# /bin/echo 1-4 > cpuset.cpus -> set cpus list to cpus 1,2,3,4
778# /bin/echo 1,2,3,4 > cpus -> set cpus list to cpus 1,2,3,4 781# /bin/echo 1,2,3,4 > cpuset.cpus -> set cpus list to cpus 1,2,3,4
779 782
780To add a CPU to a cpuset, write the new list of CPUs including the 783To add a CPU to a cpuset, write the new list of CPUs including the
781CPU to be added. To add 6 to the above cpuset: 784CPU to be added. To add 6 to the above cpuset:
782 785
783# /bin/echo 1-4,6 > cpus -> set cpus list to cpus 1,2,3,4,6 786# /bin/echo 1-4,6 > cpuset.cpus -> set cpus list to cpus 1,2,3,4,6
784 787
785Similarly to remove a CPU from a cpuset, write the new list of CPUs 788Similarly to remove a CPU from a cpuset, write the new list of CPUs
786without the CPU to be removed. 789without the CPU to be removed.
787 790
788To remove all the CPUs: 791To remove all the CPUs:
789 792
790# /bin/echo "" > cpus -> clear cpus list 793# /bin/echo "" > cpuset.cpus -> clear cpus list
791 794
7922.3 Setting flags 7952.3 Setting flags
793----------------- 796-----------------
794 797
795The syntax is very simple: 798The syntax is very simple:
796 799
797# /bin/echo 1 > cpu_exclusive -> set flag 'cpu_exclusive' 800# /bin/echo 1 > cpuset.cpu_exclusive -> set flag 'cpuset.cpu_exclusive'
798# /bin/echo 0 > cpu_exclusive -> unset flag 'cpu_exclusive' 801# /bin/echo 0 > cpuset.cpu_exclusive -> unset flag 'cpuset.cpu_exclusive'
799 802
8002.4 Attaching processes 8032.4 Attaching processes
801----------------------- 804-----------------------
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index 72db89ed0609..f7f68b2ac199 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -1,6 +1,6 @@
1Memory Resource Controller(Memcg) Implementation Memo. 1Memory Resource Controller(Memcg) Implementation Memo.
2Last Updated: 2009/1/20 2Last Updated: 2010/2
3Base Kernel Version: based on 2.6.29-rc2. 3Base Kernel Version: based on 2.6.33-rc7-mm(candidate for 34).
4 4
5Because VM is getting complex (one of reasons is memcg...), memcg's behavior 5Because VM is getting complex (one of reasons is memcg...), memcg's behavior
6is complex. This is a document for memcg's internal behavior. 6is complex. This is a document for memcg's internal behavior.
@@ -337,7 +337,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
337 race and lock dependency with other cgroup subsystems. 337 race and lock dependency with other cgroup subsystems.
338 338
339 example) 339 example)
340 # mount -t cgroup none /cgroup -t cpuset,memory,cpu,devices 340 # mount -t cgroup none /cgroup -o cpuset,memory,cpu,devices
341 341
342 and do task move, mkdir, rmdir etc...under this. 342 and do task move, mkdir, rmdir etc...under this.
343 343
@@ -348,7 +348,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
348 348
349 For example, test like following is good. 349 For example, test like following is good.
350 (Shell-A) 350 (Shell-A)
351 # mount -t cgroup none /cgroup -t memory 351 # mount -t cgroup none /cgroup -o memory
352 # mkdir /cgroup/test 352 # mkdir /cgroup/test
353 # echo 40M > /cgroup/test/memory.limit_in_bytes 353 # echo 40M > /cgroup/test/memory.limit_in_bytes
354 # echo 0 > /cgroup/test/tasks 354 # echo 0 > /cgroup/test/tasks
@@ -378,3 +378,42 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
378 #echo 50M > memory.limit_in_bytes 378 #echo 50M > memory.limit_in_bytes
379 #echo 50M > memory.memsw.limit_in_bytes 379 #echo 50M > memory.memsw.limit_in_bytes
380 run 51M of malloc 380 run 51M of malloc
381
382 9.9 Move charges at task migration
383 Charges associated with a task can be moved along with task migration.
384
385 (Shell-A)
386 #mkdir /cgroup/A
387 #echo $$ >/cgroup/A/tasks
388	run some programs which use some amount of memory in /cgroup/A.
389
390 (Shell-B)
391 #mkdir /cgroup/B
392 #echo 1 >/cgroup/B/memory.move_charge_at_immigrate
393 #echo "pid of the program running in group A" >/cgroup/B/tasks
394
395 You can see charges have been moved by reading *.usage_in_bytes or
396 memory.stat of both A and B.
397 See 8.2 of Documentation/cgroups/memory.txt to see what value should be
398 written to move_charge_at_immigrate.
399
400 9.10 Memory thresholds
401	The memory controller implements memory thresholds using the cgroups
402	notification API. You can use Documentation/cgroups/cgroup_event_listener.c
403	to test it.
404
405 (Shell-A) Create cgroup and run event listener
406 # mkdir /cgroup/A
407 # ./cgroup_event_listener /cgroup/A/memory.usage_in_bytes 5M
408
409 (Shell-B) Add task to cgroup and try to allocate and free memory
410 # echo $$ >/cgroup/A/tasks
411 # a="$(dd if=/dev/zero bs=1M count=10)"
412 # a=
413
414	You will see a message from cgroup_event_listener every time you cross
415	a threshold.
416
417 Use /cgroup/A/memory.memsw.usage_in_bytes to test memsw thresholds.
418
419	It's a good idea to test the root cgroup as well.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index b871f2552b45..3a6aecd078ba 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -182,6 +182,8 @@ list.
182NOTE: Reclaim does not work for the root cgroup, since we cannot set any 182NOTE: Reclaim does not work for the root cgroup, since we cannot set any
183limits on the root cgroup. 183limits on the root cgroup.
184 184
185Note2: When panic_on_oom is set to "2", the whole system will panic.
186
1852. Locking 1872. Locking
186 188
187The memory controller uses the following hierarchy 189The memory controller uses the following hierarchy
@@ -262,10 +264,12 @@ some of the pages cached in the cgroup (page cache pages).
2624.2 Task migration 2644.2 Task migration
263 265
264When a task migrates from one cgroup to another, it's charge is not 266When a task migrates from one cgroup to another, it's charge is not
265carried forward. The pages allocated from the original cgroup still 267carried forward by default. The pages allocated from the original cgroup still
266remain charged to it, the charge is dropped when the page is freed or 268remain charged to it, the charge is dropped when the page is freed or
267reclaimed. 269reclaimed.
268 270
271Note: You can move charges of a task along with task migration. See 8.
272
2694.3 Removing a cgroup 2734.3 Removing a cgroup
270 274
271A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a 275A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
@@ -336,7 +340,7 @@ Note:
3365.3 swappiness 3405.3 swappiness
337 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. 341 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
338 342
339 Following cgroups' swapiness can't be changed. 343 Following cgroups' swappiness can't be changed.
340 - root cgroup (uses /proc/sys/vm/swappiness). 344 - root cgroup (uses /proc/sys/vm/swappiness).
341 - a cgroup which uses hierarchy and it has child cgroup. 345 - a cgroup which uses hierarchy and it has child cgroup.
342 - a cgroup which uses hierarchy and not the root of hierarchy. 346 - a cgroup which uses hierarchy and not the root of hierarchy.
@@ -377,7 +381,8 @@ The feature can be disabled by
377NOTE1: Enabling/disabling will fail if the cgroup already has other 381NOTE1: Enabling/disabling will fail if the cgroup already has other
378cgroups created below it. 382cgroups created below it.
379 383
380NOTE2: This feature can be enabled/disabled per subtree. 384NOTE2: When panic_on_oom is set to "2", the whole system will panic in
385case of an oom event in any cgroup.
381 386
3827. Soft limits 3877. Soft limits
383 388
@@ -414,7 +419,76 @@ NOTE1: Soft limits take effect over a long period of time, since they involve
414NOTE2: It is recommended to set the soft limit always below the hard limit, 419NOTE2: It is recommended to set the soft limit always below the hard limit,
415 otherwise the hard limit will take precedence. 420 otherwise the hard limit will take precedence.
416 421
4178. TODO 4228. Move charges at task migration
423
424Users can move charges associated with a task along with task migration, that
425is, uncharge the task's pages from the old cgroup and charge them to the new cgroup.
426This feature is not supported in !CONFIG_MMU environments because of the lack of
427page tables.
428
4298.1 Interface
430
431This feature is disabled by default. It can be enabled (and disabled again) by
432writing to memory.move_charge_at_immigrate of the destination cgroup.
433
434If you want to enable it:
435
436# echo (some positive value) > memory.move_charge_at_immigrate
437
438Note: Each bit of move_charge_at_immigrate has its own meaning about what type
439 of charges should be moved. See 8.2 for details.
440Note: Charges are moved only when you move mm->owner, IOW, a leader of a thread
441 group.
442Note: If we cannot find enough space for the task in the destination cgroup, we
443 try to make space by reclaiming memory. Task migration may fail if we
444 cannot make enough space.
445Note: It can take several seconds if you move charges of the order of gigabytes.
446
447And if you want to disable it again:
448
449# echo 0 > memory.move_charge_at_immigrate
450
4518.2 Types of charges which can be moved
452
453Each bit of move_charge_at_immigrate has its own meaning about what type of
454charges should be moved.
455
456  bit | what type of charges would be moved?
457 -----+------------------------------------------------------------------------
458  0   | A charge of an anonymous page (or swap of it) used by the target task.
459      | Those pages and swaps must be used only by the target task. You must
460      | enable Swap Extension (see 2.4) to enable moving of swap charges.
461
462Note: Those pages and swaps must be charged to the old cgroup.
463Note: More types of pages (e.g. file cache, shmem) will be supported by other
464      bits in the future.
465
4668.3 TODO
467
468- Add support for other types of pages (e.g. file cache, shmem, etc.).
469- Implement madvise(2) to let users decide the vma to be moved or not to be
470 moved.
471- All charge-moving operations are done under cgroup_mutex. It's not good
472  behavior to hold the mutex for too long, so we may need some trick.
473
4749. Memory thresholds
475
476The memory controller implements memory thresholds using the cgroups notification
477API (see cgroups.txt). It allows registering multiple memory and memsw
478thresholds and getting notifications when a threshold is crossed.
479
480To register a threshold, an application needs to:
481 - create an eventfd using eventfd(2);
482 - open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
483 - write a string like "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
484 cgroup.event_control.
485
486The application will be notified through the eventfd when memory usage crosses
487the threshold in either direction.
488
489It's applicable to both the root and non-root cgroups.
490
49110. TODO
418 492
4191. Add support for accounting huge pages (as a separate controller) 4931. Add support for accounting huge pages (as a separate controller)
4202. Make per-cgroup scanner reclaim not-shared pages first 4942. Make per-cgroup scanner reclaim not-shared pages first
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
new file mode 100644
index 000000000000..8117e5bf6065
--- /dev/null
+++ b/Documentation/circular-buffers.txt
@@ -0,0 +1,234 @@
1 ================
2 CIRCULAR BUFFERS
3 ================
4
5By: David Howells <dhowells@redhat.com>
6 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
7
8
9Linux provides a number of features that can be used to implement circular
10buffering. There are two sets of such features:
11
12 (1) Convenience functions for determining information about power-of-2 sized
13 buffers.
14
15 (2) Memory barriers for when the producer and the consumer of objects in the
16 buffer don't want to share a lock.
17
18To use these facilities, as discussed below, there needs to be just one
19producer and just one consumer. It is possible to handle multiple producers by
20serialising them, and to handle multiple consumers by serialising them.
21
22
23Contents:
24
25 (*) What is a circular buffer?
26
27 (*) Measuring power-of-2 buffers.
28
29 (*) Using memory barriers with circular buffers.
30 - The producer.
31 - The consumer.
32
33
34==========================
35WHAT IS A CIRCULAR BUFFER?
36==========================
37
38First of all, what is a circular buffer? A circular buffer is a buffer of
39fixed, finite size into which there are two indices:
40
41 (1) A 'head' index - the point at which the producer inserts items into the
42 buffer.
43
44 (2) A 'tail' index - the point at which the consumer finds the next item in
45 the buffer.
46
47Typically when the tail pointer is equal to the head pointer, the buffer is
48empty; and the buffer is full when the head pointer is one less than the tail
49pointer.
50
51The head index is incremented when items are added, and the tail index when
52items are removed. The tail index should never jump the head index, and both
53indices should be wrapped to 0 when they reach the end of the buffer, thus
54allowing an infinite amount of data to flow through the buffer.
55
56Typically, items will all be of the same unit size, but this isn't strictly
57required to use the techniques below. The indices can be increased by more
58than 1 if multiple items or variable-sized items are to be included in the
59buffer, provided that neither index overtakes the other. The implementer must
60be careful, however, as a region more than one unit in size may wrap the end of
61the buffer and be broken into two segments.
62
63
64============================
65MEASURING POWER-OF-2 BUFFERS
66============================
67
68Calculation of the occupancy or the remaining capacity of an arbitrarily sized
69circular buffer would normally be a slow operation, requiring the use of a
70modulus (divide) instruction. However, if the buffer is of a power-of-2 size,
71then a much quicker bitwise-AND instruction can be used instead.
72
73Linux provides a set of macros for handling power-of-2 circular buffers. These
74can be made use of by:
75
76 #include <linux/circ_buf.h>
77
78The macros are:
79
80 (*) Measure the remaining capacity of a buffer:
81
82 CIRC_SPACE(head_index, tail_index, buffer_size);
83
84 This returns the amount of space left in the buffer[1] into which items
85 can be inserted.
86
87
88 (*) Measure the maximum consecutive immediate space in a buffer:
89
90 CIRC_SPACE_TO_END(head_index, tail_index, buffer_size);
91
92 This returns the amount of consecutive space left in the buffer[1] into
93 which items can be immediately inserted without having to wrap back to the
94 beginning of the buffer.
95
96
97 (*) Measure the occupancy of a buffer:
98
99 CIRC_CNT(head_index, tail_index, buffer_size);
100
101 This returns the number of items currently occupying a buffer[2].
102
103
104 (*) Measure the non-wrapping occupancy of a buffer:
105
106 CIRC_CNT_TO_END(head_index, tail_index, buffer_size);
107
108 This returns the number of consecutive items[2] that can be extracted from
109 the buffer without having to wrap back to the beginning of the buffer.
110
111
112Each of these macros will nominally return a value between 0 and buffer_size-1,
113however:
114
115 [1] CIRC_SPACE*() are intended to be used in the producer. To the producer
116 they will return a lower bound as the producer controls the head index,
117 but the consumer may still be depleting the buffer on another CPU and
118 moving the tail index.
119
120 To the consumer it will show an upper bound as the producer may be busy
121 depleting the space.
122
123 [2] CIRC_CNT*() are intended to be used in the consumer. To the consumer they
124 will return a lower bound as the consumer controls the tail index, but the
125 producer may still be filling the buffer on another CPU and moving the
126 head index.
127
128 To the producer it will show an upper bound as the consumer may be busy
129 emptying the buffer.
130
131 [3] To a third party, the order in which the writes to the indices by the
132 producer and consumer become visible cannot be guaranteed as they are
133 independent and may be made on different CPUs - so the result in such a
134 situation will merely be a guess, and may even be negative.
135
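To see why the power-of-2 restriction matters, note that the counting macros
reduce to a subtraction masked with (size - 1) rather than a modulus. A rough
sketch of the first two macros, modelled on linux/circ_buf.h (the *_TO_END
variants add a little more arithmetic), might look like:

    /* Occupancy: number of items in the buffer (size must be a power of 2). */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))

    /* Remaining space: one slot is deliberately kept empty so that
     * head == tail unambiguously means "buffer empty". */
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))
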
136
137===========================================
138USING MEMORY BARRIERS WITH CIRCULAR BUFFERS
139===========================================
140
141By using memory barriers in conjunction with circular buffers, you can avoid
142the need to:
143
144 (1) use a single lock to govern access to both ends of the buffer, thus
145 allowing the buffer to be filled and emptied at the same time; and
146
147 (2) use atomic counter operations.
148
149There are two sides to this: the producer that fills the buffer, and the
150consumer that empties it. Only one thing should be filling a buffer at any one
151time, and only one thing should be emptying a buffer at any one time, but the
152two sides can operate simultaneously.
153
154
155THE PRODUCER
156------------
157
158The producer will look something like this:
159
160 spin_lock(&producer_lock);
161
162 unsigned long head = buffer->head;
163 unsigned long tail = ACCESS_ONCE(buffer->tail);
164
165 if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
166 /* insert one item into the buffer */
167 struct item *item = buffer[head];
168
169 produce_item(item);
170
171 smp_wmb(); /* commit the item before incrementing the head */
172
173 buffer->head = (head + 1) & (buffer->size - 1);
174
175 /* wake_up() will make sure that the head is committed before
176 * waking anyone up */
177 wake_up(consumer);
178 }
179
180 spin_unlock(&producer_lock);
181
182This will instruct the CPU that the contents of the new item must be written
183before the head index makes it available to the consumer and then instructs the
184CPU that the revised head index must be written before the consumer is woken.
185
186Note that wake_up() doesn't have to be the exact mechanism used, but whatever
187is used must guarantee a (write) memory barrier between the update of the head
188index and the change of state of the consumer, if a change of state occurs.
189
190
191THE CONSUMER
192------------
193
194The consumer will look something like this:
195
196 spin_lock(&consumer_lock);
197
198 unsigned long head = ACCESS_ONCE(buffer->head);
199 unsigned long tail = buffer->tail;
200
201 if (CIRC_CNT(head, tail, buffer->size) >= 1) {
202 /* read index before reading contents at that index */
203 smp_read_barrier_depends();
204
205 /* extract one item from the buffer */
206 struct item *item = buffer[tail];
207
208 consume_item(item);
209
210 smp_mb(); /* finish reading descriptor before incrementing tail */
211
212 buffer->tail = (tail + 1) & (buffer->size - 1);
213 }
214
215 spin_unlock(&consumer_lock);
216
217This will instruct the CPU to make sure the index is up to date before reading
218the new item, and then it shall make sure the CPU has finished reading the item
219before it writes the new tail pointer, which will erase the item.
220
221
222Note the use of ACCESS_ONCE() in both algorithms to read the opposition index.
223This prevents the compiler from discarding and reloading its cached value -
224which some compilers will do across smp_read_barrier_depends(). This isn't
225strictly needed if you can be sure that the opposition index will _only_ be
226used the once.
227
228
229===============
230FURTHER READING
231===============
232
233See also Documentation/memory-barriers.txt for a description of Linux's memory
234barrier facilities.
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index b07add3467f1..7764594778d4 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/slab.h>
28#include <linux/timer.h> 29#include <linux/timer.h>
29 30
30#include <linux/connector.h> 31#include <linux/connector.h>
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
index 877a1b26cc3d..926cf1b5e63e 100644
--- a/Documentation/console/console.txt
+++ b/Documentation/console/console.txt
@@ -74,7 +74,7 @@ driver takes over the consoles vacated by the driver. Binding, on the other
74hand, will bind the driver to the consoles that are currently occupied by a 74hand, will bind the driver to the consoles that are currently occupied by a
75system driver. 75system driver.
76 76
77NOTE1: Binding and binding must be selected in Kconfig. It's under: 77NOTE1: Binding and unbinding must be selected in Kconfig. It's under:
78 78
79Device Drivers -> Character devices -> Support for binding and unbinding 79Device Drivers -> Character devices -> Support for binding and unbinding
80console drivers 80console drivers
diff --git a/Documentation/driver-model/platform.txt b/Documentation/driver-model/platform.txt
index 2e2c2ea90ceb..41f41632ee55 100644
--- a/Documentation/driver-model/platform.txt
+++ b/Documentation/driver-model/platform.txt
@@ -192,7 +192,7 @@ command line. This will execute all matching early_param() callbacks.
192User specified early platform devices will be registered at this point. 192User specified early platform devices will be registered at this point.
193For the early serial console case the user can specify port on the 193For the early serial console case the user can specify port on the
194kernel command line as "earlyprintk=serial.0" where "earlyprintk" is 194kernel command line as "earlyprintk=serial.0" where "earlyprintk" is
195the class string, "serial" is the name of the platfrom driver and 195the class string, "serial" is the name of the platform driver and
1960 is the platform device id. If the id is -1 then the dot and the 1960 is the platform device id. If the id is -1 then the dot and the
197id can be omitted. 197id can be omitted.
198 198
diff --git a/Documentation/eisa.txt b/Documentation/eisa.txt
index 60e361ba08c0..f297fc1202ae 100644
--- a/Documentation/eisa.txt
+++ b/Documentation/eisa.txt
@@ -171,7 +171,7 @@ device.
171virtual_root.force_probe : 171virtual_root.force_probe :
172 172
173Force the probing code to probe EISA slots even when it cannot find an 173Force the probing code to probe EISA slots even when it cannot find an
174EISA compliant mainboard (nothing appears on slot 0). Defaultd to 0 174EISA compliant mainboard (nothing appears on slot 0). Defaults to 0
175(don't force), and set to 1 (force probing) when either 175(don't force), and set to 1 (force probing) when either
176CONFIG_ALPHA_JENSEN or CONFIG_EISA_VLB_PRIMING are set. 176CONFIG_ALPHA_JENSEN or CONFIG_EISA_VLB_PRIMING are set.
177 177
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index a618efab7b15..945ff3fda433 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -216,26 +216,14 @@ Works. Use "Insert file..." or external editor.
216~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 216~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
217Gmail (Web GUI) 217Gmail (Web GUI)
218 218
219If you just have to use Gmail to send patches, it CAN be made to work. It 219Does not work for sending patches.
220requires a bit of external help, though. 220
221 221Gmail web client converts tabs to spaces automatically.
222The first problem is that Gmail converts tabs to spaces. This will 222
223totally break your patches. To prevent this, you have to use a different 223At the same time it wraps lines every 78 chars with CRLF style line breaks
224editor. There is a firefox extension called "ViewSourceWith" 224although tab2space problem can be solved with external editor.
225(https://addons.mozilla.org/en-US/firefox/addon/394) which allows you to 225
226edit any text box in the editor of your choice. Configure it to launch 226Another problem is that Gmail will base64-encode any message that has a
227your favorite editor. When you want to send a patch, use this technique. 227non-ASCII character. That includes things like European names.
228Once you have crafted your messsage + patch, save and exit the editor,
229which should reload the Gmail edit box. GMAIL WILL PRESERVE THE TABS.
230Hoorah. Apparently you can cut-n-paste literal tabs, but Gmail will
231convert those to spaces upon sending!
232
233The second problem is that Gmail converts tabs to spaces on replies. If
234you reply to a patch, don't expect to be able to apply it as a patch.
235
236The last problem is that Gmail will base64-encode any message that has a
237non-ASCII character. That includes things like European names. Be aware.
238
239Gmail is not convenient for lkml patches, but CAN be made to work.
240 228
241 ### 229 ###
diff --git a/Documentation/fb/imacfb.txt b/Documentation/fb/efifb.txt
index 316ec9bb7deb..a59916c29b33 100644
--- a/Documentation/fb/imacfb.txt
+++ b/Documentation/fb/efifb.txt
@@ -1,9 +1,9 @@
1 1
2What is imacfb? 2What is efifb?
3=============== 3===============
4 4
5This is a generic EFI platform driver for Intel based Apple computers. 5This is a generic EFI platform driver for Intel based Apple computers.
6Imacfb is only for EFI booted Intel Macs. 6efifb is only for EFI booted Intel Macs.
7 7
8Supported Hardware 8Supported Hardware
9================== 9==================
@@ -16,16 +16,16 @@ MacMini
16How to use it? 16How to use it?
17============== 17==============
18 18
19Imacfb does not have any kind of autodetection of your machine. 19efifb does not have any kind of autodetection of your machine.
20You have to add the following kernel parameters in your elilo.conf: 20You have to add the following kernel parameters in your elilo.conf:
21 Macbook : 21 Macbook :
22 video=imacfb:macbook 22 video=efifb:macbook
23 MacMini : 23 MacMini :
24 video=imacfb:mini 24 video=efifb:mini
25 Macbook Pro 15", iMac 17" : 25 Macbook Pro 15", iMac 17" :
26 video=imacfb:i17 26 video=efifb:i17
27 Macbook Pro 17", iMac 20" : 27 Macbook Pro 17", iMac 20" :
28 video=imacfb:i20 28 video=efifb:i20
29 29
30-- 30--
31Edgar Hucek <gimli@dark-green.com> 31Edgar Hucek <gimli@dark-green.com>
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index a5cc0db63d7a..05df0b7514b6 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -582,3 +582,33 @@ Why: The paravirt mmu host support is slower than non-paravirt mmu, both
582Who: Avi Kivity <avi@redhat.com> 582Who: Avi Kivity <avi@redhat.com>
583 583
584---------------------------- 584----------------------------
585
586What: "acpi=ht" boot option
587When: 2.6.35
588Why: Useful in 2003, implementation is a hack.
589 Generally invoked by accident today.
590 Seen as doing more harm than good.
591Who: Len Brown <len.brown@intel.com>
592
593----------------------------
594
595What: video4linux /dev/vtx teletext API support
596When: 2.6.35
597Files: drivers/media/video/saa5246a.c drivers/media/video/saa5249.c
598 include/linux/videotext.h
599Why: The vtx device nodes have been superseded by vbi device nodes
600 for many years. No applications exist that use the vtx support.
601 Of the two i2c drivers that actually support this API the saa5249
602 has been impossible to use for a year now and no known hardware
603 that supports this device exists. The saa5246a is theoretically
604 supported by the old mxb boards, but it never actually worked.
605
606 In summary: there is no hardware that can use this API and there
607 are no applications actually implementing this API.
608
609 The vtx support still reserves minors 192-223 and we would really
610 like to reuse those for upcoming new functionality. In the unlikely
611 event that new hardware appears that wants to use the functionality
612 provided by the vtx API, then that functionality should be build
613 around the sliced VBI API instead.
614Who: Hans Verkuil <hverkuil@xs4all.nl>
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 5139b8c9d5af..4303614b5add 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -16,6 +16,8 @@ befs.txt
16 - information about the BeOS filesystem for Linux. 16 - information about the BeOS filesystem for Linux.
17bfs.txt 17bfs.txt
18 - info for the SCO UnixWare Boot Filesystem (BFS). 18 - info for the SCO UnixWare Boot Filesystem (BFS).
19ceph.txt
20 - info for the Ceph Distributed File System
19cifs.txt 21cifs.txt
20 - description of the CIFS filesystem. 22 - description of the CIFS filesystem.
21coda.txt 23coda.txt
@@ -32,6 +34,8 @@ dlmfs.txt
32 - info on the userspace interface to the OCFS2 DLM. 34 - info on the userspace interface to the OCFS2 DLM.
33dnotify.txt 35dnotify.txt
34 - info about directory notification in Linux. 36 - info about directory notification in Linux.
37dnotify_test.c
38 - example program for dnotify
35ecryptfs.txt 39ecryptfs.txt
36 - docs on eCryptfs: stacked cryptographic filesystem for Linux. 40 - docs on eCryptfs: stacked cryptographic filesystem for Linux.
37exofs.txt 41exofs.txt
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index 57e0b80a5274..c0236e753bc8 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -37,6 +37,15 @@ For Plan 9 From User Space applications (http://swtch.com/plan9)
37 37
38 mount -t 9p `namespace`/acme /mnt/9 -o trans=unix,uname=$USER 38 mount -t 9p `namespace`/acme /mnt/9 -o trans=unix,uname=$USER
39 39
40For server running on QEMU host with virtio transport:
41
42 mount -t 9p -o trans=virtio <mount_tag> /mnt/9
43
44where mount_tag is the tag associated by the server with each of the exported
45mount points. Each 9P export is seen by the client as a virtio device with an
46associated "mount_tag" property. Available mount tags can be
47seen by reading /sys/bus/virtio/drivers/9pnet_virtio/virtio<n>/mount_tag files.
48
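For example, the available tag can first be read from sysfs and then passed to
mount (the device index and the tag name "testfs" below are only illustrative):

    # cat /sys/bus/virtio/drivers/9pnet_virtio/virtio0/mount_tag
    # mount -t 9p -o trans=virtio testfs /mnt/9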
40OPTIONS 49OPTIONS
41======= 50=======
42 51
@@ -47,7 +56,7 @@ OPTIONS
47 fd - used passed file descriptors for connection 56 fd - used passed file descriptors for connection
48 (see rfdno and wfdno) 57 (see rfdno and wfdno)
49 virtio - connect to the next virtio channel available 58 virtio - connect to the next virtio channel available
50 (from lguest or KVM with trans_virtio module) 59 (from QEMU with trans_virtio module)
51 rdma - connect to a specified RDMA channel 60 rdma - connect to a specified RDMA channel
52 61
53 uname=name user name to attempt mount as on the remote server. The 62 uname=name user name to attempt mount as on the remote server. The
@@ -85,7 +94,12 @@ OPTIONS
85 94
86 port=n port to connect to on the remote server 95 port=n port to connect to on the remote server
87 96
88 noextend force legacy mode (no 9p2000.u semantics) 97 noextend force legacy mode (no 9p2000.u or 9p2000.L semantics)
98
99 version=name Select 9P protocol version. Valid options are:
100 9p2000 - Legacy mode (same as noextend)
101 9p2000.u - Use 9P2000.u protocol
102 9p2000.L - Use 9P2000.L protocol
89 103
90 dfltuid attempt to mount as a particular uid 104 dfltuid attempt to mount as a particular uid
91 105
diff --git a/Documentation/filesystems/Makefile b/Documentation/filesystems/Makefile
new file mode 100644
index 000000000000..a5dd114da14f
--- /dev/null
+++ b/Documentation/filesystems/Makefile
@@ -0,0 +1,8 @@
1# kbuild trick to avoid linker error. Can be omitted if a module is built.
2obj- := dummy.o
3
4# List of programs to build
5hostprogs-y := dnotify_test
6
7# Tell kbuild to always build the programs
8always := $(hostprogs-y)
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
new file mode 100644
index 000000000000..0660c9f5deef
--- /dev/null
+++ b/Documentation/filesystems/ceph.txt
@@ -0,0 +1,140 @@
1Ceph Distributed File System
2============================
3
4Ceph is a distributed network file system designed to provide good
5performance, reliability, and scalability.
6
7Basic features include:
8
9 * POSIX semantics
10 * Seamless scaling from 1 to many thousands of nodes
11 * High availability and reliability. No single point of failure.
12 * N-way replication of data across storage nodes
13 * Fast recovery from node failures
14 * Automatic rebalancing of data on node addition/removal
15 * Easy deployment: most FS components are userspace daemons
16
17Also,
18 * Flexible snapshots (on any directory)
19 * Recursive accounting (nested files, directories, bytes)
20
21In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely
22on symmetric access by all clients to shared block devices, Ceph
23separates data and metadata management into independent server
24clusters, similar to Lustre. Unlike Lustre, however, metadata and
25storage nodes run entirely as user space daemons. Storage nodes
26utilize btrfs to store data objects, leveraging its advanced features
27(checksumming, metadata replication, etc.). File data is striped
28across storage nodes in large chunks to distribute workload and
29facilitate high throughputs. When storage nodes fail, data is
30re-replicated in a distributed fashion by the storage nodes themselves
31(with some minimal coordination from a cluster monitor), making the
32system extremely efficient and scalable.
33
34Metadata servers effectively form a large, consistent, distributed
35in-memory cache above the file namespace that is extremely scalable,
36dynamically redistributes metadata in response to workload changes,
37and can tolerate arbitrary (well, non-Byzantine) node failures. The
38metadata server takes a somewhat unconventional approach to metadata
39storage to significantly improve performance for common workloads. In
40particular, inodes with only a single link are embedded in
41directories, allowing entire directories of dentries and inodes to be
42loaded into its cache with a single I/O operation. The contents of
43extremely large directories can be fragmented and managed by
44independent metadata servers, allowing scalable concurrent access.
45
46The system offers automatic data rebalancing/migration when scaling
47from a small cluster of just a few nodes to many hundreds, without
48requiring an administrator to carve the data set into static volumes or
49go through the tedious process of migrating data between servers.
50When the file system approaches full, new nodes can be easily added
51and things will "just work."
52
53Ceph includes a flexible snapshot mechanism that allows a user to create
54a snapshot on any subdirectory (and its nested contents) in the
55system. Snapshot creation and deletion are as simple as 'mkdir
56.snap/foo' and 'rmdir .snap/foo'.
57
58Ceph also provides some recursive accounting on directories for nested
59files and bytes. That is, a 'getfattr -d foo' on any directory in the
60system will reveal the total number of nested regular files and
61subdirectories, and a summation of all nested file sizes. This makes
62the identification of large disk space consumers relatively quick, as
63no 'du' or similar recursive scan of the file system is required.
64
65
66Mount Syntax
67============
68
69The basic mount syntax is:
70
71 # mount -t ceph monip[:port][,monip2[:port]...]:/[subdir] mnt
72
73You only need to specify a single monitor, as the client will get the
74full list when it connects. (However, if the monitor you specify
75happens to be down, the mount won't succeed.) The port can be left
76off if the monitor is using the default. So if the monitor is at
771.2.3.4,
78
79 # mount -t ceph 1.2.3.4:/ /mnt/ceph
80
81is sufficient. If /sbin/mount.ceph is installed, a hostname can be
82used instead of an IP address.
83
84
85
86Mount Options
87=============
88
89 ip=A.B.C.D[:N]
90 Specify the IP and/or port the client should bind to locally.
91 There is normally not much reason to do this. If the IP is not
92 specified, the client's IP address is determined by looking at the
93      address its connection to the monitor originates from.
94
95 wsize=X
96 Specify the maximum write size in bytes. By default there is no
97 maximum. Ceph will normally size writes based on the file stripe
98 size.
99
100 rsize=X
101 Specify the maximum readahead.
102
103 mount_timeout=X
104 Specify the timeout value for mount (in seconds), in the case
105 of a non-responsive Ceph file system. The default is 30
106 seconds.
107
108 rbytes
109 When stat() is called on a directory, set st_size to 'rbytes',
110 the summation of file sizes over all files nested beneath that
111 directory. This is the default.
112
113 norbytes
114 When stat() is called on a directory, set st_size to the
115 number of entries in that directory.
116
117 nocrc
118 Disable CRC32C calculation for data writes. If set, the storage node
119 must rely on TCP's error correction to detect data corruption
120 in the data payload.
121
122 noasyncreaddir
123      Disable the client's use of its local cache to satisfy readdir
124 requests. (This does not change correctness; the client uses
125 cached metadata only when a lease or capability ensures it is
126 valid.)
127
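Purely as an illustration of how these options combine with the mount syntax
above (the address, size and option choices are arbitrary examples):

    # mount -t ceph 1.2.3.4:/ /mnt/ceph -o norbytes,nocrc,wsize=1048576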
128
129More Information
130================
131
132For more information on Ceph, see the home page at
133 http://ceph.newdream.net/
134
135The Linux kernel client source tree is available at
136 git://ceph.newdream.net/git/ceph-client.git
137 git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
138
139and the source for the full system is at
140 git://ceph.newdream.net/git/ceph.git
diff --git a/Documentation/filesystems/dnotify.txt b/Documentation/filesystems/dnotify.txt
index 9f5d338ddbb8..6baf88f46859 100644
--- a/Documentation/filesystems/dnotify.txt
+++ b/Documentation/filesystems/dnotify.txt
@@ -62,38 +62,9 @@ disabled, fcntl(fd, F_NOTIFY, ...) will return -EINVAL.
62 62
63Example 63Example
64------- 64-------
65See Documentation/filesystems/dnotify_test.c for an example.
65 66
66 #define _GNU_SOURCE /* needed to get the defines */ 67NOTE
67 #include <fcntl.h> /* in glibc 2.2 this has the needed 68----
68 values defined */ 69Beginning with Linux 2.6.13, dnotify has been replaced by inotify.
69 #include <signal.h> 70See Documentation/filesystems/inotify.txt for more information on it.
70 #include <stdio.h>
71 #include <unistd.h>
72
73 static volatile int event_fd;
74
75 static void handler(int sig, siginfo_t *si, void *data)
76 {
77 event_fd = si->si_fd;
78 }
79
80 int main(void)
81 {
82 struct sigaction act;
83 int fd;
84
85 act.sa_sigaction = handler;
86 sigemptyset(&act.sa_mask);
87 act.sa_flags = SA_SIGINFO;
88 sigaction(SIGRTMIN + 1, &act, NULL);
89
90 fd = open(".", O_RDONLY);
91 fcntl(fd, F_SETSIG, SIGRTMIN + 1);
92 fcntl(fd, F_NOTIFY, DN_MODIFY|DN_CREATE|DN_MULTISHOT);
93 /* we will now be notified if any of the files
94 in "." is modified or new files are created */
95 while (1) {
96 pause();
97 printf("Got event on fd=%d\n", event_fd);
98 }
99 }
diff --git a/Documentation/filesystems/dnotify_test.c b/Documentation/filesystems/dnotify_test.c
new file mode 100644
index 000000000000..8b37b4a1e18d
--- /dev/null
+++ b/Documentation/filesystems/dnotify_test.c
@@ -0,0 +1,34 @@
1#define _GNU_SOURCE /* needed to get the defines */
2#include <fcntl.h> /* in glibc 2.2 this has the needed
3 values defined */
4#include <signal.h>
5#include <stdio.h>
6#include <unistd.h>
7
8static volatile int event_fd;
9
10static void handler(int sig, siginfo_t *si, void *data)
11{
12 event_fd = si->si_fd;
13}
14
15int main(void)
16{
17 struct sigaction act;
18 int fd;
19
20 act.sa_sigaction = handler;
21 sigemptyset(&act.sa_mask);
22 act.sa_flags = SA_SIGINFO;
23 sigaction(SIGRTMIN + 1, &act, NULL);
24
25 fd = open(".", O_RDONLY);
26 fcntl(fd, F_SETSIG, SIGRTMIN + 1);
27 fcntl(fd, F_NOTIFY, DN_MODIFY|DN_CREATE|DN_MULTISHOT);
28 /* we will now be notified if any of the files
29 in "." is modified or new files are created */
30 while (1) {
31 pause();
32 printf("Got event on fd=%d\n", event_fd);
33 }
34}
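The program is built by the kbuild Makefile added above; it should also build
standalone, for example:

    $ gcc -o dnotify_test dnotify_test.c
    $ ./dnotify_test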
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 96a44dd95e03..1e359b62c40a 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -195,7 +195,7 @@ asynchronous manner and the vaule may not be very precise. To see a precise
195snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table. 195snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
196It's slow but very precise. 196It's slow but very precise.
197 197
198Table 1-2: Contents of the statm files (as of 2.6.30-rc7) 198Table 1-2: Contents of the status files (as of 2.6.30-rc7)
199.............................................................................. 199..............................................................................
200 Field Content 200 Field Content
201 Name filename of the executable 201 Name filename of the executable
@@ -316,7 +316,7 @@ address perms offset dev inode pathname
31608049000-0804a000 rw-p 00001000 03:00 8312 /opt/test 31608049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
3170804a000-0806b000 rw-p 00000000 00:00 0 [heap] 3170804a000-0806b000 rw-p 00000000 00:00 0 [heap]
318a7cb1000-a7cb2000 ---p 00000000 00:00 0 318a7cb1000-a7cb2000 ---p 00000000 00:00 0
319a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4] 319a7cb2000-a7eb2000 rw-p 00000000 00:00 0
320a7eb2000-a7eb3000 ---p 00000000 00:00 0 320a7eb2000-a7eb3000 ---p 00000000 00:00 0
321a7eb3000-a7ed5000 rw-p 00000000 00:00 0 321a7eb3000-a7ed5000 rw-p 00000000 00:00 0
322a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 322a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
@@ -352,7 +352,6 @@ is not associated with a file:
352 [stack] = the stack of the main process 352 [stack] = the stack of the main process
353 [vdso] = the "virtual dynamic shared object", 353 [vdso] = the "virtual dynamic shared object",
354 the kernel system call handler 354 the kernel system call handler
355 [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size
356 355
357 or if empty, the mapping is anonymous. 356 or if empty, the mapping is anonymous.
358 357
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index 3015da0c6b2a..fe09a2cb1858 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
82all files in that instance (if CONFIG_NUMA is enabled) - which can be 82all files in that instance (if CONFIG_NUMA is enabled) - which can be
83adjusted on the fly via 'mount -o remount ...' 83adjusted on the fly via 'mount -o remount ...'
84 84
85mpol=default prefers to allocate memory from the local node 85mpol=default use the process allocation policy
86 (see set_mempolicy(2))
86mpol=prefer:Node prefers to allocate memory from the given Node 87mpol=prefer:Node prefers to allocate memory from the given Node
87mpol=bind:NodeList allocates memory only from nodes in NodeList 88mpol=bind:NodeList allocates memory only from nodes in NodeList
88mpol=interleave prefers to allocate from each node in turn 89mpol=interleave prefers to allocate from each node in turn
89mpol=interleave:NodeList allocates from each node of NodeList in turn 90mpol=interleave:NodeList allocates from each node of NodeList in turn
91mpol=local prefers to allocate memory from the local node
90 92
91NodeList format is a comma-separated list of decimal numbers and ranges, 93NodeList format is a comma-separated list of decimal numbers and ranges,
92a range being two hyphen-separated decimal numbers, the smallest and 94a range being two hyphen-separated decimal numbers, the smallest and
@@ -134,3 +136,5 @@ Author:
134 Christoph Rohland <cr@sap.com>, 1.12.01 136 Christoph Rohland <cr@sap.com>, 1.12.01
135Updated: 137Updated:
136 Hugh Dickins, 4 June 2007 138 Hugh Dickins, 4 June 2007
139Updated:
140 KOSAKI Motohiro, 16 Mar 2010
diff --git a/Documentation/hwmon/abituguru b/Documentation/hwmon/abituguru
index 87ffa0f5ec70..5eb3b9d5f0d5 100644
--- a/Documentation/hwmon/abituguru
+++ b/Documentation/hwmon/abituguru
@@ -30,7 +30,7 @@ Supported chips:
30 bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1 30 bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
31 You may also need to specify the fan_sensors option for these boards 31 You may also need to specify the fan_sensors option for these boards
32 fan_sensors=5 32 fan_sensors=5
33 2) There is a seperate abituguru3 driver for these motherboards, 33 2) There is a separate abituguru3 driver for these motherboards,
34 the abituguru (without the 3 !) driver will not work on these 34 the abituguru (without the 3 !) driver will not work on these
35 motherboards (and visa versa)! 35 motherboards (and visa versa)!
36 36
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 3219ee0dbfef..5ebf5af1d716 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -74,6 +74,11 @@ structure at all. You should use this to keep device-specific data.
74 /* retrieve the value */ 74 /* retrieve the value */
75 void *i2c_get_clientdata(const struct i2c_client *client); 75 void *i2c_get_clientdata(const struct i2c_client *client);
76 76
77Note that starting with kernel 2.6.34, you no longer have to set the `data' field
78to NULL in remove() or if probe() failed. The i2c-core does this
79automatically on these occasions. Those are also the only times the core will
80touch this field.
81
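A minimal sketch of the usual pattern (the my_chip structure is hypothetical
and only shown for illustration):

	/* in probe(): allocate and stash device-specific state */
	struct my_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	i2c_set_clientdata(client, chip);

	/* in any later callback: retrieve it again */
	struct my_chip *state = i2c_get_clientdata(client);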
77 82
78Accessing the client 83Accessing the client
79==================== 84====================
diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
index a10c3b6ba7c4..56941ae1f5db 100644
--- a/Documentation/input/elantech.txt
+++ b/Documentation/input/elantech.txt
@@ -333,14 +333,14 @@ byte 0:
333byte 1: 333byte 1:
334 334
335 bit 7 6 5 4 3 2 1 0 335 bit 7 6 5 4 3 2 1 0
336 x15 x14 x13 x12 x11 x10 x9 x8 336 . . . . . x10 x9 x8
337 337
338byte 2: 338byte 2:
339 339
340 bit 7 6 5 4 3 2 1 0 340 bit 7 6 5 4 3 2 1 0
341 x7 x6 x5 x4 x4 x2 x1 x0 341 x7 x6 x5 x4 x4 x2 x1 x0
342 342
343 x15..x0 = absolute x value (horizontal) 343 x10..x0 = absolute x value (horizontal)
344 344
345byte 3: 345byte 3:
346 346
@@ -350,14 +350,14 @@ byte 3:
350byte 4: 350byte 4:
351 351
352 bit 7 6 5 4 3 2 1 0 352 bit 7 6 5 4 3 2 1 0
353 y15 y14 y13 y12 y11 y10 y8 y8 353 . . . . . . y9 y8
354 354
355byte 5: 355byte 5:
356 356
357 bit 7 6 5 4 3 2 1 0 357 bit 7 6 5 4 3 2 1 0
358 y7 y6 y5 y4 y3 y2 y1 y0 358 y7 y6 y5 y4 y3 y2 y1 y0
359 359
360 y15..y0 = absolute y value (vertical) 360 y9..y0 = absolute y value (vertical)
361 361
362 362
3634.2.2 Two finger touch 3634.2.2 Two finger touch
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480ce432..c0fc1c75fd88 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -68,6 +68,22 @@ like:
68 SYN_MT_REPORT 68 SYN_MT_REPORT
69 SYN_REPORT 69 SYN_REPORT
70 70
71Here is the sequence after lifting one of the fingers:
72
73 ABS_MT_POSITION_X
74 ABS_MT_POSITION_Y
75 SYN_MT_REPORT
76 SYN_REPORT
77
78And here is the sequence after lifting the remaining finger:
79
80 SYN_MT_REPORT
81 SYN_REPORT
82
83If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
84ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
85last SYN_REPORT will be dropped by the input core, resulting in no
86zero-finger event reaching userland.
71 87
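A rough driver-side sketch of the zero-finger case described above, assuming
dev is the driver's struct input_dev:

	/* last contact lifted: empty MT report, then the frame sync */
	input_mt_sync(dev);
	input_sync(dev);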
72Event Semantics 88Event Semantics
73--------------- 89---------------
@@ -217,11 +233,6 @@ where examples can be found.
217difference between the contact position and the approaching tool position 233difference between the contact position and the approaching tool position
218could be used to derive tilt. 234could be used to derive tilt.
219[2] The list can of course be extended. 235[2] The list can of course be extended.
220[3] The multi-touch X driver is currently in the prototyping stage. At the 236[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
221time of writing (April 2009), the MT protocol is not yet merged, and the
222prototype implements finger matching, basic mouse support and two-finger
223scrolling. The project aims at improving the quality of current multi-touch
224functionality available in the Synaptics X driver, and in addition
225implement more advanced gestures.
226[4] See the section on event computation. 237[4] See the section on event computation.
227[5] See the section on finger tracking. 238[5] See the section on finger tracking.
diff --git a/Documentation/input/rotary-encoder.txt b/Documentation/input/rotary-encoder.txt
index 3a6aec40c0b0..8b4129de1d2d 100644
--- a/Documentation/input/rotary-encoder.txt
+++ b/Documentation/input/rotary-encoder.txt
@@ -75,7 +75,7 @@ and the number of steps or will clamp at the maximum and zero depending on
75the configuration. 75the configuration.
76 76
77Because GPIO to IRQ mapping is platform specific, this information must 77Because GPIO to IRQ mapping is platform specific, this information must
78be given in seperately to the driver. See the example below. 78be given in separately to the driver. See the example below.
79 79
80---------<snip>--------- 80---------<snip>---------
81 81
diff --git a/Documentation/intel_txt.txt b/Documentation/intel_txt.txt
index f40a1f030019..87c8990dbbd9 100644
--- a/Documentation/intel_txt.txt
+++ b/Documentation/intel_txt.txt
@@ -161,13 +161,15 @@ o In order to put a system into any of the sleep states after a TXT
161 has been restored, it will restore the TPM PCRs and then 161 has been restored, it will restore the TPM PCRs and then
162 transfer control back to the kernel's S3 resume vector. 162 transfer control back to the kernel's S3 resume vector.
163 In order to preserve system integrity across S3, the kernel 163 In order to preserve system integrity across S3, the kernel
164 provides tboot with a set of memory ranges (kernel 164 provides tboot with a set of memory ranges (RAM and RESERVED_KERN
165 code/data/bss, S3 resume code, and AP trampoline) that tboot 165 in the e820 table, but not any memory that BIOS might alter over
166 will calculate a MAC (message authentication code) over and then 166 the S3 transition) that tboot will calculate a MAC (message
167 seal with the TPM. On resume and once the measured environment 167 authentication code) over and then seal with the TPM. On resume
168 has been re-established, tboot will re-calculate the MAC and 168 and once the measured environment has been re-established, tboot
169 verify it against the sealed value. Tboot's policy determines 169 will re-calculate the MAC and verify it against the sealed value.
170 what happens if the verification fails. 170 Tboot's policy determines what happens if the verification fails.
171     Note that c/s 194 of tboot, which has the new MAC code, supports
172 this.
171 173
172That's pretty much it for TXT support. 174That's pretty much it for TXT support.
173 175
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 35c9b51d20ea..dd5806f4fcc4 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -291,6 +291,7 @@ Code Seq#(hex) Include File Comments
2910x92 00-0F drivers/usb/mon/mon_bin.c 2910x92 00-0F drivers/usb/mon/mon_bin.c
2920x93 60-7F linux/auto_fs.h 2920x93 60-7F linux/auto_fs.h
2930x94 all fs/btrfs/ioctl.h 2930x94 all fs/btrfs/ioctl.h
2940x97 00-7F fs/ceph/ioctl.h Ceph file system
2940x99 00-0F 537-Addinboard driver 2950x99 00-0F 537-Addinboard driver
295 <mailto:buk@buks.ipn.de> 296 <mailto:buk@buks.ipn.de>
2960xA0 all linux/sdp/sdp.h Industrial Device Project 2970xA0 all linux/sdp/sdp.h Industrial Device Project
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 3bc48b0bd3a9..567b7a8eb878 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -200,10 +200,6 @@ and is between 256 and 4096 characters. It is defined in the file
200 acpi_display_output=video 200 acpi_display_output=video
201 See above. 201 See above.
202 202
203 acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
204 early. Needed on some platforms to properly
205 initialize the EC.
206
207 acpi_irq_balance [HW,ACPI] 203 acpi_irq_balance [HW,ACPI]
208 ACPI will balance active IRQs 204 ACPI will balance active IRQs
209 default in APIC mode 205 default in APIC mode
@@ -324,15 +320,12 @@ and is between 256 and 4096 characters. It is defined in the file
324 amd_iommu= [HW,X86-84] 320 amd_iommu= [HW,X86-84]
325 Pass parameters to the AMD IOMMU driver in the system. 321 Pass parameters to the AMD IOMMU driver in the system.
326 Possible values are: 322 Possible values are:
327 isolate - enable device isolation (each device, as far
328 as possible, will get its own protection
329 domain) [default]
330 share - put every device behind one IOMMU into the
331 same protection domain
332 fullflush - enable flushing of IO/TLB entries when 323 fullflush - enable flushing of IO/TLB entries when
333 they are unmapped. Otherwise they are 324 they are unmapped. Otherwise they are
334 flushed before they will be reused, which 325 flushed before they will be reused, which
335 is a lot of faster 326 is a lot of faster
327 off - do not initialize any AMD IOMMU found in
328 the system
336 329
337 amijoy.map= [HW,JOY] Amiga joystick support 330 amijoy.map= [HW,JOY] Amiga joystick support
338 Map of devices attached to JOY0DAT and JOY1DAT 331 Map of devices attached to JOY0DAT and JOY1DAT
@@ -793,8 +786,12 @@ and is between 256 and 4096 characters. It is defined in the file
793 as early as possible in order to facilitate early 786 as early as possible in order to facilitate early
794 boot debugging. 787 boot debugging.
795 788
796 ftrace_dump_on_oops 789 ftrace_dump_on_oops[=orig_cpu]
797 [FTRACE] will dump the trace buffers on oops. 790 [FTRACE] will dump the trace buffers on oops.
791 If no parameter is passed, ftrace will dump
792 buffers of all CPUs, but if you pass orig_cpu, it will
793 dump only the buffer of the CPU that triggered the
794 oops.
798 795
799 ftrace_filter=[function-list] 796 ftrace_filter=[function-list]
800 [FTRACE] Limit the functions traced by the function 797 [FTRACE] Limit the functions traced by the function
@@ -1203,7 +1200,7 @@ and is between 256 and 4096 characters. It is defined in the file
1203 1200
1204 libata.force= [LIBATA] Force configurations. The format is comma 1201 libata.force= [LIBATA] Force configurations. The format is comma
1205 separated list of "[ID:]VAL" where ID is 1202 separated list of "[ID:]VAL" where ID is
1206 PORT[:DEVICE]. PORT and DEVICE are decimal numbers 1203 PORT[.DEVICE]. PORT and DEVICE are decimal numbers
1207 matching port, link or device. Basically, it matches 1204 matching port, link or device. Basically, it matches
1208 the ATA ID string printed on console by libata. If 1205 the ATA ID string printed on console by libata. If
1209 the whole ID part is omitted, the last PORT and DEVICE 1206 the whole ID part is omitted, the last PORT and DEVICE
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index bdb13817e1e9..3ab2472509cb 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -59,37 +59,56 @@ nice to have in other objects. The C language does not allow for the
59direct expression of inheritance, so other techniques - such as structure 59direct expression of inheritance, so other techniques - such as structure
60embedding - must be used. 60embedding - must be used.
61 61
62So, for example, the UIO code has a structure that defines the memory 62(As an aside, for those familiar with the kernel linked list implementation,
63region associated with a uio device: 63this is analogous to how "list_head" structs are rarely useful on
64their own, but are invariably found embedded in the larger objects of
65interest.)
64 66
65struct uio_mem { 67So, for example, the UIO code in drivers/uio/uio.c has a structure that
68defines the memory region associated with a uio device:
69
70 struct uio_map {
66 struct kobject kobj; 71 struct kobject kobj;
67 unsigned long addr; 72 struct uio_mem *mem;
68 unsigned long size; 73 };
69 int memtype;
70 void __iomem *internal_addr;
71};
72 74
73If you have a struct uio_mem structure, finding its embedded kobject is 75If you have a struct uio_map structure, finding its embedded kobject is
74just a matter of using the kobj member. Code that works with kobjects will 76just a matter of using the kobj member. Code that works with kobjects will
75often have the opposite problem, however: given a struct kobject pointer, 77often have the opposite problem, however: given a struct kobject pointer,
76what is the pointer to the containing structure? You must avoid tricks 78what is the pointer to the containing structure? You must avoid tricks
77(such as assuming that the kobject is at the beginning of the structure) 79(such as assuming that the kobject is at the beginning of the structure)
78and, instead, use the container_of() macro, found in <linux/kernel.h>: 80and, instead, use the container_of() macro, found in <linux/kernel.h>:
79 81
80 container_of(pointer, type, member) 82 container_of(pointer, type, member)
83
84where:
85
86 * "pointer" is the pointer to the embedded kobject,
87 * "type" is the type of the containing structure, and
88 * "member" is the name of the structure field to which "pointer" points.
89
90The return value from container_of() is a pointer to the corresponding
91container type. So, for example, a pointer "kp" to a struct kobject
92embedded *within* a struct uio_map could be converted to a pointer to the
93*containing* uio_map structure with:
94
95 struct uio_map *u_map = container_of(kp, struct uio_map, kobj);
96
97For convenience, programmers often define a simple macro for "back-casting"
98kobject pointers to the containing type. Exactly this happens in the
99earlier drivers/uio/uio.c, as you can see here:
100
101 struct uio_map {
102 struct kobject kobj;
103 struct uio_mem *mem;
104 };
81 105
82where pointer is the pointer to the embedded kobject, type is the type of 106 #define to_map(map) container_of(map, struct uio_map, kobj)
83the containing structure, and member is the name of the structure field to
84which pointer points. The return value from container_of() is a pointer to
85the given type. So, for example, a pointer "kp" to a struct kobject
86embedded within a struct uio_mem could be converted to a pointer to the
87containing uio_mem structure with:
88 107
89 struct uio_mem *u_mem = container_of(kp, struct uio_mem, kobj); 108where the macro argument "map" is a pointer to the struct kobject in
109question. That macro is subsequently invoked with:
90 110
91Programmers often define a simple macro for "back-casting" kobject pointers 111 struct uio_map *map = to_map(kobj);
92to the containing type.
93 112
94 113
95Initialization of kobjects 114Initialization of kobjects
@@ -387,4 +406,5 @@ called, and the objects in the former circle release each other.
387Example code to copy from 406Example code to copy from
388 407
389For a more complete example of using ksets and kobjects properly, see the 408For a more complete example of using ksets and kobjects properly, see the
390sample/kobject/kset-example.c code. 409example programs samples/kobject/{kobject-example.c,kset-example.c},
410which will be built as loadable modules if you select CONFIG_SAMPLE_KOBJECT.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 2f9115c0ae62..61c291cddf18 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -165,8 +165,8 @@ the user entry_handler invocation is also skipped.
165 165
1661.4 How Does Jump Optimization Work? 1661.4 How Does Jump Optimization Work?
167 167
168If you configured your kernel with CONFIG_OPTPROBES=y (currently 168If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
169this option is supported on x86/x86-64, non-preemptive kernel) and 169is automatically set to 'y' on x86/x86-64, non-preemptive kernels) and
170the "debug.kprobes_optimization" kernel parameter is set to 1 (see 170the "debug.kprobes_optimization" kernel parameter is set to 1 (see
171sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump 171sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
172instruction instead of a breakpoint instruction at each probepoint. 172instruction instead of a breakpoint instruction at each probepoint.
@@ -271,8 +271,6 @@ tweak the kernel's execution path, you need to suppress optimization,
271using one of the following techniques: 271using one of the following techniques:
272- Specify an empty function for the kprobe's post_handler or break_handler. 272- Specify an empty function for the kprobe's post_handler or break_handler.
273 or 273 or
274- Config CONFIG_OPTPROBES=n.
275 or
276- Execute 'sysctl -w debug.kprobes_optimization=n' 274- Execute 'sysctl -w debug.kprobes_optimization=n'
277 275
2782. Architectures Supported 2762. Architectures Supported
@@ -307,10 +305,6 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
307so you can use "objdump -d -l vmlinux" to see the source-to-object 305so you can use "objdump -d -l vmlinux" to see the source-to-object
308code mapping. 306code mapping.
309 307
310If you want to reduce probing overhead, set "Kprobes jump optimization
311support" (CONFIG_OPTPROBES) to "y". You can find this option under the
312"Kprobes" line.
313
3144. API Reference 3084. API Reference
315 309
316The Kprobes API includes a "register" function and an "unregister" 310The Kprobes API includes a "register" function and an "unregister"
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index ee5692b26dd4..fa688538e757 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -2,6 +2,12 @@
2 - This file 2 - This file
3acer-wmi.txt 3acer-wmi.txt
4 - information on the Acer Laptop WMI Extras driver. 4 - information on the Acer Laptop WMI Extras driver.
5asus-laptop.txt
6 - information on the Asus Laptop Extras driver.
7disk-shock-protection.txt
8 - information on hard disk shock protection.
9dslm.c
10 - Simple Disk Sleep Monitor program
5laptop-mode.txt 11laptop-mode.txt
6 - how to conserve battery power using laptop-mode. 12 - how to conserve battery power using laptop-mode.
7sony-laptop.txt 13sony-laptop.txt
diff --git a/Documentation/laptops/Makefile b/Documentation/laptops/Makefile
new file mode 100644
index 000000000000..5cb144af3c09
--- /dev/null
+++ b/Documentation/laptops/Makefile
@@ -0,0 +1,8 @@
1# kbuild trick to avoid linker error. Can be omitted if a module is built.
2obj- := dummy.o
3
4# List of programs to build
5hostprogs-y := dslm
6
7# Tell kbuild to always build the programs
8always := $(hostprogs-y)
diff --git a/Documentation/laptops/dslm.c b/Documentation/laptops/dslm.c
new file mode 100644
index 000000000000..72ff290c5fc6
--- /dev/null
+++ b/Documentation/laptops/dslm.c
@@ -0,0 +1,166 @@
1/*
2 * dslm.c
3 * Simple Disk Sleep Monitor
4 * by Bartek Kania
5 * Licenced under the GPL
6 */
7#include <unistd.h>
8#include <stdlib.h>
9#include <stdio.h>
10#include <fcntl.h>
11#include <errno.h>
12#include <time.h>
13#include <string.h>
14#include <signal.h>
15#include <sys/ioctl.h>
16#include <linux/hdreg.h>
17
18#ifdef DEBUG
19#define D(x) x
20#else
21#define D(x)
22#endif
23
24int endit = 0;
25
26/* Check if the disk is in powersave-mode
27 * Most of the code is stolen from hdparm.
28 * 1 = active, 0 = standby/sleep, -1 = unknown */
29static int check_powermode(int fd)
30{
31 unsigned char args[4] = {WIN_CHECKPOWERMODE1,0,0,0};
32 int state;
33
34 if (ioctl(fd, HDIO_DRIVE_CMD, &args)
35 && (args[0] = WIN_CHECKPOWERMODE2) /* try again with 0x98 */
36 && ioctl(fd, HDIO_DRIVE_CMD, &args)) {
37 if (errno != EIO || args[0] != 0 || args[1] != 0) {
38 state = -1; /* "unknown"; */
39 } else
40 state = 0; /* "sleeping"; */
41 } else {
42 state = (args[2] == 255) ? 1 : 0;
43 }
44 D(printf(" drive state is: %d\n", state));
45
46 return state;
47}
48
49static char *state_name(int i)
50{
51 if (i == -1) return "unknown";
52 if (i == 0) return "sleeping";
53 if (i == 1) return "active";
54
55 return "internal error";
56}
57
58static char *myctime(time_t time)
59{
60 char *ts = ctime(&time);
61 ts[strlen(ts) - 1] = 0;
62
63 return ts;
64}
65
66static void measure(int fd)
67{
68 time_t start_time;
69 int last_state;
70 time_t last_time;
71 int curr_state;
72 time_t curr_time = 0;
73 time_t time_diff;
74 time_t active_time = 0;
75 time_t sleep_time = 0;
76 time_t unknown_time = 0;
77 time_t total_time = 0;
78 int changes = 0;
79 float tmp;
80
81 printf("Starting measurements\n");
82
83 last_state = check_powermode(fd);
84 start_time = last_time = time(0);
85 printf(" System is in state %s\n\n", state_name(last_state));
86
87 while(!endit) {
88 sleep(1);
89 curr_state = check_powermode(fd);
90
91 if (curr_state != last_state || endit) {
92 changes++;
93 curr_time = time(0);
94 time_diff = curr_time - last_time;
95
96 if (last_state == 1) active_time += time_diff;
97 else if (last_state == 0) sleep_time += time_diff;
98 else unknown_time += time_diff;
99
100 last_state = curr_state;
101 last_time = curr_time;
102
103 printf("%s: State-change to %s\n", myctime(curr_time),
104 state_name(curr_state));
105 }
106 }
107 changes--; /* Compensate for SIGINT */
108
109 total_time = time(0) - start_time;
110 printf("\nTotal running time: %lus\n", curr_time - start_time);
111 printf(" State changed %d times\n", changes);
112
113 tmp = (float)sleep_time / (float)total_time * 100;
114 printf(" Time in sleep state: %lus (%.2f%%)\n", sleep_time, tmp);
115 tmp = (float)active_time / (float)total_time * 100;
116 printf(" Time in active state: %lus (%.2f%%)\n", active_time, tmp);
117 tmp = (float)unknown_time / (float)total_time * 100;
118 printf(" Time in unknown state: %lus (%.2f%%)\n", unknown_time, tmp);
119}
120
121static void ender(int s)
122{
123 endit = 1;
124}
125
126static void usage(void)
127{
128 puts("usage: dslm [-w <time>] <disk>");
129 exit(0);
130}
131
132int main(int argc, char **argv)
133{
134 int fd;
135 char *disk = 0;
136 int settle_time = 60;
137
138 /* Parse the simple command-line */
139 if (argc == 2)
140 disk = argv[1];
141 else if (argc == 4) {
142 settle_time = atoi(argv[2]);
143 disk = argv[3];
144 } else
145 usage();
146
147 if (!(fd = open(disk, O_RDONLY|O_NONBLOCK))) {
148 printf("Can't open %s, because: %s\n", disk, strerror(errno));
149 exit(-1);
150 }
151
152 if (settle_time) {
153 printf("Waiting %d seconds for the system to settle down to "
154 "'normal'\n", settle_time);
155 sleep(settle_time);
156 } else
157 puts("Not waiting for system to settle down");
158
159 signal(SIGINT, ender);
160
161 measure(fd);
162
163 close(fd);
164
165 return 0;
166}
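For reference, a typical standalone build and run might look like this (the
device name is only an example):

    $ gcc -o dslm dslm.c
    $ ./dslm -w 120 /dev/hda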
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index eeedee11c8c2..2c3c35093023 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -779,172 +779,4 @@ Monitoring tool
779--------------- 779---------------
780 780
781Bartek Kania submitted this, it can be used to measure how much time your disk 781Bartek Kania submitted this, it can be used to measure how much time your disk
782spends spun up/down. 782spends spun up/down. See Documentation/laptops/dslm.c
783
784---------------------------dslm.c BEGIN-----------------------------------------
785/*
786 * Simple Disk Sleep Monitor
787 * by Bartek Kania
788 * Licenced under the GPL
789 */
790#include <unistd.h>
791#include <stdlib.h>
792#include <stdio.h>
793#include <fcntl.h>
794#include <errno.h>
795#include <time.h>
796#include <string.h>
797#include <signal.h>
798#include <sys/ioctl.h>
799#include <linux/hdreg.h>
800
801#ifdef DEBUG
802#define D(x) x
803#else
804#define D(x)
805#endif
806
807int endit = 0;
808
809/* Check if the disk is in powersave-mode
810 * Most of the code is stolen from hdparm.
811 * 1 = active, 0 = standby/sleep, -1 = unknown */
812int check_powermode(int fd)
813{
814 unsigned char args[4] = {WIN_CHECKPOWERMODE1,0,0,0};
815 int state;
816
817 if (ioctl(fd, HDIO_DRIVE_CMD, &args)
818 && (args[0] = WIN_CHECKPOWERMODE2) /* try again with 0x98 */
819 && ioctl(fd, HDIO_DRIVE_CMD, &args)) {
820 if (errno != EIO || args[0] != 0 || args[1] != 0) {
821 state = -1; /* "unknown"; */
822 } else
823 state = 0; /* "sleeping"; */
824 } else {
825 state = (args[2] == 255) ? 1 : 0;
826 }
827 D(printf(" drive state is: %d\n", state));
828
829 return state;
830}
831
832char *state_name(int i)
833{
834 if (i == -1) return "unknown";
835 if (i == 0) return "sleeping";
836 if (i == 1) return "active";
837
838 return "internal error";
839}
840
841char *myctime(time_t time)
842{
843 char *ts = ctime(&time);
844 ts[strlen(ts) - 1] = 0;
845
846 return ts;
847}
848
849void measure(int fd)
850{
851 time_t start_time;
852 int last_state;
853 time_t last_time;
854 int curr_state;
855 time_t curr_time = 0;
856 time_t time_diff;
857 time_t active_time = 0;
858 time_t sleep_time = 0;
859 time_t unknown_time = 0;
860 time_t total_time = 0;
861 int changes = 0;
862 float tmp;
863
864 printf("Starting measurements\n");
865
866 last_state = check_powermode(fd);
867 start_time = last_time = time(0);
868 printf(" System is in state %s\n\n", state_name(last_state));
869
870 while(!endit) {
871 sleep(1);
872 curr_state = check_powermode(fd);
873
874 if (curr_state != last_state || endit) {
875 changes++;
876 curr_time = time(0);
877 time_diff = curr_time - last_time;
878
879 if (last_state == 1) active_time += time_diff;
880 else if (last_state == 0) sleep_time += time_diff;
881 else unknown_time += time_diff;
882
883 last_state = curr_state;
884 last_time = curr_time;
885
886 printf("%s: State-change to %s\n", myctime(curr_time),
887 state_name(curr_state));
888 }
889 }
890 changes--; /* Compensate for SIGINT */
891
892 total_time = time(0) - start_time;
893 printf("\nTotal running time: %lus\n", curr_time - start_time);
894 printf(" State changed %d times\n", changes);
895
896 tmp = (float)sleep_time / (float)total_time * 100;
897 printf(" Time in sleep state: %lus (%.2f%%)\n", sleep_time, tmp);
898 tmp = (float)active_time / (float)total_time * 100;
899 printf(" Time in active state: %lus (%.2f%%)\n", active_time, tmp);
900 tmp = (float)unknown_time / (float)total_time * 100;
901 printf(" Time in unknown state: %lus (%.2f%%)\n", unknown_time, tmp);
902}
903
904void ender(int s)
905{
906 endit = 1;
907}
908
909void usage()
910{
911 puts("usage: dslm [-w <time>] <disk>");
912 exit(0);
913}
914
915int main(int argc, char **argv)
916{
917 int fd;
918 char *disk = 0;
919 int settle_time = 60;
920
921 /* Parse the simple command-line */
922 if (argc == 2)
923 disk = argv[1];
924 else if (argc == 4) {
925 settle_time = atoi(argv[2]);
926 disk = argv[3];
927 } else
928 usage();
929
930 if (!(fd = open(disk, O_RDONLY|O_NONBLOCK))) {
931 printf("Can't open %s, because: %s\n", disk, strerror(errno));
932 exit(-1);
933 }
934
935 if (settle_time) {
936 printf("Waiting %d seconds for the system to settle down to "
937 "'normal'\n", settle_time);
938 sleep(settle_time);
939 } else
940 puts("Not waiting for system to settle down");
941
942 signal(SIGINT, ender);
943
944 measure(fd);
945
946 close(fd);
947
948 return 0;
949}
950---------------------------dslm.c END-------------------------------------------
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 7f5809eddee6..631ad2f1b229 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -3,6 +3,7 @@
3 ============================ 3 ============================
4 4
5By: David Howells <dhowells@redhat.com> 5By: David Howells <dhowells@redhat.com>
6 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
6 7
7Contents: 8Contents:
8 9
@@ -60,6 +61,10 @@ Contents:
60 61
61 - And then there's the Alpha. 62 - And then there's the Alpha.
62 63
64 (*) Example uses.
65
66 - Circular buffers.
67
63 (*) References. 68 (*) References.
64 69
65 70
@@ -2226,6 +2231,21 @@ The Alpha defines the Linux kernel's memory barrier model.
2226See the subsection on "Cache Coherency" above. 2231See the subsection on "Cache Coherency" above.
2227 2232
2228 2233
2234============
2235EXAMPLE USES
2236============
2237
2238CIRCULAR BUFFERS
2239----------------
2240
2241Memory barriers can be used to implement circular buffering without the need
2242of a lock to serialise the producer with the consumer. See:
2243
2244 Documentation/circular-buffers.txt
2245
2246for details.
2247
2248
2229========== 2249==========
2230REFERENCES 2250REFERENCES
2231========== 2251==========
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
index 6d8af1ac56c4..5aba7a33aeeb 100644
--- a/Documentation/networking/Makefile
+++ b/Documentation/networking/Makefile
@@ -6,3 +6,5 @@ hostprogs-y := ifenslave
6 6
7# Tell kbuild to always build the programs 7# Tell kbuild to always build the programs
8always := $(hostprogs-y) 8always := $(hostprogs-y)
9
10obj-m := timestamping/
diff --git a/Documentation/networking/skfp.txt b/Documentation/networking/skfp.txt
index abfddf81e34a..203ec66c9fb4 100644
--- a/Documentation/networking/skfp.txt
+++ b/Documentation/networking/skfp.txt
@@ -68,7 +68,7 @@ Compaq adapters (not tested):
68======================= 68=======================
69 69
70From v2.01 on, the driver is integrated in the linux kernel sources. 70From v2.01 on, the driver is integrated in the linux kernel sources.
71Therefor, the installation is the same as for any other adapter 71Therefore, the installation is the same as for any other adapter
72supported by the kernel. 72supported by the kernel.
73Refer to the manual of your distribution about the installation 73Refer to the manual of your distribution about the installation
74of network adapters. 74of network adapters.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
new file mode 100644
index 000000000000..7ee770b5ef5f
--- /dev/null
+++ b/Documentation/networking/stmmac.txt
@@ -0,0 +1,143 @@
1 STMicroelectronics 10/100/1000 Synopsys Ethernet driver
2
3Copyright (C) 2007-2010 STMicroelectronics Ltd
4Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
5
6This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
7(Synopsys IP blocks); it has been fully tested on STLinux platforms.
8
9Currently this network device driver is for all STM embedded MAC/GMAC
10(7xxx SoCs).
11
12DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
13Universal version 4.0 have been used for developing the first code
14implementation.
15
16For more information, please also visit: www.stlinux.com
17
181) Kernel Configuration
19The kernel configuration option is STMMAC_ETH:
20 Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) --->
21 STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH)
22
232) Driver parameters list:
24 debug: message level (0: no output, 16: all);
25 phyaddr: to manually provide the physical address to the PHY device;
26 dma_rxsize: DMA rx ring size;
27 dma_txsize: DMA tx ring size;
28 buf_sz: DMA buffer size;
29 tc: control the HW FIFO threshold;
30 tx_coe: Enable/Disable Tx Checksum Offload engine;
31 watchdog: transmit timeout (in milliseconds);
32 flow_ctrl: Flow control ability [on/off];
33 pause: Flow Control Pause Time;
34 tmrate: timer period (only if timer optimisation is configured).
35
363) Command line options
37Driver parameters can be also passed in command line by using:
38 stmmaceth=dma_rxsize:128,dma_txsize:512
39
404) Driver information and notes
41
424.1) Transmit process
43The xmit method is invoked when the kernel needs to transmit a packet; it sets
44the descriptors in the ring and informs the DMA engine that there is a packet
45ready to be transmitted.
46Once the controller has finished transmitting the packet, an interrupt is
47triggered, so the driver will be able to release the socket buffers.
48By default, the driver sets the NETIF_F_SG bit in the features field of the
49net_device structure enabling the scatter/gather feature.
50
514.2) Receive process
52When one or more packets are received, an interrupt happens. The interrupts
53are not queued so the driver has to scan all the descriptors in the ring during
54the receive process.
55This is based on NAPI so the interrupt handler signals only if there is work to be
56done, and it exits.
57Then the poll method will be scheduled at some future point.
58The incoming packets are stored, by the DMA, in a list of pre-allocated socket
59buffers in order to avoid the memcpy (Zero-copy).
60
614.3) Timer-Driver Interrupt
62Instead of having the device asynchronously notify the driver of frame receptions, the
63driver configures a timer to generate an interrupt at regular intervals.
64Based on the granularity of the timer, the frames that are received by the device
65will experience different levels of latency. Some NICs have a dedicated timer
66device to perform this task. STMMAC can use either the RTC device or the TMU
67channel 2 on STLinux platforms.
68The timer frequency can be passed to the driver as a parameter; when changing it,
69take care of both hardware capability and network stability/performance impact.
70Several performance tests on STM platforms showed that this optimisation spares
71the CPU while maintaining maximum throughput.
72
734.4) WOL
74The Wake-on-LAN feature through Magic Frame is only supported for the GMAC
75core.
76
774.5) DMA descriptors
78The driver handles both normal and enhanced descriptors. The latter have only been
79tested on DWC Ether MAC 10/100/1000 Universal version 3.41a.
80
814.6) Ethtool support
82Ethtool is supported. Driver statistics and internal errors can be obtained with the
83ethtool -S ethX command. It is also possible to dump registers etc.
84
854.7) Jumbo and Segmentation Offloading
86Jumbo frames are supported and tested for the GMAC.
87GSO has also been added, but it is performed in software.
88LRO is not supported.
89
904.8) Physical
91The driver is compatible with the PAL (PHY abstraction layer) and works with PHY and GPHY devices.
92
934.9) Platform information
94Several pieces of information come from the platform; please refer to the
95driver's header file in the include/linux directory.
96
97struct plat_stmmacenet_data {
98 int bus_id;
99 int pbl;
100 int has_gmac;
101 void (*fix_mac_speed)(void *priv, unsigned int speed);
102 void (*bus_setup)(unsigned long ioaddr);
103#ifdef CONFIG_STM_DRIVERS
104 struct stm_pad_config *pad_config;
105#endif
106 void *bsp_priv;
107};
108
109Where:
110- pbl (Programmable Burst Length) is the maximum number of
111 beats to be transferred in one DMA transaction.
112 GMAC also enables the 4xPBL by default.
113- fix_mac_speed and bus_setup are used to configure internal target
114 registers (on STM platforms);
115- has_gmac: GMAC core is on board (get it at run-time in the next step);
116- bus_id: bus identifier.
117
118struct plat_stmmacphy_data {
119 int bus_id;
120 int phy_addr;
121 unsigned int phy_mask;
122 int interface;
123 int (*phy_reset)(void *priv);
124 void *priv;
125};
126
127Where:
128- bus_id: bus identifier;
129- phy_addr: physical address used for the attached phy device;
130 set it to -1 to get it at run-time;
131- interface: physical MII interface mode;
132- phy_reset: hook to reset HW function.
133
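As an illustration only (the field values and structure instances below are
hypothetical, not taken from a real board file), the two structures might be
filled in like this:

	static struct plat_stmmacenet_data my_eth_data = {
		.bus_id   = 0,
		.pbl      = 32,		/* programmable burst length */
		.has_gmac = 1,		/* GMAC core present */
	};

	static struct plat_stmmacphy_data my_phy_data = {
		.bus_id    = 0,
		.phy_addr  = -1,	/* resolve the address at run-time */
		.phy_mask  = 0,
		.interface = PHY_INTERFACE_MODE_MII,
	};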
134TODO:
135- Continue to make the driver more generic and suitable for other Synopsys
136 Ethernet controllers used on other architectures (i.e. ARM).
137- 10G controllers are not supported.
138- MAC uses Normal descriptors and GMAC uses enhanced ones.
139 This is a limit that should be reviewed. MAC could want to
140 use the enhanced structure.
141- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
142- Review the timer optimisation code to use an embedded device that seems to be
143 available in new chip generations.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 0e58b4539176..e8c8f4f06c67 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
41SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. 41SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
42SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the 42SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
43following control message: 43following control message:
44 struct scm_timestamping { 44
45 struct timespec systime; 45struct scm_timestamping {
46 struct timespec hwtimetrans; 46 struct timespec systime;
47 struct timespec hwtimeraw; 47 struct timespec hwtimetrans;
48 }; 48 struct timespec hwtimeraw;
49};
49 50
50recvmsg() can be used to get this control message for regular incoming 51recvmsg() can be used to get this control message for regular incoming
51packets. For send time stamps the outgoing packet is looped back to 52packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty without that support.
87SIOCSHWTSTAMP: 88SIOCSHWTSTAMP:
88 89
89Hardware time stamping must also be initialized for each device driver 90Hardware time stamping must also be initialized for each device driver
90that is expected to do hardware time stamping. The parameter is: 91that is expected to do hardware time stamping. The parameter is defined in
92/include/linux/net_tstamp.h as:
91 93
92struct hwtstamp_config { 94struct hwtstamp_config {
93 int flags; /* no flags defined right now, must be zero */ 95 int flags; /* no flags defined right now, must be zero */
94 int tx_type; /* HWTSTAMP_TX_* */ 96 int tx_type; /* HWTSTAMP_TX_* */
95 int rx_filter; /* HWTSTAMP_FILTER_* */ 97 int rx_filter; /* HWTSTAMP_FILTER_* */
96}; 98};
97 99
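A user space sketch of issuing the ioctl could look like the following,
assuming sock is an open socket and using <linux/net_tstamp.h>,
<linux/sockios.h> and <net/if.h> (the interface name and chosen values are
only examples):

	struct hwtstamp_config config;
	struct ifreq ifr;

	memset(&config, 0, sizeof(config));
	config.tx_type   = HWTSTAMP_TX_ON;
	config.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (void *)&config;

	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");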
98Desired behavior is passed into the kernel and to a specific device by 100Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
139 /* time stamp any incoming packet */ 141 /* time stamp any incoming packet */
140 HWTSTAMP_FILTER_ALL, 142 HWTSTAMP_FILTER_ALL,
141 143
142 /* return value: time stamp all packets requested plus some others */ 144 /* return value: time stamp all packets requested plus some others */
143 HWTSTAMP_FILTER_SOME, 145 HWTSTAMP_FILTER_SOME,
144 146
145 /* PTP v1, UDP, any kind of event packet */ 147 /* PTP v1, UDP, any kind of event packet */
146 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 148 HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
147 149
148 ... 150 /* for the complete list of values, please check
151 * the include file /include/linux/net_tstamp.h
152 */
149}; 153};
150 154
151 155
152DEVICE IMPLEMENTATION 156DEVICE IMPLEMENTATION
153 157
154A driver which supports hardware time stamping must support the 158A driver which supports hardware time stamping must support the
155SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored 159SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
156in the skb with skb_hwtstamp_set(). 160the actual values as described in the section on SIOCSHWTSTAMP.
161
162Time stamps for received packets must be stored in the skb. To get a pointer
163to the shared time stamp structure of the skb call skb_hwtstamps(). Then
164set the time stamps in the structure:
165
166struct skb_shared_hwtstamps {
167 /* hardware time stamp transformed into duration
168 * since arbitrary point in time
169 */
170 ktime_t hwtstamp;
171 ktime_t syststamp; /* hwtstamp transformed to system time base */
172};
157 173
158Time stamps for outgoing packets are to be generated as follows: 174Time stamps for outgoing packets are to be generated as follows:
159- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware() 175- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero.
160 returns non-zero. If yes, then the driver is expected 176 If yes, then the driver is expected to do hardware time stamping.
161 to do hardware time stamping.
162- If this is possible for the skb and requested, then declare 177- If this is possible for the skb and requested, then declare
163 that the driver is doing the time stamping by calling 178 that the driver is doing the time stamping by setting the field
164 skb_hwtstamp_tx_in_progress(). A driver not supporting 179 skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
165 hardware time stamping doesn't do that. A driver must never 180 to the associated skb for the next step and not free the skb. A driver
166 touch sk_buff::tstamp! It is used to store how time stamping 181 not supporting hardware time stamping doesn't do that. A driver must
167 for an outgoing packets is to be done. 182 never touch sk_buff::tstamp! It is used to store software generated
183 time stamps by the network subsystem.
168- As soon as the driver has sent the packet and/or obtained a 184- As soon as the driver has sent the packet and/or obtained a
169 hardware time stamp for it, it passes the time stamp back by 185 hardware time stamp for it, it passes the time stamp back by
170 calling skb_hwtstamp_tx() with the original skb, the raw 186 calling skb_hwtstamp_tx() with the original skb, the raw
171 hardware time stamp and a handle to the device (necessary 187 hardware time stamp. skb_hwtstamp_tx() clones the original skb and
172 to convert the hardware time stamp to system time). If obtaining 188 adds the timestamps, therefore the original skb has to be freed now.
173 the hardware time stamp somehow fails, then the driver should 189 If obtaining the hardware time stamp somehow fails, then the driver
174 not fall back to software time stamping. The rationale is that 190 should not fall back to software time stamping. The rationale is that
175 this would occur at a later time in the processing pipeline 191 this would occur at a later time in the processing pipeline than other
176 than other software time stamping and therefore could lead 192 software time stamping and therefore could lead to unexpected deltas
177 to unexpected deltas between time stamps. 193 between time stamps.
178- If the driver did not call skb_hwtstamp_tx_in_progress(), then 194- If the driver did not set skb_tx(skb)->in_progress, then
179 dev_hard_start_xmit() checks whether software time stamping 195 dev_hard_start_xmit() checks whether software time stamping
180 is wanted as fallback and potentially generates the time stamp. 196 is wanted as fallback and potentially generates the time stamp.
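To make the driver-side steps above concrete, here is a minimal sketch of how a driver might fill in the shared time stamp structure on receive and later hand a transmit stamp back. Everything prefixed with example_ (the private structure and the register read returning a raw nanosecond count) is hypothetical, and the helper names simply follow the text above; they have changed across kernel versions, so check the headers of the kernel you target rather than treating this as a reference implementation.

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/ktime.h>
	#include <linux/string.h>

	/* example_priv and example_read_rx_stamp() are made-up placeholders. */
	static void example_rx_hwtstamp(struct example_priv *priv, struct sk_buff *skb)
	{
		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
		u64 raw_ns = example_read_rx_stamp(priv);	/* raw HW counter, in ns */

		memset(hwts, 0, sizeof(*hwts));
		hwts->hwtstamp = ns_to_ktime(raw_ns);
		/* hwts->syststamp may also be filled in when the driver can
		 * convert the raw counter to the system time base. */
	}

	/* Called on TX completion; per the text above the driver set
	 * skb_tx(skb)->in_progress in hard_start_xmit() and kept the skb. */
	static void example_tx_complete(struct sk_buff *skb, u64 raw_ns)
	{
		struct skb_shared_hwtstamps hwts = {
			.hwtstamp = ns_to_ktime(raw_ns),
		};

		skb_hwtstamp_tx(skb, &hwts);	/* clones the skb, attaches the stamp */
		dev_kfree_skb_any(skb);		/* the original skb is freed now */
	}
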
diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile
index 2a1489fdc036..e79973443e9f 100644
--- a/Documentation/networking/timestamping/Makefile
+++ b/Documentation/networking/timestamping/Makefile
@@ -1,6 +1,13 @@
1CPPFLAGS = -I../../../include 1# kbuild trick to avoid linker error. Can be omitted if a module is built.
2obj- := dummy.o
2 3
3timestamping: timestamping.c 4# List of programs to build
5hostprogs-y := timestamping
6
7# Tell kbuild to always build the programs
8always := $(hostprogs-y)
9
10HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include
4 11
5clean: 12clean:
6 rm -f timestamping 13 rm -f timestamping
diff --git a/Documentation/networking/timestamping/timestamping.c b/Documentation/networking/timestamping/timestamping.c
index a7936fe8444a..8ba82bfe6a33 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/Documentation/networking/timestamping/timestamping.c
@@ -41,9 +41,9 @@
41#include <arpa/inet.h> 41#include <arpa/inet.h>
42#include <net/if.h> 42#include <net/if.h>
43 43
44#include "asm/types.h" 44#include <asm/types.h>
45#include "linux/net_tstamp.h" 45#include <linux/net_tstamp.h>
46#include "linux/errqueue.h" 46#include <linux/errqueue.h>
47 47
48#ifndef SO_TIMESTAMPING 48#ifndef SO_TIMESTAMPING
49# define SO_TIMESTAMPING 37 49# define SO_TIMESTAMPING 37
@@ -164,7 +164,7 @@ static void printpacket(struct msghdr *msg, int res,
164 164
165 gettimeofday(&now, 0); 165 gettimeofday(&now, 0);
166 166
167 printf("%ld.%06ld: received %s data, %d bytes from %s, %d bytes control messages\n", 167 printf("%ld.%06ld: received %s data, %d bytes from %s, %zu bytes control messages\n",
168 (long)now.tv_sec, (long)now.tv_usec, 168 (long)now.tv_sec, (long)now.tv_usec,
169 (recvmsg_flags & MSG_ERRQUEUE) ? "error" : "regular", 169 (recvmsg_flags & MSG_ERRQUEUE) ? "error" : "regular",
170 res, 170 res,
@@ -173,7 +173,7 @@ static void printpacket(struct msghdr *msg, int res,
173 for (cmsg = CMSG_FIRSTHDR(msg); 173 for (cmsg = CMSG_FIRSTHDR(msg);
174 cmsg; 174 cmsg;
175 cmsg = CMSG_NXTHDR(msg, cmsg)) { 175 cmsg = CMSG_NXTHDR(msg, cmsg)) {
176 printf(" cmsg len %d: ", cmsg->cmsg_len); 176 printf(" cmsg len %zu: ", cmsg->cmsg_len);
177 switch (cmsg->cmsg_level) { 177 switch (cmsg->cmsg_level) {
178 case SOL_SOCKET: 178 case SOL_SOCKET:
179 printf("SOL_SOCKET "); 179 printf("SOL_SOCKET ");
@@ -370,7 +370,7 @@ int main(int argc, char **argv)
370 } 370 }
371 371
372 sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP); 372 sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
373 if (socket < 0) 373 if (sock < 0)
374 bail("socket"); 374 bail("socket");
375 375
376 memset(&device, 0, sizeof(device)); 376 memset(&device, 0, sizeof(device));
diff --git a/Documentation/pnp.txt b/Documentation/pnp.txt
index a327db67782a..763e4659bf18 100644
--- a/Documentation/pnp.txt
+++ b/Documentation/pnp.txt
@@ -57,7 +57,7 @@ PC standard floppy disk controller
57# cat resources 57# cat resources
58DISABLED 58DISABLED
59 59
60- Notice the string "DISABLED". THis means the device is not active. 60- Notice the string "DISABLED". This means the device is not active.
61 61
623.) check the device's possible configurations (optional) 623.) check the device's possible configurations (optional)
63# cat options 63# cat options
@@ -139,7 +139,7 @@ Plug and Play but it is planned to be in the near future.
139 139
140Requirements for a Linux PnP protocol: 140Requirements for a Linux PnP protocol:
1411.) the protocol must use EISA IDs 1411.) the protocol must use EISA IDs
1422.) the protocol must inform the PnP Layer of a devices current configuration 1422.) the protocol must inform the PnP Layer of a device's current configuration
143- the ability to set resources is optional but preferred. 143- the ability to set resources is optional but preferred.
144 144
145The following are PnP protocol related functions: 145The following are PnP protocol related functions:
@@ -158,7 +158,7 @@ pnp_remove_device
158- automatically will free mem used by the device and related structures 158- automatically will free mem used by the device and related structures
159 159
160pnp_add_id 160pnp_add_id
161- adds a EISA ID to the list of supported IDs for the specified device 161- adds an EISA ID to the list of supported IDs for the specified device
162 162
163For more information consult the source of a protocol such as 163For more information consult the source of a protocol such as
164/drivers/pnp/pnpbios/core.c. 164/drivers/pnp/pnpbios/core.c.
@@ -167,7 +167,7 @@ For more information consult the source of a protocol such as
167 167
168Linux Plug and Play Drivers 168Linux Plug and Play Drivers
169--------------------------- 169---------------------------
170 This section contains information for linux PnP driver developers. 170 This section contains information for Linux PnP driver developers.
171 171
172The New Way 172The New Way
173........... 173...........
@@ -235,11 +235,10 @@ static int __init serial8250_pnp_init(void)
235The Old Way 235The Old Way
236........... 236...........
237 237
238a series of compatibility functions have been created to make it easy to convert 238A series of compatibility functions have been created to make it easy to convert
239
240ISAPNP drivers. They should serve as a temporary solution only. 239ISAPNP drivers. They should serve as a temporary solution only.
241 240
242they are as follows: 241They are as follows:
243 242
244struct pnp_card *pnp_find_card(unsigned short vendor, 243struct pnp_card *pnp_find_card(unsigned short vendor,
245 unsigned short device, 244 unsigned short device,
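As a concrete illustration of the old-way helpers listed above, a legacy ISAPnP-style driver might walk the cards it knows about roughly as follows. The vendor/device IDs are made up, and the pnp_find_dev() call and the ISAPNP_* macros are assumptions to check against include/linux/pnp.h and include/linux/isapnp.h; treat this as a sketch of the pattern, not a drop-in probe routine.

	#include <linux/pnp.h>
	#include <linux/isapnp.h>
	#include <linux/errno.h>

	static int example_probe_legacy(void)
	{
		struct pnp_card *card = NULL;
		struct pnp_dev *dev;

		/* IDs below are purely illustrative. */
		while ((card = pnp_find_card(ISAPNP_VENDOR('A', 'B', 'C'),
					     ISAPNP_DEVICE(0x1234), card))) {
			dev = pnp_find_dev(card, ISAPNP_VENDOR('A', 'B', 'C'),
					   ISAPNP_FUNCTION(0x0001), NULL);
			if (dev)
				return 0;	/* found; claim resources here */
		}
		return -ENODEV;
	}
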
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index ab00eeddecaf..55b859b3bc72 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -256,7 +256,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
256 to suspend the device again in future 256 to suspend the device again in future
257 257
258 int pm_runtime_resume(struct device *dev); 258 int pm_runtime_resume(struct device *dev);
259 - execute the subsystem-leve resume callback for the device; returns 0 on 259 - execute the subsystem-level resume callback for the device; returns 0 on
260 success, 1 if the device's run-time PM status was already 'active' or 260 success, 1 if the device's run-time PM status was already 'active' or
261 error code on failure, where -EAGAIN means it may be safe to attempt to 261 error code on failure, where -EAGAIN means it may be safe to attempt to
262 resume the device again in future, but 'power.runtime_error' should be 262 resume the device again in future, but 'power.runtime_error' should be
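The return convention described above (0 on success, 1 if the device was already active, a negative error code on failure, with -EAGAIN meaning a later retry may work) can be consumed in a driver roughly as follows; example_start_io() and the surrounding driver are hypothetical, so take this only as a sketch.

	#include <linux/pm_runtime.h>
	#include <linux/device.h>

	static int example_start_io(struct device *dev)
	{
		int ret = pm_runtime_resume(dev);

		if (ret < 0)
			return ret;	/* includes -EAGAIN: caller may retry later */

		/* ret is 0 or 1: the device is now active, I/O can start */
		return 0;
	}
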
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
index 6e37be1eeb2d..4f8930263dd9 100644
--- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
@@ -21,6 +21,15 @@ Required properties:
21- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the 21- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the
22 threads. 22 threads.
23 23
24Optional properties:
25- fsl,firmware-phandle:
26 Usage: required only if there is no fsl,qe-firmware child node
27 Value type: <phandle>
28 Definition: Points to a firmware node (see "QE Firmware Node" below)
29 that contains the firmware that should be uploaded for this QE.
30 The compatible property for the firmware node should say,
31 "fsl,qe-firmware".
32
24Recommended properties 33Recommended properties
25- brg-frequency : the internal clock source frequency for baud-rate 34- brg-frequency : the internal clock source frequency for baud-rate
26 generators in Hz. 35 generators in Hz.
@@ -59,3 +68,48 @@ Example:
59 reg = <0 c000>; 68 reg = <0 c000>;
60 }; 69 };
61 }; 70 };
71
72* QE Firmware Node
73
74This node defines a firmware binary that is embedded in the device tree, for
75the purpose of passing the firmware from bootloader to the kernel, or from
76the hypervisor to the guest.
77
78The firmware node itself contains the firmware binary contents, a compatible
79property, and any firmware-specific properties. The node should be placed
80inside a QE node that needs it. Doing so eliminates the need for a
81fsl,firmware-phandle property. Other QE nodes that need the same firmware
82should define an fsl,firmware-phandle property that points to the firmware node
83in the first QE node.
84
85The fsl,firmware property can be specified in the DTS (possibly using incbin)
86or can be inserted by the boot loader at boot time.
87
88Required properties:
89 - compatible
90 Usage: required
91 Value type: <string>
92 Definition: A standard property. Specify a string that indicates what
93 kind of firmware it is. For QE, this should be "fsl,qe-firmware".
94
95 - fsl,firmware
96 Usage: required
97 Value type: <prop-encoded-array>, encoded as an array of bytes
98 Definition: A standard property. This property contains the firmware
99 binary "blob".
100
101Example:
102 qe1@e0080000 {
103 compatible = "fsl,qe";
104 qe_firmware:qe-firmware {
105 compatible = "fsl,qe-firmware";
106 fsl,firmware = [0x70 0xcd 0x00 0x00 0x01 0x46 0x45 ...];
107 };
108 ...
109 };
110
111 qe2@e0090000 {
112 compatible = "fsl,qe";
113 fsl,firmware-phandle = <&qe_firmware>;
114 ...
115 };
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index aae8355d3166..221f38be98f4 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -190,3 +190,61 @@ Example:
190 for (node = rb_first(&mytree); node; node = rb_next(node)) 190 for (node = rb_first(&mytree); node; node = rb_next(node))
191 printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring); 191 printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);
192 192
193Support for Augmented rbtrees
194-----------------------------
195
196Augmented rbtree is an rbtree with "some" additional data stored in each node.
197This data can be used to augment some new functionality to rbtree.
198Augmented rbtree is an optional feature built on top of basic rbtree
199infrastructure. An rbtree user who wants this feature will have an augment
200callback function initialized in rb_root.
201
202This callback function will be called from rbtree core routines whenever
203a node has a change in one or both of its children. It is the responsibility
204of the callback function to recalculate the additional data that is in the
205rb node using new children information. Note that if this new additional
206data affects the parent node's additional data, then the callback function has
207to handle it and do the recursive updates.
208
209
210Interval tree is an example of augmented rb tree. Reference -
211"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein.
212More details about interval trees:
213
214Classical rbtree has a single key and it cannot be directly used to store
215interval ranges like [lo:hi] and do a quick lookup for any overlap with a new
216lo:hi or to find whether there is an exact match for a new lo:hi.
217
218However, rbtree can be augmented to store such interval ranges in a structured
219way making it possible to do efficient lookup and exact match.
220
221This "extra information" stored in each node is the maximum hi
222(max_hi) value among all the nodes that are its descendants. This
223information can be maintained at each node just by looking at the node
224and its immediate children. And this will be used in O(log n) lookup
225for lowest match (lowest start address among all possible matches)
226with something like:
227
228find_lowest_match(lo, hi, node)
229{
230 lowest_match = NULL;
231 while (node) {
232 if (max_hi(node->left) > lo) {
233 // Lowest overlap if any must be on left side
234 node = node->left;
235 } else if (overlap(lo, hi, node)) {
236 lowest_match = node;
237 break;
238 } else if (lo > node->lo) {
239 // Lowest overlap if any must be on right side
240 node = node->right;
241 } else {
242 break;
243 }
244 }
245 return lowest_match;
246}
247
248Finding exact match will be to first find lowest match and then to follow
249successor nodes looking for exact match, until the start of a node is beyond
250the hi value we are looking for.
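The pseudo-code above relies on max_hi being kept correct at every node. A minimal sketch of that maintenance step is shown below; struct interval_node and its field names are hypothetical, and the wiring into the rbtree core (the augment callback mentioned earlier) is left out because its exact form depends on the kernel version.

	#include <linux/rbtree.h>

	struct interval_node {
		struct rb_node rb;
		unsigned long lo, hi;
		unsigned long max_hi;	/* maximum hi over this whole subtree */
	};

	/* Recompute max_hi for one node from its own range and its children.
	 * The augment callback would call this for every node whose children
	 * changed, propagating upwards while the stored value keeps changing. */
	static void interval_update_max_hi(struct interval_node *node)
	{
		unsigned long max_hi = node->hi;
		struct interval_node *child;

		if (node->rb.rb_left) {
			child = rb_entry(node->rb.rb_left, struct interval_node, rb);
			if (child->max_hi > max_hi)
				max_hi = child->max_hi;
		}
		if (node->rb.rb_right) {
			child = rb_entry(node->rb.rb_right, struct interval_node, rb);
			if (child->max_hi > max_hi)
				max_hi = child->max_hi;
		}
		node->max_hi = max_hi;
	}
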
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
index 6f5ceb0f09fc..85f3280d7ef6 100644
--- a/Documentation/s390/kvm.txt
+++ b/Documentation/s390/kvm.txt
@@ -102,7 +102,7 @@ args: unsigned long
102see also: include/linux/kvm.h 102see also: include/linux/kvm.h
103This ioctl stores the state of the cpu at the guest real address given as 103This ioctl stores the state of the cpu at the guest real address given as
104argument, unless one of the following values defined in include/linux/kvm.h 104argument, unless one of the following values defined in include/linux/kvm.h
105is given as arguement: 105is given as argument:
106KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in 106KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
107absolute lowcore as defined by the principles of operation 107absolute lowcore as defined by the principles of operation
108KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in 108KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 6f33593e59e2..8239ebbcddce 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -211,7 +211,7 @@ provide fair CPU time to each such task group. For example, it may be
211desirable to first provide fair CPU time to each user on the system and then to 211desirable to first provide fair CPU time to each user on the system and then to
212each task belonging to a user. 212each task belonging to a user.
213 213
214CONFIG_GROUP_SCHED strives to achieve exactly that. It lets tasks to be 214CONFIG_CGROUP_SCHED strives to achieve exactly that. It lets tasks to be
215grouped and divides CPU time fairly among such groups. 215grouped and divides CPU time fairly among such groups.
216 216
217CONFIG_RT_GROUP_SCHED permits to group real-time (i.e., SCHED_FIFO and 217CONFIG_RT_GROUP_SCHED permits to group real-time (i.e., SCHED_FIFO and
@@ -220,38 +220,11 @@ SCHED_RR) tasks.
220CONFIG_FAIR_GROUP_SCHED permits to group CFS (i.e., SCHED_NORMAL and 220CONFIG_FAIR_GROUP_SCHED permits to group CFS (i.e., SCHED_NORMAL and
221SCHED_BATCH) tasks. 221SCHED_BATCH) tasks.
222 222
223At present, there are two (mutually exclusive) mechanisms to group tasks for 223 These options need CONFIG_CGROUPS to be defined, and let the administrator
224CPU bandwidth control purposes:
225
226 - Based on user id (CONFIG_USER_SCHED)
227
228 With this option, tasks are grouped according to their user id.
229
230 - Based on "cgroup" pseudo filesystem (CONFIG_CGROUP_SCHED)
231
232 This options needs CONFIG_CGROUPS to be defined, and lets the administrator
233 create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See 224 create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See
234 Documentation/cgroups/cgroups.txt for more information about this filesystem. 225 Documentation/cgroups/cgroups.txt for more information about this filesystem.
235 226
236Only one of these options to group tasks can be chosen and not both. 227When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
237
238When CONFIG_USER_SCHED is defined, a directory is created in sysfs for each new
239user and a "cpu_share" file is added in that directory.
240
241 # cd /sys/kernel/uids
242 # cat 512/cpu_share # Display user 512's CPU share
243 1024
244 # echo 2048 > 512/cpu_share # Modify user 512's CPU share
245 # cat 512/cpu_share # Display user 512's CPU share
246 2048
247 #
248
249CPU bandwidth between two users is divided in the ratio of their CPU shares.
250For example: if you would like user "root" to get twice the bandwidth of user
251"guest," then set the cpu_share for both the users such that "root"'s cpu_share
252is twice "guest"'s cpu_share.
253
254When CONFIG_CGROUP_SCHED is defined, a "cpu.shares" file is created for each
255group created using the pseudo filesystem. See example steps below to create 228group created using the pseudo filesystem. See example steps below to create
256task groups and modify their CPU share using the "cgroups" pseudo filesystem. 229task groups and modify their CPU share using the "cgroups" pseudo filesystem.
257 230
@@ -273,24 +246,3 @@ task groups and modify their CPU share using the "cgroups" pseudo filesystem.
273 246
274 # #Launch gmplayer (or your favourite movie player) 247 # #Launch gmplayer (or your favourite movie player)
275 # echo <movie_player_pid> > multimedia/tasks 248 # echo <movie_player_pid> > multimedia/tasks
276
2778. Implementation note: user namespaces
278
279User namespaces are intended to be hierarchical. But they are currently
280only partially implemented. Each of those has ramifications for CFS.
281
282First, since user namespaces are hierarchical, the /sys/kernel/uids
283presentation is inadequate. Eventually we will likely want to use sysfs
284tagging to provide private views of /sys/kernel/uids within each user
285namespace.
286
287Second, the hierarchical nature is intended to support completely
288unprivileged use of user namespaces. So if using user groups, then
289we want the users in a user namespace to be children of the user
290who created it.
291
292That is currently unimplemented. So instead, every user in a new
293user namespace will receive 1024 shares just like any user in the
294initial user namespace. Note that at the moment creation of a new
295user namespace requires each of CAP_SYS_ADMIN, CAP_SETUID, and
296CAP_SETGID.
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 86eabe6c3419..605b0d40329d 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -126,23 +126,12 @@ priority!
1262.3 Basis for grouping tasks 1262.3 Basis for grouping tasks
127---------------------------- 127----------------------------
128 128
129There are two compile-time settings for allocating CPU bandwidth. These are 129Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
130configured using the "Basis for grouping tasks" multiple choice menu under 130CPU bandwidth to task groups.
131General setup > Group CPU Scheduler:
132
133a. CONFIG_USER_SCHED (aka "Basis for grouping tasks" = "user id")
134
135This lets you use the virtual files under
136"/sys/kernel/uids/<uid>/cpu_rt_runtime_us" to control he CPU time reserved for
137each user .
138
139The other option is:
140
141.o CONFIG_CGROUP_SCHED (aka "Basis for grouping tasks" = "Control groups")
142 131
143This uses the /cgroup virtual file system and 132This uses the /cgroup virtual file system and
144"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each 133"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
145control group instead. 134control group.
146 135
147For more information on working with control groups, you should read 136For more information on working with control groups, you should read
148Documentation/cgroups/cgroups.txt as well. 137Documentation/cgroups/cgroups.txt as well.
@@ -161,8 +150,7 @@ For now, this can be simplified to just the following (but see Future plans):
161=============== 150===============
162 151
163There is work in progress to make the scheduling period for each group 152There is work in progress to make the scheduling period for each group
164("/sys/kernel/uids/<uid>/cpu_rt_period_us" or 153("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
165"/cgroup/<cgroup>/cpu.rt_period_us" respectively) configurable as well.
166 154
167The constraint on the period is that a subgroup must have a smaller or 155The constraint on the period is that a subgroup must have a smaller or
168equal period to its parent. But realistically its not very useful _yet_ 156equal period to its parent. But realistically its not very useful _yet_
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index ff19a52fe004..2ffc1148eb95 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -989,8 +989,8 @@ Changes from 20040709 to 20040716
989 * Remove redundant port_cmp != 2 check in if 989 * Remove redundant port_cmp != 2 check in if
990 (!port_cmp) { .... if (port_cmp != 2).... } 990 (!port_cmp) { .... if (port_cmp != 2).... }
991 * Clock changes: removed struct clk_data and timerList. 991 * Clock changes: removed struct clk_data and timerList.
992 * Clock changes: seperate nodev_tmo and els_retry_delay into 2 992 * Clock changes: separate nodev_tmo and els_retry_delay into 2
993 seperate timers and convert to 1 argument changed 993 separate timers and convert to 1 argument changed
994 LPFC_NODE_FARP_PEND_t to struct lpfc_node_farp_pend convert 994 LPFC_NODE_FARP_PEND_t to struct lpfc_node_farp_pend convert
995 ipfarp_tmo to 1 argument convert target struct tmofunc and 995 ipfarp_tmo to 1 argument convert target struct tmofunc and
996 rtplunfunc to 1 argument * cr_count, cr_delay and 996 rtplunfunc to 1 argument * cr_count, cr_delay and
@@ -1514,7 +1514,7 @@ Changes from 20040402 to 20040409
1514 * Remove unused elxclock declaration in elx_sli.h. 1514 * Remove unused elxclock declaration in elx_sli.h.
1515 * Since everywhere IOCB_ENTRY is used, the return value is cast, 1515 * Since everywhere IOCB_ENTRY is used, the return value is cast,
1516 move the cast into the macro. 1516 move the cast into the macro.
1517 * Split ioctls out into seperate files 1517 * Split ioctls out into separate files
1518 1518
1519Changes from 20040326 to 20040402 1519Changes from 20040326 to 20040402
1520 1520
@@ -1534,7 +1534,7 @@ Changes from 20040326 to 20040402
1534 * Unused variable cleanup 1534 * Unused variable cleanup
1535 * Use Linux list macros for DMABUF_t 1535 * Use Linux list macros for DMABUF_t
1536 * Break up ioctls into 3 sections, dfc, util, hbaapi 1536 * Break up ioctls into 3 sections, dfc, util, hbaapi
1537 rearranged code so this could be easily seperated into a 1537 rearranged code so this could be easily separated into a
1538 differnet module later All 3 are currently turned on by 1538 differnet module later All 3 are currently turned on by
1539 defines in lpfc_ioctl.c LPFC_DFC_IOCTL, LPFC_UTIL_IOCTL, 1539 defines in lpfc_ioctl.c LPFC_DFC_IOCTL, LPFC_UTIL_IOCTL,
1540 LPFC_HBAAPI_IOCTL 1540 LPFC_HBAAPI_IOCTL
@@ -1551,7 +1551,7 @@ Changes from 20040326 to 20040402
1551 started by lpfc_online(). lpfc_offline() only stopped 1551 started by lpfc_online(). lpfc_offline() only stopped
1552 els_timeout routine. It now stops all timeout routines 1552 els_timeout routine. It now stops all timeout routines
1553 associated with that hba. 1553 associated with that hba.
1554 * Replace seperate next and prev pointers in struct 1554 * Replace separate next and prev pointers in struct
1555 lpfc_bindlist with list_head type. In elxHBA_t, replace 1555 lpfc_bindlist with list_head type. In elxHBA_t, replace
1556 fc_nlpbind_start and _end with fc_nlpbind_list and use 1556 fc_nlpbind_start and _end with fc_nlpbind_list and use
1557 list_head macros to access it. 1557 list_head macros to access it.
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
index 5e5349a4fcd2..7c900507279f 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.txt
@@ -105,6 +105,10 @@ write_wakeup() - May be called at any point between open and close.
105 is permitted to call the driver write method from 105 is permitted to call the driver write method from
106 this function. In such a situation defer it. 106 this function. In such a situation defer it.
107 107
108dcd_change() - Report to the tty line the current DCD pin status
109 changes and the relative timestamp. The timestamp
110 can be NULL.
111
108 112
109Driver Access 113Driver Access
110 114
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index f4dd3bf99d12..98d14cb8a85d 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -119,10 +119,18 @@ the codec slots 0 and 1 no matter what the hardware reports.
119 119
120Interrupt Handling 120Interrupt Handling
121~~~~~~~~~~~~~~~~~~ 121~~~~~~~~~~~~~~~~~~
122In rare but some cases, the interrupt isn't properly handled as 122HD-audio driver uses MSI as default (if available) since 2.6.33
123default. You would notice this by the DMA transfer error reported by 123kernel as MSI works better on some machines, and in general, it's
124ALSA PCM core, for example. Using MSI might help in such a case. 124better for performance. However, Nvidia controllers showed bad
125Pass `enable_msi=1` option for enabling MSI. 125regressions with MSI (especially in a combination with AMD chipset),
126thus we disabled MSI for them.
127
128There seem also still other devices that don't work with MSI. If you
129see a regression wrt the sound quality (stuttering, etc) or a lock-up
130in the recent kernel, try to pass `enable_msi=0` option to disable
131MSI. If it works, you can add the known bad device to the blacklist
132defined in hda_intel.c. In such a case, please report and give the
133patch back to the upstream developer.
126 134
127 135
128HD-AUDIO CODEC 136HD-AUDIO CODEC
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index 10abd3773e49..16feda901469 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -58,7 +58,7 @@ static void transfer(int fd)
58 }; 58 };
59 59
60 ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr); 60 ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
61 if (ret == 1) 61 if (ret < 1)
62 pabort("can't send spi message"); 62 pabort("can't send spi message");
63 63
64 for (ret = 0; ret < ARRAY_SIZE(tx); ret++) { 64 for (ret = 0; ret < ARRAY_SIZE(tx); ret++) {
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 5effa5bd993b..e213f45cf9d7 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the
18 - It cannot contain any "trivial" fixes in it (spelling changes, 18 - It cannot contain any "trivial" fixes in it (spelling changes,
19 whitespace cleanups, etc). 19 whitespace cleanups, etc).
20 - It must follow the Documentation/SubmittingPatches rules. 20 - It must follow the Documentation/SubmittingPatches rules.
21 - It or an equivalent fix must already exist in Linus' tree. Quote the 21 - It or an equivalent fix must already exist in Linus' tree (upstream).
22 respective commit ID in Linus' tree in your patch submission to -stable.
23 22
24 23
25Procedure for submitting patches to the -stable tree: 24Procedure for submitting patches to the -stable tree:
26 25
27 - Send the patch, after verifying that it follows the above rules, to 26 - Send the patch, after verifying that it follows the above rules, to
28 stable@kernel.org. 27 stable@kernel.org. You must note the upstream commit ID in the changelog
29 - To have the patch automatically included in the stable tree, add the 28 of your submission.
30 the tag 29 - To have the patch automatically included in the stable tree, add the tag
31 Cc: stable@kernel.org 30 Cc: stable@kernel.org
32 in the sign-off area. Once the patch is merged it will be applied to 31 in the sign-off area. Once the patch is merged it will be applied to
33 the stable tree without anything else needing to be done by the author 32 the stable tree without anything else needing to be done by the author
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index fc5790d36cd9..6c7d18c53f84 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -573,11 +573,14 @@ Because other nodes' memory may be free. This means system total status
573may be not fatal yet. 573may be not fatal yet.
574 574
575If this is set to 2, the kernel panics compulsorily even on the 575If this is set to 2, the kernel panics compulsorily even on the
576above-mentioned. 576above-mentioned. Even if the oom happens under a memory cgroup, the whole
577system panics.
577 578
578The default value is 0. 579The default value is 0.
5791 and 2 are for failover of clustering. Please select either 5801 and 2 are for failover of clustering. Please select either
580according to your policy of failover. 581according to your policy of failover.
582panic_on_oom=2+kdump gives you a very strong tool to investigate
583why the oom happens. You can get a snapshot.
581 584
582============================================================= 585=============================================================
583 586
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index 397dc35e1323..a9248da5cdbc 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -4,6 +4,8 @@ highres.txt
4 - High resolution timers and dynamic ticks design notes 4 - High resolution timers and dynamic ticks design notes
5hpet.txt 5hpet.txt
6 - High Precision Event Timer Driver for Linux 6 - High Precision Event Timer Driver for Linux
7hpet_example.c
8 - sample hpet timer test program
7hrtimers.txt 9hrtimers.txt
8 - subsystem for high-resolution kernel timers 10 - subsystem for high-resolution kernel timers
9timer_stats.txt 11timer_stats.txt
diff --git a/Documentation/timers/Makefile b/Documentation/timers/Makefile
new file mode 100644
index 000000000000..c85625f4ab25
--- /dev/null
+++ b/Documentation/timers/Makefile
@@ -0,0 +1,8 @@
1# kbuild trick to avoid linker error. Can be omitted if a module is built.
2obj- := dummy.o
3
4# List of programs to build
5hostprogs-y := hpet_example
6
7# Tell kbuild to always build the programs
8always := $(hostprogs-y)
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index 16d25e6b5a00..767392ffd31e 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -26,274 +26,5 @@ initialization. An example of this initialization can be found in
26arch/x86/kernel/hpet.c. 26arch/x86/kernel/hpet.c.
27 27
28The driver provides a userspace API which resembles the API found in the 28The driver provides a userspace API which resembles the API found in the
29RTC driver framework. An example user space program is provided below. 29RTC driver framework. An example user space program is provided in
30 30file:Documentation/timers/hpet_example.c
31#include <stdio.h>
32#include <stdlib.h>
33#include <unistd.h>
34#include <fcntl.h>
35#include <string.h>
36#include <memory.h>
37#include <malloc.h>
38#include <time.h>
39#include <ctype.h>
40#include <sys/types.h>
41#include <sys/wait.h>
42#include <signal.h>
43#include <fcntl.h>
44#include <errno.h>
45#include <sys/time.h>
46#include <linux/hpet.h>
47
48
49extern void hpet_open_close(int, const char **);
50extern void hpet_info(int, const char **);
51extern void hpet_poll(int, const char **);
52extern void hpet_fasync(int, const char **);
53extern void hpet_read(int, const char **);
54
55#include <sys/poll.h>
56#include <sys/ioctl.h>
57#include <signal.h>
58
59struct hpet_command {
60 char *command;
61 void (*func)(int argc, const char ** argv);
62} hpet_command[] = {
63 {
64 "open-close",
65 hpet_open_close
66 },
67 {
68 "info",
69 hpet_info
70 },
71 {
72 "poll",
73 hpet_poll
74 },
75 {
76 "fasync",
77 hpet_fasync
78 },
79};
80
81int
82main(int argc, const char ** argv)
83{
84 int i;
85
86 argc--;
87 argv++;
88
89 if (!argc) {
90 fprintf(stderr, "-hpet: requires command\n");
91 return -1;
92 }
93
94
95 for (i = 0; i < (sizeof (hpet_command) / sizeof (hpet_command[0])); i++)
96 if (!strcmp(argv[0], hpet_command[i].command)) {
97 argc--;
98 argv++;
99 fprintf(stderr, "-hpet: executing %s\n",
100 hpet_command[i].command);
101 hpet_command[i].func(argc, argv);
102 return 0;
103 }
104
105 fprintf(stderr, "do_hpet: command %s not implemented\n", argv[0]);
106
107 return -1;
108}
109
110void
111hpet_open_close(int argc, const char **argv)
112{
113 int fd;
114
115 if (argc != 1) {
116 fprintf(stderr, "hpet_open_close: device-name\n");
117 return;
118 }
119
120 fd = open(argv[0], O_RDONLY);
121 if (fd < 0)
122 fprintf(stderr, "hpet_open_close: open failed\n");
123 else
124 close(fd);
125
126 return;
127}
128
129void
130hpet_info(int argc, const char **argv)
131{
132}
133
134void
135hpet_poll(int argc, const char **argv)
136{
137 unsigned long freq;
138 int iterations, i, fd;
139 struct pollfd pfd;
140 struct hpet_info info;
141 struct timeval stv, etv;
142 struct timezone tz;
143 long usec;
144
145 if (argc != 3) {
146 fprintf(stderr, "hpet_poll: device-name freq iterations\n");
147 return;
148 }
149
150 freq = atoi(argv[1]);
151 iterations = atoi(argv[2]);
152
153 fd = open(argv[0], O_RDONLY);
154
155 if (fd < 0) {
156 fprintf(stderr, "hpet_poll: open of %s failed\n", argv[0]);
157 return;
158 }
159
160 if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
161 fprintf(stderr, "hpet_poll: HPET_IRQFREQ failed\n");
162 goto out;
163 }
164
165 if (ioctl(fd, HPET_INFO, &info) < 0) {
166 fprintf(stderr, "hpet_poll: failed to get info\n");
167 goto out;
168 }
169
170 fprintf(stderr, "hpet_poll: info.hi_flags 0x%lx\n", info.hi_flags);
171
172 if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
173 fprintf(stderr, "hpet_poll: HPET_EPI failed\n");
174 goto out;
175 }
176
177 if (ioctl(fd, HPET_IE_ON, 0) < 0) {
178 fprintf(stderr, "hpet_poll, HPET_IE_ON failed\n");
179 goto out;
180 }
181
182 pfd.fd = fd;
183 pfd.events = POLLIN;
184
185 for (i = 0; i < iterations; i++) {
186 pfd.revents = 0;
187 gettimeofday(&stv, &tz);
188 if (poll(&pfd, 1, -1) < 0)
189 fprintf(stderr, "hpet_poll: poll failed\n");
190 else {
191 long data;
192
193 gettimeofday(&etv, &tz);
194 usec = stv.tv_sec * 1000000 + stv.tv_usec;
195 usec = (etv.tv_sec * 1000000 + etv.tv_usec) - usec;
196
197 fprintf(stderr,
198 "hpet_poll: expired time = 0x%lx\n", usec);
199
200 fprintf(stderr, "hpet_poll: revents = 0x%x\n",
201 pfd.revents);
202
203 if (read(fd, &data, sizeof(data)) != sizeof(data)) {
204 fprintf(stderr, "hpet_poll: read failed\n");
205 }
206 else
207 fprintf(stderr, "hpet_poll: data 0x%lx\n",
208 data);
209 }
210 }
211
212out:
213 close(fd);
214 return;
215}
216
217static int hpet_sigio_count;
218
219static void
220hpet_sigio(int val)
221{
222 fprintf(stderr, "hpet_sigio: called\n");
223 hpet_sigio_count++;
224}
225
226void
227hpet_fasync(int argc, const char **argv)
228{
229 unsigned long freq;
230 int iterations, i, fd, value;
231 sig_t oldsig;
232 struct hpet_info info;
233
234 hpet_sigio_count = 0;
235 fd = -1;
236
237 if ((oldsig = signal(SIGIO, hpet_sigio)) == SIG_ERR) {
238 fprintf(stderr, "hpet_fasync: failed to set signal handler\n");
239 return;
240 }
241
242 if (argc != 3) {
243 fprintf(stderr, "hpet_fasync: device-name freq iterations\n");
244 goto out;
245 }
246
247 fd = open(argv[0], O_RDONLY);
248
249 if (fd < 0) {
250 fprintf(stderr, "hpet_fasync: failed to open %s\n", argv[0]);
251 return;
252 }
253
254
255 if ((fcntl(fd, F_SETOWN, getpid()) == 1) ||
256 ((value = fcntl(fd, F_GETFL)) == 1) ||
257 (fcntl(fd, F_SETFL, value | O_ASYNC) == 1)) {
258 fprintf(stderr, "hpet_fasync: fcntl failed\n");
259 goto out;
260 }
261
262 freq = atoi(argv[1]);
263 iterations = atoi(argv[2]);
264
265 if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
266 fprintf(stderr, "hpet_fasync: HPET_IRQFREQ failed\n");
267 goto out;
268 }
269
270 if (ioctl(fd, HPET_INFO, &info) < 0) {
271 fprintf(stderr, "hpet_fasync: failed to get info\n");
272 goto out;
273 }
274
275 fprintf(stderr, "hpet_fasync: info.hi_flags 0x%lx\n", info.hi_flags);
276
277 if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
278 fprintf(stderr, "hpet_fasync: HPET_EPI failed\n");
279 goto out;
280 }
281
282 if (ioctl(fd, HPET_IE_ON, 0) < 0) {
283 fprintf(stderr, "hpet_fasync, HPET_IE_ON failed\n");
284 goto out;
285 }
286
287 for (i = 0; i < iterations; i++) {
288 (void) pause();
289 fprintf(stderr, "hpet_fasync: count = %d\n", hpet_sigio_count);
290 }
291
292out:
293 signal(SIGIO, oldsig);
294
295 if (fd >= 0)
296 close(fd);
297
298 return;
299}
diff --git a/Documentation/timers/hpet_example.c b/Documentation/timers/hpet_example.c
new file mode 100644
index 000000000000..f9ce2d9fdfd5
--- /dev/null
+++ b/Documentation/timers/hpet_example.c
@@ -0,0 +1,269 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <fcntl.h>
5#include <string.h>
6#include <memory.h>
7#include <malloc.h>
8#include <time.h>
9#include <ctype.h>
10#include <sys/types.h>
11#include <sys/wait.h>
12#include <signal.h>
13#include <fcntl.h>
14#include <errno.h>
15#include <sys/time.h>
16#include <linux/hpet.h>
17
18
19extern void hpet_open_close(int, const char **);
20extern void hpet_info(int, const char **);
21extern void hpet_poll(int, const char **);
22extern void hpet_fasync(int, const char **);
23extern void hpet_read(int, const char **);
24
25#include <sys/poll.h>
26#include <sys/ioctl.h>
27#include <signal.h>
28
29struct hpet_command {
30 char *command;
31 void (*func)(int argc, const char ** argv);
32} hpet_command[] = {
33 {
34 "open-close",
35 hpet_open_close
36 },
37 {
38 "info",
39 hpet_info
40 },
41 {
42 "poll",
43 hpet_poll
44 },
45 {
46 "fasync",
47 hpet_fasync
48 },
49};
50
51int
52main(int argc, const char ** argv)
53{
54 int i;
55
56 argc--;
57 argv++;
58
59 if (!argc) {
60 fprintf(stderr, "-hpet: requires command\n");
61 return -1;
62 }
63
64
65 for (i = 0; i < (sizeof (hpet_command) / sizeof (hpet_command[0])); i++)
66 if (!strcmp(argv[0], hpet_command[i].command)) {
67 argc--;
68 argv++;
69 fprintf(stderr, "-hpet: executing %s\n",
70 hpet_command[i].command);
71 hpet_command[i].func(argc, argv);
72 return 0;
73 }
74
75 fprintf(stderr, "do_hpet: command %s not implemented\n", argv[0]);
76
77 return -1;
78}
79
80void
81hpet_open_close(int argc, const char **argv)
82{
83 int fd;
84
85 if (argc != 1) {
86 fprintf(stderr, "hpet_open_close: device-name\n");
87 return;
88 }
89
90 fd = open(argv[0], O_RDONLY);
91 if (fd < 0)
92 fprintf(stderr, "hpet_open_close: open failed\n");
93 else
94 close(fd);
95
96 return;
97}
98
99void
100hpet_info(int argc, const char **argv)
101{
102}
103
104void
105hpet_poll(int argc, const char **argv)
106{
107 unsigned long freq;
108 int iterations, i, fd;
109 struct pollfd pfd;
110 struct hpet_info info;
111 struct timeval stv, etv;
112 struct timezone tz;
113 long usec;
114
115 if (argc != 3) {
116 fprintf(stderr, "hpet_poll: device-name freq iterations\n");
117 return;
118 }
119
120 freq = atoi(argv[1]);
121 iterations = atoi(argv[2]);
122
123 fd = open(argv[0], O_RDONLY);
124
125 if (fd < 0) {
126 fprintf(stderr, "hpet_poll: open of %s failed\n", argv[0]);
127 return;
128 }
129
130 if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
131 fprintf(stderr, "hpet_poll: HPET_IRQFREQ failed\n");
132 goto out;
133 }
134
135 if (ioctl(fd, HPET_INFO, &info) < 0) {
136 fprintf(stderr, "hpet_poll: failed to get info\n");
137 goto out;
138 }
139
140 fprintf(stderr, "hpet_poll: info.hi_flags 0x%lx\n", info.hi_flags);
141
142 if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
143 fprintf(stderr, "hpet_poll: HPET_EPI failed\n");
144 goto out;
145 }
146
147 if (ioctl(fd, HPET_IE_ON, 0) < 0) {
148 fprintf(stderr, "hpet_poll, HPET_IE_ON failed\n");
149 goto out;
150 }
151
152 pfd.fd = fd;
153 pfd.events = POLLIN;
154
155 for (i = 0; i < iterations; i++) {
156 pfd.revents = 0;
157 gettimeofday(&stv, &tz);
158 if (poll(&pfd, 1, -1) < 0)
159 fprintf(stderr, "hpet_poll: poll failed\n");
160 else {
161 long data;
162
163 gettimeofday(&etv, &tz);
164 usec = stv.tv_sec * 1000000 + stv.tv_usec;
165 usec = (etv.tv_sec * 1000000 + etv.tv_usec) - usec;
166
167 fprintf(stderr,
168 "hpet_poll: expired time = 0x%lx\n", usec);
169
170 fprintf(stderr, "hpet_poll: revents = 0x%x\n",
171 pfd.revents);
172
173 if (read(fd, &data, sizeof(data)) != sizeof(data)) {
174 fprintf(stderr, "hpet_poll: read failed\n");
175 }
176 else
177 fprintf(stderr, "hpet_poll: data 0x%lx\n",
178 data);
179 }
180 }
181
182out:
183 close(fd);
184 return;
185}
186
187static int hpet_sigio_count;
188
189static void
190hpet_sigio(int val)
191{
192 fprintf(stderr, "hpet_sigio: called\n");
193 hpet_sigio_count++;
194}
195
196void
197hpet_fasync(int argc, const char **argv)
198{
199 unsigned long freq;
200 int iterations, i, fd, value;
201 sig_t oldsig;
202 struct hpet_info info;
203
204 hpet_sigio_count = 0;
205 fd = -1;
206
207 if ((oldsig = signal(SIGIO, hpet_sigio)) == SIG_ERR) {
208 fprintf(stderr, "hpet_fasync: failed to set signal handler\n");
209 return;
210 }
211
212 if (argc != 3) {
213 fprintf(stderr, "hpet_fasync: device-name freq iterations\n");
214 goto out;
215 }
216
217 fd = open(argv[0], O_RDONLY);
218
219 if (fd < 0) {
220 fprintf(stderr, "hpet_fasync: failed to open %s\n", argv[0]);
221 return;
222 }
223
224
225 if ((fcntl(fd, F_SETOWN, getpid()) == 1) ||
226 ((value = fcntl(fd, F_GETFL)) == 1) ||
227 (fcntl(fd, F_SETFL, value | O_ASYNC) == 1)) {
228 fprintf(stderr, "hpet_fasync: fcntl failed\n");
229 goto out;
230 }
231
232 freq = atoi(argv[1]);
233 iterations = atoi(argv[2]);
234
235 if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
236 fprintf(stderr, "hpet_fasync: HPET_IRQFREQ failed\n");
237 goto out;
238 }
239
240 if (ioctl(fd, HPET_INFO, &info) < 0) {
241 fprintf(stderr, "hpet_fasync: failed to get info\n");
242 goto out;
243 }
244
245 fprintf(stderr, "hpet_fasync: info.hi_flags 0x%lx\n", info.hi_flags);
246
247 if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
248 fprintf(stderr, "hpet_fasync: HPET_EPI failed\n");
249 goto out;
250 }
251
252 if (ioctl(fd, HPET_IE_ON, 0) < 0) {
253 fprintf(stderr, "hpet_fasync, HPET_IE_ON failed\n");
254 goto out;
255 }
256
257 for (i = 0; i < iterations; i++) {
258 (void) pause();
259 fprintf(stderr, "hpet_fasync: count = %d\n", hpet_sigio_count);
260 }
261
262out:
263 signal(SIGIO, oldsig);
264
265 if (fd >= 0)
266 close(fd);
267
268 return;
269}
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 02ac6ed38b2d..778ddf38b82c 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -90,7 +90,8 @@ In order to facilitate early boot debugging, use boot option:
90 90
91 trace_event=[event-list] 91 trace_event=[event-list]
92 92
93The format of this boot option is the same as described in section 2.1. 93event-list is a comma separated list of events. See section 2.1 for event
94format.
94 95
953. Defining an event-enabled tracepoint 963. Defining an event-enabled tracepoint
96======================================= 97=======================================
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index bab3040da548..557c1edeccaf 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -155,6 +155,9 @@ of ftrace. Here is a list of some of the key files:
155 to be traced. Echoing names of functions into this file 155 to be traced. Echoing names of functions into this file
156 will limit the trace to only those functions. 156 will limit the trace to only those functions.
157 157
158 This interface also allows for commands to be used. See the
159 "Filter commands" section for more details.
160
158 set_ftrace_notrace: 161 set_ftrace_notrace:
159 162
160 This has an effect opposite to that of 163 This has an effect opposite to that of
@@ -1337,12 +1340,14 @@ ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
1337can either use the sysctl function or set it via the proc system 1340can either use the sysctl function or set it via the proc system
1338interface. 1341interface.
1339 1342
1340 sysctl kernel.ftrace_dump_on_oops=1 1343 sysctl kernel.ftrace_dump_on_oops=n
1341 1344
1342or 1345or
1343 1346
1344 echo 1 > /proc/sys/kernel/ftrace_dump_on_oops 1347 echo n > /proc/sys/kernel/ftrace_dump_on_oops
1345 1348
1349If n = 1, ftrace will dump buffers of all CPUs, if n = 2 ftrace will
1350only dump the buffer of the CPU that triggered the oops.
1346 1351
1347Here's an example of such a dump after a null pointer 1352Here's an example of such a dump after a null pointer
1348dereference in a kernel module: 1353dereference in a kernel module:
@@ -1588,7 +1593,7 @@ module author does not need to worry about it.
1588 1593
1589When tracing is enabled, kstop_machine is called to prevent 1594When tracing is enabled, kstop_machine is called to prevent
1590races with the CPUS executing code being modified (which can 1595races with the CPUS executing code being modified (which can
1591cause the CPU to do undesireable things), and the nops are 1596cause the CPU to do undesirable things), and the nops are
1592patched back to calls. But this time, they do not call mcount 1597patched back to calls. But this time, they do not call mcount
1593(which is just a function stub). They now call into the ftrace 1598(which is just a function stub). They now call into the ftrace
1594infrastructure. 1599infrastructure.
@@ -1822,6 +1827,47 @@ this special filter via:
1822 echo > set_graph_function 1827 echo > set_graph_function
1823 1828
1824 1829
1830Filter commands
1831---------------
1832
1833A few commands are supported by the set_ftrace_filter interface.
1834Trace commands have the following format:
1835
1836<function>:<command>:<parameter>
1837
1838The following commands are supported:
1839
1840- mod
1841 This command enables function filtering per module. The
1842 parameter defines the module. For example, if only the write*
1843 functions in the ext3 module are desired, run:
1844
1845 echo 'write*:mod:ext3' > set_ftrace_filter
1846
1847 This command interacts with the filter in the same way as
1848 filtering based on function names. Thus, adding more functions
1849 in a different module is accomplished by appending (>>) to the
1850 filter file. Remove specific module functions by prepending
1851 '!':
1852
1853 echo '!writeback*:mod:ext3' >> set_ftrace_filter
1854
1855- traceon/traceoff
1856 These commands turn tracing on and off when the specified
1857 functions are hit. The parameter determines how many times the
1858 tracing system is turned on and off. If unspecified, there is
1859 no limit. For example, to disable tracing when a schedule bug
1860 is hit the first 5 times, run:
1861
1862 echo '__schedule_bug:traceoff:5' > set_ftrace_filter
1863
1864 These commands are cumulative whether or not they are appended
1865 to set_ftrace_filter. To remove a command, prepend it by '!'
1866 and drop the parameter:
1867
1868 echo '!__schedule_bug:traceoff' > set_ftrace_filter
1869
1870
1825trace_pipe 1871trace_pipe
1826---------- 1872----------
1827 1873
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index a9100b28eb84..ec94748ae65b 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -40,7 +40,9 @@ Synopsis of kprobe_events
40 $stack : Fetch stack address. 40 $stack : Fetch stack address.
41 $retval : Fetch return value.(*) 41 $retval : Fetch return value.(*)
42 +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**) 42 +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
43 NAME=FETCHARG: Set NAME as the argument name of FETCHARG. 43 NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
44 FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
45 (u8/u16/u32/u64/s8/s16/s32/s64) are supported.
44 46
45 (*) only for return probe. 47 (*) only for return probe.
46 (**) this is useful for fetching a field of data structures. 48 (**) this is useful for fetching a field of data structures.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index e57d6a9dd32b..dca82d7c83d8 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -4,23 +4,35 @@ active_mm.txt
4 - An explanation from Linus about tsk->active_mm vs tsk->mm. 4 - An explanation from Linus about tsk->active_mm vs tsk->mm.
5balance 5balance
6 - various information on memory balancing. 6 - various information on memory balancing.
7hugepage-mmap.c
8 - Example app using huge page memory with the mmap system call.
9hugepage-shm.c
10 - Example app using huge page memory with Sys V shared memory system calls.
7hugetlbpage.txt 11hugetlbpage.txt
8 - a brief summary of hugetlbpage support in the Linux kernel. 12 - a brief summary of hugetlbpage support in the Linux kernel.
13hwpoison.txt
14 - explains what hwpoison is
9ksm.txt 15ksm.txt
10 - how to use the Kernel Samepage Merging feature. 16 - how to use the Kernel Samepage Merging feature.
11locking 17locking
12 - info on how locking and synchronization is done in the Linux vm code. 18 - info on how locking and synchronization is done in the Linux vm code.
19map_hugetlb.c
20 - an example program that uses the MAP_HUGETLB mmap flag.
13numa 21numa
14 - information about NUMA specific code in the Linux vm. 22 - information about NUMA specific code in the Linux vm.
15numa_memory_policy.txt 23numa_memory_policy.txt
16 - documentation of concepts and APIs of the 2.6 memory policy support. 24 - documentation of concepts and APIs of the 2.6 memory policy support.
17overcommit-accounting 25overcommit-accounting
18 - description of the Linux kernels overcommit handling modes. 26 - description of the Linux kernels overcommit handling modes.
27page-types.c
28 - Tool for querying page flags
19page_migration 29page_migration
20 - description of page migration in NUMA systems. 30 - description of page migration in NUMA systems.
31pagemap.txt
32 - pagemap, from the userspace perspective
21slabinfo.c 33slabinfo.c
22 - source code for a tool to get reports about slabs. 34 - source code for a tool to get reports about slabs.
23slub.txt 35slub.txt
24 - a short users guide for SLUB. 36 - a short users guide for SLUB.
25map_hugetlb.c 37unevictable-lru.txt
26 - an example program that uses the MAP_HUGETLB mmap flag. 38 - Unevictable LRU infrastructure
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
index 5bd269b3731a..9dcff328b964 100644
--- a/Documentation/vm/Makefile
+++ b/Documentation/vm/Makefile
@@ -2,7 +2,7 @@
2obj- := dummy.o 2obj- := dummy.o
3 3
4# List of programs to build 4# List of programs to build
5hostprogs-y := slabinfo page-types 5hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
6 6
7# Tell kbuild to always build the programs 7# Tell kbuild to always build the programs
8always := $(hostprogs-y) 8always := $(hostprogs-y)
diff --git a/Documentation/vm/hugepage-mmap.c b/Documentation/vm/hugepage-mmap.c
new file mode 100644
index 000000000000..db0dd9a33d54
--- /dev/null
+++ b/Documentation/vm/hugepage-mmap.c
@@ -0,0 +1,91 @@
1/*
2 * hugepage-mmap:
3 *
4 * Example of using huge page memory in a user application using the mmap
5 * system call. Before running this application, make sure that the
6 * administrator has mounted the hugetlbfs filesystem (on some directory
7 * like /mnt) using the command mount -t hugetlbfs nodev /mnt. In this
8 * example, the app is requesting memory of size 256MB that is backed by
9 * huge pages.
10 *
11 * For the ia64 architecture, the Linux kernel reserves Region number 4 for
12 * huge pages. That means that if one requires a fixed address, a huge page
13 * aligned address starting with 0x800000... will be required. If a fixed
14 * address is not required, the kernel will select an address in the proper
15 * range.
16 * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
17 */
18
19#include <stdlib.h>
20#include <stdio.h>
21#include <unistd.h>
22#include <sys/mman.h>
23#include <fcntl.h>
24
25#define FILE_NAME "/mnt/hugepagefile"
26#define LENGTH (256UL*1024*1024)
27#define PROTECTION (PROT_READ | PROT_WRITE)
28
29/* Only ia64 requires this */
30#ifdef __ia64__
31#define ADDR (void *)(0x8000000000000000UL)
32#define FLAGS (MAP_SHARED | MAP_FIXED)
33#else
34#define ADDR (void *)(0x0UL)
35#define FLAGS (MAP_SHARED)
36#endif
37
38static void check_bytes(char *addr)
39{
40 printf("First hex is %x\n", *((unsigned int *)addr));
41}
42
43static void write_bytes(char *addr)
44{
45 unsigned long i;
46
47 for (i = 0; i < LENGTH; i++)
48 *(addr + i) = (char)i;
49}
50
51static void read_bytes(char *addr)
52{
53 unsigned long i;
54
55 check_bytes(addr);
56 for (i = 0; i < LENGTH; i++)
57 if (*(addr + i) != (char)i) {
58 printf("Mismatch at %lu\n", i);
59 break;
60 }
61}
62
63int main(void)
64{
65 void *addr;
66 int fd;
67
68 fd = open(FILE_NAME, O_CREAT | O_RDWR, 0755);
69 if (fd < 0) {
70 perror("Open failed");
71 exit(1);
72 }
73
74 addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, fd, 0);
75 if (addr == MAP_FAILED) {
76 perror("mmap");
77 unlink(FILE_NAME);
78 exit(1);
79 }
80
81 printf("Returned address is %p\n", addr);
82 check_bytes(addr);
83 write_bytes(addr);
84 read_bytes(addr);
85
86 munmap(addr, LENGTH);
87 close(fd);
88 unlink(FILE_NAME);
89
90 return 0;
91}
diff --git a/Documentation/vm/hugepage-shm.c b/Documentation/vm/hugepage-shm.c
new file mode 100644
index 000000000000..07956d8592c9
--- /dev/null
+++ b/Documentation/vm/hugepage-shm.c
@@ -0,0 +1,98 @@
1/*
2 * hugepage-shm:
3 *
4 * Example of using huge page memory in a user application using Sys V shared
5 * memory system calls. In this example the app is requesting 256MB of
6 * memory that is backed by huge pages. The application uses the flag
7 * SHM_HUGETLB in the shmget system call to inform the kernel that it is
8 * requesting huge pages.
9 *
10 * For the ia64 architecture, the Linux kernel reserves Region number 4 for
11 * huge pages. That means that if one requires a fixed address, a huge page
12 * aligned address starting with 0x800000... will be required. If a fixed
13 * address is not required, the kernel will select an address in the proper
14 * range.
 15 * Other architectures, such as ppc64, i386 or x86_64, are not so constrained.
16 *
 17 * Note: The default shared memory limit is quite low on many kernels;
18 * you may need to increase it via:
19 *
20 * echo 268435456 > /proc/sys/kernel/shmmax
21 *
22 * This will increase the maximum size per shared memory segment to 256MB.
23 * The other limit that you will hit eventually is shmall which is the
24 * total amount of shared memory in pages. To set it to 16GB on a system
25 * with a 4kB pagesize do:
26 *
27 * echo 4194304 > /proc/sys/kernel/shmall
28 */
29
30#include <stdlib.h>
31#include <stdio.h>
32#include <sys/types.h>
33#include <sys/ipc.h>
34#include <sys/shm.h>
35#include <sys/mman.h>
36
37#ifndef SHM_HUGETLB
38#define SHM_HUGETLB 04000
39#endif
40
41#define LENGTH (256UL*1024*1024)
42
43#define dprintf(x) printf(x)
44
45/* Only ia64 requires this */
46#ifdef __ia64__
47#define ADDR (void *)(0x8000000000000000UL)
48#define SHMAT_FLAGS (SHM_RND)
49#else
50#define ADDR (void *)(0x0UL)
51#define SHMAT_FLAGS (0)
52#endif
53
54int main(void)
55{
56 int shmid;
57 unsigned long i;
58 char *shmaddr;
59
60 if ((shmid = shmget(2, LENGTH,
61 SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
62 perror("shmget");
63 exit(1);
64 }
65 printf("shmid: 0x%x\n", shmid);
66
67 shmaddr = shmat(shmid, ADDR, SHMAT_FLAGS);
68 if (shmaddr == (char *)-1) {
69 perror("Shared memory attach failure");
70 shmctl(shmid, IPC_RMID, NULL);
71 exit(2);
72 }
73 printf("shmaddr: %p\n", shmaddr);
74
75 dprintf("Starting the writes:\n");
76 for (i = 0; i < LENGTH; i++) {
77 shmaddr[i] = (char)(i);
78 if (!(i % (1024 * 1024)))
79 dprintf(".");
80 }
81 dprintf("\n");
82
83 dprintf("Starting the Check...");
84 for (i = 0; i < LENGTH; i++)
85 if (shmaddr[i] != (char)i)
86 printf("\nIndex %lu mismatched\n", i);
87 dprintf("Done.\n");
88
89 if (shmdt((const void *)shmaddr) != 0) {
90 perror("Detach failure");
91 shmctl(shmid, IPC_RMID, NULL);
92 exit(3);
93 }
94
95 shmctl(shmid, IPC_RMID, NULL);
96
97 return 0;
98}
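
The shmmax limit mentioned in the header comment can also be checked at run
time. A minimal sketch, assuming the sysctl is exposed at
/proc/sys/kernel/shmmax as described above; read_shmmax() is an illustrative
helper and not part of the patch:

#include <stdio.h>

/* Sketch only: read the current shmmax limit so the program can warn
 * before shmget() fails with EINVAL for an oversized segment. */
static unsigned long read_shmmax(void)
{
	FILE *f = fopen("/proc/sys/kernel/shmmax", "r");
	unsigned long v = 0;

	if (f) {
		if (fscanf(f, "%lu", &v) != 1)
			v = 0;
		fclose(f);
	}
	return v;
}

Calling this before shmget() and comparing the result against LENGTH turns a
cryptic shmget failure into an actionable message about raising shmmax.
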
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index bc31636973e3..457634c1e03e 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -299,176 +299,11 @@ map_hugetlb.c.
299******************************************************************* 299*******************************************************************
300 300
301/* 301/*
302 * Example of using huge page memory in a user application using Sys V shared 302 * hugepage-shm: see Documentation/vm/hugepage-shm.c
303 * memory system calls. In this example the app is requesting 256MB of
304 * memory that is backed by huge pages. The application uses the flag
305 * SHM_HUGETLB in the shmget system call to inform the kernel that it is
306 * requesting huge pages.
307 *
308 * For the ia64 architecture, the Linux kernel reserves Region number 4 for
309 * huge pages. That means that if one requires a fixed address, a huge page
310 * aligned address starting with 0x800000... will be required. If a fixed
311 * address is not required, the kernel will select an address in the proper
312 * range.
313 * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
314 *
315 * Note: The default shared memory limit is quite low on many kernels,
316 * you may need to increase it via:
317 *
318 * echo 268435456 > /proc/sys/kernel/shmmax
319 *
320 * This will increase the maximum size per shared memory segment to 256MB.
321 * The other limit that you will hit eventually is shmall which is the
322 * total amount of shared memory in pages. To set it to 16GB on a system
323 * with a 4kB pagesize do:
324 *
325 * echo 4194304 > /proc/sys/kernel/shmall
326 */ 303 */
327#include <stdlib.h>
328#include <stdio.h>
329#include <sys/types.h>
330#include <sys/ipc.h>
331#include <sys/shm.h>
332#include <sys/mman.h>
333
334#ifndef SHM_HUGETLB
335#define SHM_HUGETLB 04000
336#endif
337
338#define LENGTH (256UL*1024*1024)
339
340#define dprintf(x) printf(x)
341
342#define ADDR (void *)(0x0UL) /* let kernel choose address */
343#define SHMAT_FLAGS (0)
344
345int main(void)
346{
347 int shmid;
348 unsigned long i;
349 char *shmaddr;
350
351 if ((shmid = shmget(2, LENGTH,
352 SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
353 perror("shmget");
354 exit(1);
355 }
356 printf("shmid: 0x%x\n", shmid);
357
358 shmaddr = shmat(shmid, ADDR, SHMAT_FLAGS);
359 if (shmaddr == (char *)-1) {
360 perror("Shared memory attach failure");
361 shmctl(shmid, IPC_RMID, NULL);
362 exit(2);
363 }
364 printf("shmaddr: %p\n", shmaddr);
365
366 dprintf("Starting the writes:\n");
367 for (i = 0; i < LENGTH; i++) {
368 shmaddr[i] = (char)(i);
369 if (!(i % (1024 * 1024)))
370 dprintf(".");
371 }
372 dprintf("\n");
373
374 dprintf("Starting the Check...");
375 for (i = 0; i < LENGTH; i++)
376 if (shmaddr[i] != (char)i)
377 printf("\nIndex %lu mismatched\n", i);
378 dprintf("Done.\n");
379
380 if (shmdt((const void *)shmaddr) != 0) {
381 perror("Detach failure");
382 shmctl(shmid, IPC_RMID, NULL);
383 exit(3);
384 }
385
386 shmctl(shmid, IPC_RMID, NULL);
387
388 return 0;
389}
390 304
391******************************************************************* 305*******************************************************************
392 306
393/* 307/*
394 * Example of using huge page memory in a user application using the mmap 308 * hugepage-mmap: see Documentation/vm/hugepage-mmap.c
395 * system call. Before running this application, make sure that the
396 * administrator has mounted the hugetlbfs filesystem (on some directory
397 * like /mnt) using the command mount -t hugetlbfs nodev /mnt. In this
398 * example, the app is requesting memory of size 256MB that is backed by
399 * huge pages.
400 *
401 * For the ia64 architecture, the Linux kernel reserves Region number 4 for
402 * huge pages. That means that if one requires a fixed address, a huge page
403 * aligned address starting with 0x800000... will be required. If a fixed
404 * address is not required, the kernel will select an address in the proper
405 * range.
406 * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
407 */ 309 */
408#include <stdlib.h>
409#include <stdio.h>
410#include <unistd.h>
411#include <sys/mman.h>
412#include <fcntl.h>
413
414#define FILE_NAME "/mnt/hugepagefile"
415#define LENGTH (256UL*1024*1024)
416#define PROTECTION (PROT_READ | PROT_WRITE)
417
418#define ADDR (void *)(0x0UL) /* let kernel choose address */
419#define FLAGS (MAP_SHARED)
420
421void check_bytes(char *addr)
422{
423 printf("First hex is %x\n", *((unsigned int *)addr));
424}
425
426void write_bytes(char *addr)
427{
428 unsigned long i;
429
430 for (i = 0; i < LENGTH; i++)
431 *(addr + i) = (char)i;
432}
433
434void read_bytes(char *addr)
435{
436 unsigned long i;
437
438 check_bytes(addr);
439 for (i = 0; i < LENGTH; i++)
440 if (*(addr + i) != (char)i) {
441 printf("Mismatch at %lu\n", i);
442 break;
443 }
444}
445
446int main(void)
447{
448 void *addr;
449 int fd;
450
451 fd = open(FILE_NAME, O_CREAT | O_RDWR, 0755);
452 if (fd < 0) {
453 perror("Open failed");
454 exit(1);
455 }
456
457 addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, fd, 0);
458 if (addr == MAP_FAILED) {
459 perror("mmap");
460 unlink(FILE_NAME);
461 exit(1);
462 }
463
464 printf("Returned address is %p\n", addr);
465 check_bytes(addr);
466 write_bytes(addr);
467 read_bytes(addr);
468
469 munmap(addr, LENGTH);
470 close(fd);
471 unlink(FILE_NAME);
472
473 return 0;
474}
diff --git a/Documentation/vm/map_hugetlb.c b/Documentation/vm/map_hugetlb.c
index e2bdae37f499..9969c7d9f985 100644
--- a/Documentation/vm/map_hugetlb.c
+++ b/Documentation/vm/map_hugetlb.c
@@ -31,12 +31,12 @@
31#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB) 31#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
32#endif 32#endif
33 33
34void check_bytes(char *addr) 34static void check_bytes(char *addr)
35{ 35{
36 printf("First hex is %x\n", *((unsigned int *)addr)); 36 printf("First hex is %x\n", *((unsigned int *)addr));
37} 37}
38 38
39void write_bytes(char *addr) 39static void write_bytes(char *addr)
40{ 40{
41 unsigned long i; 41 unsigned long i;
42 42
@@ -44,7 +44,7 @@ void write_bytes(char *addr)
44 *(addr + i) = (char)i; 44 *(addr + i) = (char)i;
45} 45}
46 46
47void read_bytes(char *addr) 47static void read_bytes(char *addr)
48{ 48{
49 unsigned long i; 49 unsigned long i;
50 50
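
Unlike hugepage-mmap.c, map_hugetlb.c needs no hugetlbfs mount at all: it
passes MAP_HUGETLB together with MAP_ANONYMOUS, as the FLAGS definition in the
hunk above shows. A condensed sketch of that core call (error paths and the
byte checks trimmed; the MAP_HUGETLB fallback value is the x86 one and may not
be needed with current headers):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* x86 value; newer headers define it */
#endif

#define LENGTH (256UL*1024*1024)

int main(void)
{
	/* Anonymous mapping backed by huge pages; the administrator still
	 * has to have reserved enough huge pages beforehand. */
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	printf("Returned address is %p\n", addr);
	munmap(addr, LENGTH);
	return 0;
}
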
diff --git a/Documentation/volatile-considered-harmful.txt b/Documentation/volatile-considered-harmful.txt
index 991c26a6ef64..db0cb228d64a 100644
--- a/Documentation/volatile-considered-harmful.txt
+++ b/Documentation/volatile-considered-harmful.txt
@@ -63,9 +63,9 @@ way to perform a busy wait is:
63 cpu_relax(); 63 cpu_relax();
64 64
65The cpu_relax() call can lower CPU power consumption or yield to a 65The cpu_relax() call can lower CPU power consumption or yield to a
66hyperthreaded twin processor; it also happens to serve as a memory barrier, 66hyperthreaded twin processor; it also happens to serve as a compiler
67so, once again, volatile is unnecessary. Of course, busy-waiting is 67barrier, so, once again, volatile is unnecessary. Of course, busy-
68generally an anti-social act to begin with. 68waiting is generally an anti-social act to begin with.
69 69
70There are still a few rare situations where volatile makes sense in the 70There are still a few rare situations where volatile makes sense in the
71kernel: 71kernel:
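
The wording change above distinguishes a compiler barrier from a full memory
barrier: a compiler barrier only keeps the compiler from caching a value in a
register across it, which is exactly what the busy-wait loop needs instead of
volatile. As an illustration only (not taken from the document's text, and
assuming GCC-style inline assembly; the kernel's own barrier() macro is
defined along these lines):

/* Sketch: a compiler barrier; the empty asm with a "memory" clobber
 * forces the compiler to reload memory operands after it. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int flag;		/* no volatile needed */

static void wait_for_flag(void)
{
	while (!flag)
		barrier();	/* 'flag' is re-read on every iteration */
}

This only illustrates the compiler-barrier point; as the surrounding text
argues, proper locking and ordering primitives, not volatile, are what make
such code correct.
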
diff --git a/Documentation/voyager.txt b/Documentation/voyager.txt
deleted file mode 100644
index 2749af552cdf..000000000000
--- a/Documentation/voyager.txt
+++ /dev/null
@@ -1,95 +0,0 @@
1Running Linux on the Voyager Architecture
2=========================================
3
4For full details and current project status, see
5
6http://www.hansenpartnership.com/voyager
7
8The voyager architecture was designed by NCR in the mid 80s to be a
9fully SMP capable RAS computing architecture built around intel's 486
10chip set. The voyager came in three levels of architectural
11sophistication: 3,4 and 5 --- 1 and 2 never made it out of prototype.
12The linux patches support only the Level 5 voyager architecture (any
13machine class 3435 and above).
14
15The Voyager Architecture
16------------------------
17
18Voyager machines consist of a Baseboard with a 386 diagnostic
19processor, a Power Supply Interface (PSI) a Primary and possibly
20Secondary Microchannel bus and between 2 and 20 voyager slots. The
21voyager slots can be populated with memory and cpu cards (up to 4GB
22memory and from 1 486 to 32 Pentium Pro processors). Internally, the
23voyager has a dual arbitrated system bus and a configuration and test
24bus (CAT). The voyager bus speed is 40MHz. Therefore (since all
25voyager cards are dual ported for each system bus) the maximum
26transfer rate is 320Mb/s but only if you have your slot configuration
27tuned (only memory cards can communicate with both busses at once, CPU
28cards utilise them one at a time).
29
30Voyager SMP
31-----------
32
33Since voyager was the first intel based SMP system, it is slightly
34more primitive than the Intel IO-APIC approach to SMP. Voyager allows
35arbitrary interrupt routing (including processor affinity routing) of
36all 16 PC type interrupts. However it does this by using a modified
375259 master/slave chip set instead of an APIC bus. Additionally,
38voyager supports Cross Processor Interrupts (CPI) equivalent to the
39APIC IPIs. There are two routed voyager interrupt lines provided to
40each slot.
41
42Processor Cards
43---------------
44
45These come in single, dyadic and quad configurations (the quads are
46problematic--see later). The maximum configuration is 8 quad cards
47for 32 way SMP.
48
49Quad Processors
50---------------
51
52Because voyager only supplies two interrupt lines to each Processor
53card, the Quad processors have to be configured (and Bootstrapped) in
54as a pair of Master/Slave processors.
55
56In fact, most Quad cards only accept one VIC interrupt line, so they
57have one interrupt handling processor (called the VIC extended
58processor) and three non-interrupt handling processors.
59
60Current Status
61--------------
62
63The System will boot on Mono, Dyad and Quad cards. There was
64originally a Quad boot problem which has been fixed by proper gdt
65alignment in the initial boot loader. If you still cannot get your
66voyager system to boot, email me at:
67
68<J.E.J.Bottomley@HansenPartnership.com>
69
70
71The Quad cards now support using the separate Quad CPI vectors instead
72of going through the VIC mailbox system.
73
74The Level 4 architecture (3430 and 3360 Machines) should also work
75fine.
76
77Dump Switch
78-----------
79
80The voyager dump switch sends out a broadcast NMI which the voyager
81code intercepts and does a task dump.
82
83Power Switch
84------------
85
86The front panel power switch is intercepted by the kernel and should
87cause a system shutdown and power off.
88
89A Note About Mixed CPU Systems
90------------------------------
91
92Linux isn't designed to handle mixed CPU systems very well. In order
93to get everything going you *must* make sure that your lowest
94capability CPU is used for booting. Also, mixing CPU classes
95(e.g. 486 and 586) is really not going to work very well at all.
diff --git a/Documentation/watchdog/src/watchdog-simple.c b/Documentation/watchdog/src/watchdog-simple.c
index 4cf72f3fa8e9..ba45803a2216 100644
--- a/Documentation/watchdog/src/watchdog-simple.c
+++ b/Documentation/watchdog/src/watchdog-simple.c
@@ -17,9 +17,6 @@ int main(void)
17 ret = -1; 17 ret = -1;
18 break; 18 break;
19 } 19 }
20 ret = fsync(fd);
21 if (ret)
22 break;
23 sleep(10); 20 sleep(10);
24 } 21 }
25 close(fd); 22 close(fd);
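
The fsync() after each keep-alive write is dropped here, presumably because
the watchdog is a character device whose writes are not cached, so there is
nothing for fsync() to flush. A minimal sketch of the resulting keep-alive
loop (close to the file after this change, with error handling slightly
abbreviated):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int ret = 0;

	if (fd == -1) {
		perror("watchdog");
		return 1;
	}
	/* Pet the watchdog every 10 seconds; if this process dies, the
	 * timer expires and the hardware resets the machine. */
	while (1) {
		if (write(fd, "\0", 1) != 1) {
			ret = -1;
			break;
		}
		sleep(10);
	}
	close(fd);
	return ret;
}
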
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index a750532ffcf8..63fdc34ceb98 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -31,6 +31,8 @@ static void keep_alive(void)
31 */ 31 */
32int main(int argc, char *argv[]) 32int main(int argc, char *argv[])
33{ 33{
34 int flags;
35
34 fd = open("/dev/watchdog", O_WRONLY); 36 fd = open("/dev/watchdog", O_WRONLY);
35 37
36 if (fd == -1) { 38 if (fd == -1) {
@@ -41,12 +43,14 @@ int main(int argc, char *argv[])
41 43
42 if (argc > 1) { 44 if (argc > 1) {
43 if (!strncasecmp(argv[1], "-d", 2)) { 45 if (!strncasecmp(argv[1], "-d", 2)) {
44 ioctl(fd, WDIOC_SETOPTIONS, WDIOS_DISABLECARD); 46 flags = WDIOS_DISABLECARD;
47 ioctl(fd, WDIOC_SETOPTIONS, &flags);
45 fprintf(stderr, "Watchdog card disabled.\n"); 48 fprintf(stderr, "Watchdog card disabled.\n");
46 fflush(stderr); 49 fflush(stderr);
47 exit(0); 50 exit(0);
48 } else if (!strncasecmp(argv[1], "-e", 2)) { 51 } else if (!strncasecmp(argv[1], "-e", 2)) {
49 ioctl(fd, WDIOC_SETOPTIONS, WDIOS_ENABLECARD); 52 flags = WDIOS_ENABLECARD;
53 ioctl(fd, WDIOC_SETOPTIONS, &flags);
50 fprintf(stderr, "Watchdog card enabled.\n"); 54 fprintf(stderr, "Watchdog card enabled.\n");
51 fflush(stderr); 55 fflush(stderr);
52 exit(0); 56 exit(0);
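
The point of this change is confirmed by the watchdog-api.txt hunk below:
WDIOC_SETOPTIONS takes a pointer to an integer holding the option flags, so
passing WDIOS_DISABLECARD or WDIOS_ENABLECARD by value, as the old code did,
effectively hands the kernel an invalid pointer. A minimal standalone sketch
of the corrected call, assuming <linux/watchdog.h> provides the ioctl and flag
definitions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int flags = WDIOS_DISABLECARD;

	if (fd == -1) {
		perror("watchdog");
		return 1;
	}
	/* Pass a pointer to the flags, not the flag value itself. */
	if (ioctl(fd, WDIOC_SETOPTIONS, &flags) < 0)
		perror("WDIOC_SETOPTIONS");

	close(fd);
	return 0;
}
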
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index 4cc4ba9d7150..eb7132ed8bbc 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -222,11 +222,10 @@ returned value is the temperature in degrees fahrenheit.
222 ioctl(fd, WDIOC_GETTEMP, &temperature); 222 ioctl(fd, WDIOC_GETTEMP, &temperature);
223 223
224Finally the SETOPTIONS ioctl can be used to control some aspects of 224Finally the SETOPTIONS ioctl can be used to control some aspects of
225the card's operation; right now the pcwd driver is the only one 225the card's operation.
226supporting this ioctl.
227 226
228 int options = 0; 227 int options = 0;
229 ioctl(fd, WDIOC_SETOPTIONS, options); 228 ioctl(fd, WDIOC_SETOPTIONS, &options);
230 229
231The following options are available: 230The following options are available:
232 231