| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-07-26 18:04:59 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-07-26 18:04:59 -0400 |
| commit | d9ecdb282c91952796b7542c4f57fd6de6948d7b (patch) | |
| tree | fd4de7923968afa7d2981fb037e2255fc2cfa1e1 /include | |
| parent | 4ef584ba84125b67c17b5aded38e7783cd8cdef0 (diff) | |
| parent | 1d1f8b377c48e5aeddaea52eba74cc0539f088cd (diff) | |
Merge branch 'for_rmk_13' of git://git.mnementh.co.uk/linux-2.6-im
Diffstat (limited to 'include')
79 files changed, 1288 insertions, 361 deletions
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h index db351d1296f4..a5801ae02e4b 100644 --- a/include/asm-alpha/dma-mapping.h +++ b/include/asm-alpha/dma-mapping.h | |||
@@ -24,8 +24,8 @@ | |||
24 | pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) | 24 | pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) |
25 | #define dma_supported(dev, mask) \ | 25 | #define dma_supported(dev, mask) \ |
26 | pci_dma_supported(alpha_gendev_to_pci(dev), mask) | 26 | pci_dma_supported(alpha_gendev_to_pci(dev), mask) |
27 | #define dma_mapping_error(addr) \ | 27 | #define dma_mapping_error(dev, addr) \ |
28 | pci_dma_mapping_error(addr) | 28 | pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr) |
29 | 29 | ||
30 | #else /* no PCI - no IOMMU. */ | 30 | #else /* no PCI - no IOMMU. */ |
31 | 31 | ||
@@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
45 | #define dma_unmap_page(dev, addr, size, dir) ((void)0) | 45 | #define dma_unmap_page(dev, addr, size, dir) ((void)0) |
46 | #define dma_unmap_sg(dev, sg, nents, dir) ((void)0) | 46 | #define dma_unmap_sg(dev, sg, nents, dir) ((void)0) |
47 | 47 | ||
48 | #define dma_mapping_error(addr) (0) | 48 | #define dma_mapping_error(dev, addr) (0) |
49 | 49 | ||
50 | #endif /* !CONFIG_PCI */ | 50 | #endif /* !CONFIG_PCI */ |
51 | 51 | ||
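The recurring change across these headers is that `dma_mapping_error()` now takes the device that performed the mapping. A minimal caller-side sketch of the updated usage follows; the function and variable names are invented for illustration and do not come from this diff.

```c
#include <linux/dma-mapping.h>

/* Minimal sketch of a driver mapping path after this API change.
 * dma_mapping_error() now takes the struct device * as well as the
 * handle; everything else here is hypothetical. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))	/* previously dma_mapping_error(*handle) */
		return -ENOMEM;
	return 0;
}
```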
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h index d31fd49ff79a..2a14302c17a3 100644 --- a/include/asm-alpha/pci.h +++ b/include/asm-alpha/pci.h | |||
@@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *, | |||
106 | /* Test for pci_map_single or pci_map_page having generated an error. */ | 106 | /* Test for pci_map_single or pci_map_page having generated an error. */ |
107 | 107 | ||
108 | static inline int | 108 | static inline int |
109 | pci_dma_mapping_error(dma_addr_t dma_addr) | 109 | pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) |
110 | { | 110 | { |
111 | return dma_addr == 0; | 111 | return dma_addr == 0; |
112 | } | 112 | } |
diff --git a/include/asm-arm/arch-pxa/pxa25x-udc.h b/include/asm-arm/arch-pxa/pxa25x-udc.h index 840305916b6d..1b80a4805a60 100644 --- a/include/asm-arm/arch-pxa/pxa25x-udc.h +++ b/include/asm-arm/arch-pxa/pxa25x-udc.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ASM_ARCH_PXA25X_UDC_H | 2 | #define _ASM_ARCH_PXA25X_UDC_H |
3 | 3 | ||
4 | #ifdef _ASM_ARCH_PXA27X_UDC_H | 4 | #ifdef _ASM_ARCH_PXA27X_UDC_H |
5 | #error You can't include both PXA25x and PXA27x UDC support | 5 | #error "You can't include both PXA25x and PXA27x UDC support" |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | #define UDC_RES1 __REG(0x40600004) /* UDC Undocumented - Reserved1 */ | 8 | #define UDC_RES1 __REG(0x40600004) /* UDC Undocumented - Reserved1 */ |
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index 70b0fe724b62..03cf1ee977b7 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h | |||
@@ -424,9 +424,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma, | |||
424 | } | 424 | } |
425 | 425 | ||
426 | #define flush_dcache_mmap_lock(mapping) \ | 426 | #define flush_dcache_mmap_lock(mapping) \ |
427 | write_lock_irq(&(mapping)->tree_lock) | 427 | spin_lock_irq(&(mapping)->tree_lock) |
428 | #define flush_dcache_mmap_unlock(mapping) \ | 428 | #define flush_dcache_mmap_unlock(mapping) \ |
429 | write_unlock_irq(&(mapping)->tree_lock) | 429 | spin_unlock_irq(&(mapping)->tree_lock) |
430 | 430 | ||
431 | #define flush_icache_user_range(vma,page,addr,len) \ | 431 | #define flush_icache_user_range(vma,page,addr,len) \ |
432 | flush_dcache_page(page) | 432 | flush_dcache_page(page) |
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h index e99406a7bece..f41335ba6337 100644 --- a/include/asm-arm/dma-mapping.h +++ b/include/asm-arm/dma-mapping.h | |||
@@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle) | |||
56 | /* | 56 | /* |
57 | * DMA errors are defined by all-bits-set in the DMA address. | 57 | * DMA errors are defined by all-bits-set in the DMA address. |
58 | */ | 58 | */ |
59 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 59 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
60 | { | 60 | { |
61 | return dma_addr == ~0; | 61 | return dma_addr == ~0; |
62 | } | 62 | } |
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h index 57dc672bab8e..0399359ab5d8 100644 --- a/include/asm-avr32/dma-mapping.h +++ b/include/asm-avr32/dma-mapping.h | |||
@@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) | |||
35 | /* | 35 | /* |
36 | * dma_map_single can't fail as it is implemented now. | 36 | * dma_map_single can't fail as it is implemented now. |
37 | */ | 37 | */ |
38 | static inline int dma_mapping_error(dma_addr_t addr) | 38 | static inline int dma_mapping_error(struct device *dev, dma_addr_t addr) |
39 | { | 39 | { |
40 | return 0; | 40 | return 0; |
41 | } | 41 | } |
diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 76033831eb35..320aa5e167e9 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h | |||
@@ -92,16 +92,20 @@ extern void *l1_data_B_sram_alloc(size_t); | |||
92 | extern void *l1_inst_sram_alloc(size_t); | 92 | extern void *l1_inst_sram_alloc(size_t); |
93 | extern void *l1_data_sram_alloc(size_t); | 93 | extern void *l1_data_sram_alloc(size_t); |
94 | extern void *l1_data_sram_zalloc(size_t); | 94 | extern void *l1_data_sram_zalloc(size_t); |
95 | extern void *l2_sram_alloc(size_t); | ||
96 | extern void *l2_sram_zalloc(size_t); | ||
95 | extern int l1_data_A_sram_free(const void*); | 97 | extern int l1_data_A_sram_free(const void*); |
96 | extern int l1_data_B_sram_free(const void*); | 98 | extern int l1_data_B_sram_free(const void*); |
97 | extern int l1_inst_sram_free(const void*); | 99 | extern int l1_inst_sram_free(const void*); |
98 | extern int l1_data_sram_free(const void*); | 100 | extern int l1_data_sram_free(const void*); |
101 | extern int l2_sram_free(const void *); | ||
99 | extern int sram_free(const void*); | 102 | extern int sram_free(const void*); |
100 | 103 | ||
101 | #define L1_INST_SRAM 0x00000001 | 104 | #define L1_INST_SRAM 0x00000001 |
102 | #define L1_DATA_A_SRAM 0x00000002 | 105 | #define L1_DATA_A_SRAM 0x00000002 |
103 | #define L1_DATA_B_SRAM 0x00000004 | 106 | #define L1_DATA_B_SRAM 0x00000004 |
104 | #define L1_DATA_SRAM 0x00000006 | 107 | #define L1_DATA_SRAM 0x00000006 |
108 | #define L2_SRAM 0x00000008 | ||
105 | extern void *sram_alloc_with_lsl(size_t, unsigned long); | 109 | extern void *sram_alloc_with_lsl(size_t, unsigned long); |
106 | extern int sram_free_with_lsl(const void*); | 110 | extern int sram_free_with_lsl(const void*); |
107 | 111 | ||
@@ -114,7 +118,9 @@ extern struct file_operations dpmc_fops; | |||
114 | extern unsigned long _ramstart, _ramend, _rambase; | 118 | extern unsigned long _ramstart, _ramend, _rambase; |
115 | extern unsigned long memory_start, memory_end, physical_mem_end; | 119 | extern unsigned long memory_start, memory_end, physical_mem_end; |
116 | extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], | 120 | extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], |
117 | _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[]; | 121 | _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[], |
122 | _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], | ||
123 | _ebss_l2[], _l2_lma_start[]; | ||
118 | 124 | ||
119 | #ifdef CONFIG_MTD_UCLINUX | 125 | #ifdef CONFIG_MTD_UCLINUX |
120 | extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; | 126 | extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; |
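The new `l2_sram_*` entry points mirror the existing L1 allocators. Below is a hedged sketch of how a Blackfin driver might use them, assuming they behave like their L1 counterparts (zalloc returns NULL on exhaustion, free returns 0 on success); the helper name is hypothetical.

```c
#include <asm/bfin-global.h>	/* l2_sram_zalloc()/l2_sram_free() prototypes */
#include <linux/errno.h>

/* Hypothetical helper placing a buffer in L2 SRAM via the new allocators. */
static int example_use_l2_sram(void)
{
	void *buf = l2_sram_zalloc(1024);

	if (!buf)
		return -ENOMEM;
	/* ... descriptors or coefficient tables would live in L2 here ... */
	return l2_sram_free(buf);
}
```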
diff --git a/include/asm-blackfin/dma.h b/include/asm-blackfin/dma.h index c0d5259e315b..3cd4b522aa3f 100644 --- a/include/asm-blackfin/dma.h +++ b/include/asm-blackfin/dma.h | |||
@@ -144,8 +144,16 @@ struct dma_channel { | |||
144 | void *data; | 144 | void *data; |
145 | unsigned int dma_enable_flag; | 145 | unsigned int dma_enable_flag; |
146 | unsigned int loopback_flag; | 146 | unsigned int loopback_flag; |
147 | #ifdef CONFIG_PM | ||
148 | unsigned short saved_peripheral_map; | ||
149 | #endif | ||
147 | }; | 150 | }; |
148 | 151 | ||
152 | #ifdef CONFIG_PM | ||
153 | int blackfin_dma_suspend(void); | ||
154 | void blackfin_dma_resume(void); | ||
155 | #endif | ||
156 | |||
149 | /******************************************************************************* | 157 | /******************************************************************************* |
150 | * DMA API's | 158 | * DMA API's |
151 | *******************************************************************************/ | 159 | *******************************************************************************/ |
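The DMA channel state now carries a saved peripheral map under CONFIG_PM, with matching suspend/resume helpers. A hedged sketch of how platform power-management code might call them; the surrounding callback names are hypothetical, and `blackfin_dma_suspend()` is assumed to return 0 on success.

```c
#ifdef CONFIG_PM
/* Hypothetical platform PM glue; only the two helpers come from this header. */
static int example_pm_enter(void)
{
	return blackfin_dma_suspend();	/* saves each channel's peripheral map */
}

static void example_pm_exit(void)
{
	blackfin_dma_resume();		/* restores the saved peripheral maps */
}
#endif
```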
diff --git a/include/asm-blackfin/dpmc.h b/include/asm-blackfin/dpmc.h index 7f34cd384f12..de28e6e018b3 100644 --- a/include/asm-blackfin/dpmc.h +++ b/include/asm-blackfin/dpmc.h | |||
@@ -7,63 +7,18 @@ | |||
7 | #ifndef _BLACKFIN_DPMC_H_ | 7 | #ifndef _BLACKFIN_DPMC_H_ |
8 | #define _BLACKFIN_DPMC_H_ | 8 | #define _BLACKFIN_DPMC_H_ |
9 | 9 | ||
10 | #define SLEEP_MODE 1 | ||
11 | #define DEEP_SLEEP_MODE 2 | ||
12 | #define ACTIVE_PLL_DISABLED 3 | ||
13 | #define FULLON_MODE 4 | ||
14 | #define ACTIVE_PLL_ENABLED 5 | ||
15 | #define HIBERNATE_MODE 6 | ||
16 | |||
17 | #define IOCTL_FULL_ON_MODE _IO('s', 0xA0) | ||
18 | #define IOCTL_ACTIVE_MODE _IO('s', 0xA1) | ||
19 | #define IOCTL_SLEEP_MODE _IO('s', 0xA2) | ||
20 | #define IOCTL_DEEP_SLEEP_MODE _IO('s', 0xA3) | ||
21 | #define IOCTL_HIBERNATE_MODE _IO('s', 0xA4) | ||
22 | #define IOCTL_CHANGE_FREQUENCY _IOW('s', 0xA5, unsigned long) | ||
23 | #define IOCTL_CHANGE_VOLTAGE _IOW('s', 0xA6, unsigned long) | ||
24 | #define IOCTL_SET_CCLK _IOW('s', 0xA7, unsigned long) | ||
25 | #define IOCTL_SET_SCLK _IOW('s', 0xA8, unsigned long) | ||
26 | #define IOCTL_GET_PLLSTATUS _IOW('s', 0xA9, unsigned long) | ||
27 | #define IOCTL_GET_CORECLOCK _IOW('s', 0xAA, unsigned long) | ||
28 | #define IOCTL_GET_SYSTEMCLOCK _IOW('s', 0xAB, unsigned long) | ||
29 | #define IOCTL_GET_VCO _IOW('s', 0xAC, unsigned long) | ||
30 | #define IOCTL_DISABLE_WDOG_TIMER _IO('s', 0xAD) | ||
31 | #define IOCTL_UNMASK_WDOG_WAKEUP_EVENT _IO('s',0xAE) | ||
32 | #define IOCTL_PROGRAM_WDOG_TIMER _IOW('s',0xAF,unsigned long) | ||
33 | #define IOCTL_CLEAR_WDOG_WAKEUP_EVENT _IO('s',0xB0) | ||
34 | #define IOCTL_SLEEP_DEEPER_MODE _IO('s',0xB1) | ||
35 | |||
36 | #define DPMC_MINOR 254 | ||
37 | |||
38 | #define ON 0 | ||
39 | #define OFF 1 | ||
40 | |||
41 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
11 | #ifndef __ASSEMBLY__ | ||
42 | 12 | ||
43 | unsigned long calc_volt(void); | ||
44 | int calc_vlev(int vlt); | ||
45 | unsigned long change_voltage(unsigned long volt); | ||
46 | int calc_msel(int vco_hz); | ||
47 | unsigned long change_frequency(unsigned long vco_mhz); | ||
48 | int set_pll_div(unsigned short sel, unsigned char flag); | ||
49 | int get_vco(void); | ||
50 | unsigned long change_system_clock(unsigned long clock); | ||
51 | unsigned long change_core_clock(unsigned long clock); | ||
52 | unsigned long get_pll_status(void); | ||
53 | void change_baud(int baud); | ||
54 | void fullon_mode(void); | ||
55 | void active_mode(void); | ||
56 | void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); | 13 | void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); |
57 | void deep_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); | 14 | void deep_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); |
58 | void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); | 15 | void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); |
59 | void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); | 16 | void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); |
60 | void program_wdog_timer(unsigned long); | 17 | void do_hibernate(int wakeup); |
61 | void unmask_wdog_wakeup_evt(void); | 18 | void set_dram_srfs(void); |
62 | void clear_wdog_wakeup_evt(void); | 19 | void unset_dram_srfs(void); |
63 | void disable_wdog_timer(void); | ||
64 | 20 | ||
65 | extern unsigned long get_cclk(void); | 21 | #define VRPAIR(vlev, freq) (((vlev) << 16) | ((freq) >> 16)) |
66 | extern unsigned long get_sclk(void); | ||
67 | 22 | ||
68 | struct bfin_dpmc_platform_data { | 23 | struct bfin_dpmc_platform_data { |
69 | const unsigned int *tuple_tab; | 24 | const unsigned int *tuple_tab; |
@@ -71,8 +26,33 @@ struct bfin_dpmc_platform_data { | |||
71 | unsigned short vr_settling_time; /* in us */ | 26 | unsigned short vr_settling_time; /* in us */ |
72 | }; | 27 | }; |
73 | 28 | ||
74 | #define VRPAIR(vlev, freq) (((vlev) << 16) | ((freq) >> 16)) | 29 | #else |
30 | |||
31 | #define PM_PUSH(x) \ | ||
32 | R0 = [P0 + (x - SRAM_BASE_ADDRESS)];\ | ||
33 | [--SP] = R0;\ | ||
34 | |||
35 | #define PM_POP(x) \ | ||
36 | R0 = [SP++];\ | ||
37 | [P0 + (x - SRAM_BASE_ADDRESS)] = R0;\ | ||
38 | |||
39 | #define PM_SYS_PUSH(x) \ | ||
40 | R0 = [P0 + (x - PLL_CTL)];\ | ||
41 | [--SP] = R0;\ | ||
42 | |||
43 | #define PM_SYS_POP(x) \ | ||
44 | R0 = [SP++];\ | ||
45 | [P0 + (x - PLL_CTL)] = R0;\ | ||
46 | |||
47 | #define PM_SYS_PUSH16(x) \ | ||
48 | R0 = w[P0 + (x - PLL_CTL)];\ | ||
49 | [--SP] = R0;\ | ||
50 | |||
51 | #define PM_SYS_POP16(x) \ | ||
52 | R0 = [SP++];\ | ||
53 | w[P0 + (x - PLL_CTL)] = R0;\ | ||
75 | 54 | ||
55 | #endif | ||
76 | #endif /* __KERNEL__ */ | 56 | #endif /* __KERNEL__ */ |
77 | 57 | ||
78 | #endif /*_BLACKFIN_DPMC_H_*/ | 58 | #endif /*_BLACKFIN_DPMC_H_*/ |
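The retained `VRPAIR()` macro packs a voltage level and a frequency into one 32-bit tuple for the DPMC frequency/voltage table. A hedged sketch of a board file filling `bfin_dpmc_platform_data` follows; the VLEV_* values, frequencies, and names are illustrative, not taken from any real board file.

```c
/* Hedged board-file sketch using VRPAIR(); all values are illustrative. */
static const unsigned int example_vlev_tab[] = {
	VRPAIR(VLEV_100, 400000000),	/* 1.00 V assumed sufficient up to 400 MHz */
	VRPAIR(VLEV_110, 500000000),	/* 1.10 V assumed sufficient up to 500 MHz */
};

static struct bfin_dpmc_platform_data example_dpmc_data = {
	.tuple_tab        = example_vlev_tab,
	.vr_settling_time = 25,		/* us */
	/* remaining members of the struct are omitted from this sketch */
};
```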
diff --git a/include/asm-blackfin/elf.h b/include/asm-blackfin/elf.h index 30303fc8292c..67a03a8a353e 100644 --- a/include/asm-blackfin/elf.h +++ b/include/asm-blackfin/elf.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #define EF_BFIN_FDPIC 0x00000002 /* -mfdpic */ | 15 | #define EF_BFIN_FDPIC 0x00000002 /* -mfdpic */ |
16 | #define EF_BFIN_CODE_IN_L1 0x00000010 /* --code-in-l1 */ | 16 | #define EF_BFIN_CODE_IN_L1 0x00000010 /* --code-in-l1 */ |
17 | #define EF_BFIN_DATA_IN_L1 0x00000020 /* --data-in-l1 */ | 17 | #define EF_BFIN_DATA_IN_L1 0x00000020 /* --data-in-l1 */ |
18 | #define EF_BFIN_CODE_IN_L2 0x00000040 /* --code-in-l2 */ | ||
19 | #define EF_BFIN_DATA_IN_L2 0x00000080 /* --data-in-l2 */ | ||
18 | 20 | ||
19 | typedef unsigned long elf_greg_t; | 21 | typedef unsigned long elf_greg_t; |
20 | 22 | ||
diff --git a/include/asm-blackfin/gpio.h b/include/asm-blackfin/gpio.h index ff95e9d88342..168f1251eb4d 100644 --- a/include/asm-blackfin/gpio.h +++ b/include/asm-blackfin/gpio.h | |||
@@ -376,8 +376,12 @@ struct gpio_port_t { | |||
376 | #endif | 376 | #endif |
377 | 377 | ||
378 | #ifdef CONFIG_PM | 378 | #ifdef CONFIG_PM |
379 | unsigned int bfin_pm_setup(void); | 379 | |
380 | void bfin_pm_restore(void); | 380 | unsigned int bfin_pm_standby_setup(void); |
381 | void bfin_pm_standby_restore(void); | ||
382 | |||
383 | void bfin_gpio_pm_hibernate_restore(void); | ||
384 | void bfin_gpio_pm_hibernate_suspend(void); | ||
381 | 385 | ||
382 | #ifndef CONFIG_BF54x | 386 | #ifndef CONFIG_BF54x |
383 | #define PM_WAKE_RISING 0x1 | 387 | #define PM_WAKE_RISING 0x1 |
@@ -392,17 +396,8 @@ void gpio_pm_wakeup_free(unsigned gpio); | |||
392 | 396 | ||
393 | struct gpio_port_s { | 397 | struct gpio_port_s { |
394 | unsigned short data; | 398 | unsigned short data; |
395 | unsigned short data_clear; | ||
396 | unsigned short data_set; | ||
397 | unsigned short toggle; | ||
398 | unsigned short maska; | 399 | unsigned short maska; |
399 | unsigned short maska_clear; | ||
400 | unsigned short maska_set; | ||
401 | unsigned short maska_toggle; | ||
402 | unsigned short maskb; | 400 | unsigned short maskb; |
403 | unsigned short maskb_clear; | ||
404 | unsigned short maskb_set; | ||
405 | unsigned short maskb_toggle; | ||
406 | unsigned short dir; | 401 | unsigned short dir; |
407 | unsigned short polar; | 402 | unsigned short polar; |
408 | unsigned short edge; | 403 | unsigned short edge; |
@@ -411,10 +406,10 @@ struct gpio_port_s { | |||
411 | 406 | ||
412 | unsigned short fer; | 407 | unsigned short fer; |
413 | unsigned short reserved; | 408 | unsigned short reserved; |
409 | unsigned short mux; | ||
414 | }; | 410 | }; |
415 | #endif /*CONFIG_BF54x*/ | 411 | #endif /*CONFIG_BF54x*/ |
416 | #endif /*CONFIG_PM*/ | 412 | #endif /*CONFIG_PM*/ |
417 | |||
418 | /*********************************************************** | 413 | /*********************************************************** |
419 | * | 414 | * |
420 | * FUNCTIONS: Blackfin GPIO Driver | 415 | * FUNCTIONS: Blackfin GPIO Driver |
diff --git a/include/asm-blackfin/mach-bf527/anomaly.h b/include/asm-blackfin/mach-bf527/anomaly.h index 4725268a5ada..b7b166f4f064 100644 --- a/include/asm-blackfin/mach-bf527/anomaly.h +++ b/include/asm-blackfin/mach-bf527/anomaly.h | |||
@@ -23,6 +23,8 @@ | |||
23 | #define ANOMALY_05000245 (1) | 23 | #define ANOMALY_05000245 (1) |
24 | /* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ | 24 | /* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ |
25 | #define ANOMALY_05000265 (1) | 25 | #define ANOMALY_05000265 (1) |
26 | /* New Feature: EMAC TX DMA Word Alignment */ | ||
27 | #define ANOMALY_05000285 (1) | ||
26 | /* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */ | 28 | /* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */ |
27 | #define ANOMALY_05000312 (1) | 29 | #define ANOMALY_05000312 (1) |
28 | /* Incorrect Access of OTP_STATUS During otp_write() Function */ | 30 | /* Incorrect Access of OTP_STATUS During otp_write() Function */ |
diff --git a/include/asm-blackfin/mach-bf527/bfin_sir.h b/include/asm-blackfin/mach-bf527/bfin_sir.h index 0612d0c9501c..cfd8ad4f1f2c 100644 --- a/include/asm-blackfin/mach-bf527/bfin_sir.h +++ b/include/asm-blackfin/mach-bf527/bfin_sir.h | |||
@@ -118,16 +118,25 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port) | |||
118 | 118 | ||
119 | #define DRIVER_NAME "bfin_sir" | 119 | #define DRIVER_NAME "bfin_sir" |
120 | 120 | ||
121 | static void bfin_sir_hw_init(void) | 121 | static int bfin_sir_hw_init(void) |
122 | { | 122 | { |
123 | int ret = -ENODEV; | ||
123 | #ifdef CONFIG_BFIN_SIR0 | 124 | #ifdef CONFIG_BFIN_SIR0 |
124 | peripheral_request(P_UART0_TX, DRIVER_NAME); | 125 | ret = peripheral_request(P_UART0_TX, DRIVER_NAME); |
125 | peripheral_request(P_UART0_RX, DRIVER_NAME); | 126 | if (ret) |
127 | return ret; | ||
128 | ret = peripheral_request(P_UART0_RX, DRIVER_NAME); | ||
129 | if (ret) | ||
130 | return ret; | ||
126 | #endif | 131 | #endif |
127 | 132 | ||
128 | #ifdef CONFIG_BFIN_SIR1 | 133 | #ifdef CONFIG_BFIN_SIR1 |
129 | peripheral_request(P_UART1_TX, DRIVER_NAME); | 134 | ret = peripheral_request(P_UART1_TX, DRIVER_NAME); |
130 | peripheral_request(P_UART1_RX, DRIVER_NAME); | 135 | if (ret) |
136 | return ret; | ||
137 | ret = peripheral_request(P_UART1_RX, DRIVER_NAME); | ||
138 | if (ret) | ||
139 | return ret; | ||
131 | #endif | 140 | #endif |
132 | SSYNC(); | 141 | return ret; |
133 | } | 142 | } |
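`bfin_sir_hw_init()` now propagates `peripheral_request()` failures instead of returning void. A hedged sketch of how the probe path might consume the new return value; the probe function name is hypothetical.

```c
#include <linux/platform_device.h>

/* Hypothetical probe-path fragment: bail out when a UART pin cannot be
 * claimed instead of silently continuing. */
static int example_sir_probe(struct platform_device *pdev)
{
	int err = bfin_sir_hw_init();

	if (err) {
		dev_err(&pdev->dev, "failed to request SIR UART pins: %d\n", err);
		return err;
	}
	/* ... normal SIR/IrDA setup continues here ... */
	return 0;
}
```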
diff --git a/include/asm-blackfin/mach-bf527/defBF527.h b/include/asm-blackfin/mach-bf527/defBF527.h index 82134f578f32..f1a70db70cb8 100644 --- a/include/asm-blackfin/mach-bf527/defBF527.h +++ b/include/asm-blackfin/mach-bf527/defBF527.h | |||
@@ -302,6 +302,7 @@ | |||
302 | #define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */ | 302 | #define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */ |
303 | #define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */ | 303 | #define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */ |
304 | #define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */ | 304 | #define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */ |
305 | #define TXDWA 0x00000010 /* Transmit Frame DMA Word Alignment (Odd/Even*) */ | ||
305 | #define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */ | 306 | #define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */ |
306 | 307 | ||
307 | #define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */ | 308 | #define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */ |
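The new TXDWA bit parallels the existing RXDWA alignment control in EMAC_SYSCTL. A hedged fragment showing how a MAC driver could set both; the `bfin_read_EMAC_SYSCTL()`/`bfin_write_EMAC_SYSCTL()` accessors are assumed to follow the usual Blackfin MMR naming and should be verified against the cdef headers.

```c
#include <linux/types.h>

/* Hedged fragment: enable odd/even DMA word alignment for RX and TX. */
static void example_enable_dma_word_align(void)
{
	u32 sysctl = bfin_read_EMAC_SYSCTL();	/* assumed accessor name */

	sysctl |= RXDWA | TXDWA;
	bfin_write_EMAC_SYSCTL(sysctl);		/* assumed accessor name */
}
```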
diff --git a/include/asm-blackfin/mach-bf527/mem_init.h b/include/asm-blackfin/mach-bf527/mem_init.h index 008ca66719e2..cbe03f4a5698 100644 --- a/include/asm-blackfin/mach-bf527/mem_init.h +++ b/include/asm-blackfin/mach-bf527/mem_init.h | |||
@@ -146,33 +146,6 @@ | |||
146 | #define SDRAM_CL CL_3 | 146 | #define SDRAM_CL CL_3 |
147 | #endif | 147 | #endif |
148 | 148 | ||
149 | #if (CONFIG_MEM_SIZE == 128) | ||
150 | #define SDRAM_SIZE EBSZ_128 | ||
151 | #endif | ||
152 | #if (CONFIG_MEM_SIZE == 64) | ||
153 | #define SDRAM_SIZE EBSZ_64 | ||
154 | #endif | ||
155 | #if (CONFIG_MEM_SIZE == 32) | ||
156 | #define SDRAM_SIZE EBSZ_32 | ||
157 | #endif | ||
158 | #if (CONFIG_MEM_SIZE == 16) | ||
159 | #define SDRAM_SIZE EBSZ_16 | ||
160 | #endif | ||
161 | #if (CONFIG_MEM_ADD_WIDTH == 11) | ||
162 | #define SDRAM_WIDTH EBCAW_11 | ||
163 | #endif | ||
164 | #if (CONFIG_MEM_ADD_WIDTH == 10) | ||
165 | #define SDRAM_WIDTH EBCAW_10 | ||
166 | #endif | ||
167 | #if (CONFIG_MEM_ADD_WIDTH == 9) | ||
168 | #define SDRAM_WIDTH EBCAW_9 | ||
169 | #endif | ||
170 | #if (CONFIG_MEM_ADD_WIDTH == 8) | ||
171 | #define SDRAM_WIDTH EBCAW_8 | ||
172 | #endif | ||
173 | |||
174 | #define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE) | ||
175 | |||
176 | /* Equation from section 17 (p17-46) of BF533 HRM */ | 149 | /* Equation from section 17 (p17-46) of BF533 HRM */ |
177 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) | 150 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) |
178 | 151 | ||
diff --git a/include/asm-blackfin/mach-bf533/bfin_sir.h b/include/asm-blackfin/mach-bf533/bfin_sir.h index cefcf8bb505b..9bb87e9e2e9b 100644 --- a/include/asm-blackfin/mach-bf533/bfin_sir.h +++ b/include/asm-blackfin/mach-bf533/bfin_sir.h | |||
@@ -110,11 +110,16 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port) | |||
110 | 110 | ||
111 | #define DRIVER_NAME "bfin_sir" | 111 | #define DRIVER_NAME "bfin_sir" |
112 | 112 | ||
113 | static void bfin_sir_hw_init(void) | 113 | static int bfin_sir_hw_init(void) |
114 | { | 114 | { |
115 | int ret = -ENODEV; | ||
115 | #ifdef CONFIG_BFIN_SIR0 | 116 | #ifdef CONFIG_BFIN_SIR0 |
116 | peripheral_request(P_UART0_TX, DRIVER_NAME); | 117 | ret = peripheral_request(P_UART0_TX, DRIVER_NAME); |
117 | peripheral_request(P_UART0_RX, DRIVER_NAME); | 118 | if (ret) |
119 | return ret; | ||
120 | ret = peripheral_request(P_UART0_RX, DRIVER_NAME); | ||
121 | if (ret) | ||
122 | return ret; | ||
118 | #endif | 123 | #endif |
119 | SSYNC(); | 124 | return ret; |
120 | } | 125 | } |
diff --git a/include/asm-blackfin/mach-bf533/mem_init.h b/include/asm-blackfin/mach-bf533/mem_init.h index f8f31901fca9..995c06b2b1ef 100644 --- a/include/asm-blackfin/mach-bf533/mem_init.h +++ b/include/asm-blackfin/mach-bf533/mem_init.h | |||
@@ -133,33 +133,6 @@ | |||
133 | #define SDRAM_CL CL_3 | 133 | #define SDRAM_CL CL_3 |
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | #if (CONFIG_MEM_SIZE == 128) | ||
137 | #define SDRAM_SIZE EBSZ_128 | ||
138 | #endif | ||
139 | #if (CONFIG_MEM_SIZE == 64) | ||
140 | #define SDRAM_SIZE EBSZ_64 | ||
141 | #endif | ||
142 | #if (CONFIG_MEM_SIZE == 32) | ||
143 | #define SDRAM_SIZE EBSZ_32 | ||
144 | #endif | ||
145 | #if (CONFIG_MEM_SIZE == 16) | ||
146 | #define SDRAM_SIZE EBSZ_16 | ||
147 | #endif | ||
148 | #if (CONFIG_MEM_ADD_WIDTH == 11) | ||
149 | #define SDRAM_WIDTH EBCAW_11 | ||
150 | #endif | ||
151 | #if (CONFIG_MEM_ADD_WIDTH == 10) | ||
152 | #define SDRAM_WIDTH EBCAW_10 | ||
153 | #endif | ||
154 | #if (CONFIG_MEM_ADD_WIDTH == 9) | ||
155 | #define SDRAM_WIDTH EBCAW_9 | ||
156 | #endif | ||
157 | #if (CONFIG_MEM_ADD_WIDTH == 8) | ||
158 | #define SDRAM_WIDTH EBCAW_8 | ||
159 | #endif | ||
160 | |||
161 | #define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE) | ||
162 | |||
163 | /* Equation from section 17 (p17-46) of BF533 HRM */ | 136 | /* Equation from section 17 (p17-46) of BF533 HRM */ |
164 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) | 137 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) |
165 | 138 | ||
diff --git a/include/asm-blackfin/mach-bf537/bfin_sir.h b/include/asm-blackfin/mach-bf537/bfin_sir.h index 0612d0c9501c..cfd8ad4f1f2c 100644 --- a/include/asm-blackfin/mach-bf537/bfin_sir.h +++ b/include/asm-blackfin/mach-bf537/bfin_sir.h | |||
@@ -118,16 +118,25 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port) | |||
118 | 118 | ||
119 | #define DRIVER_NAME "bfin_sir" | 119 | #define DRIVER_NAME "bfin_sir" |
120 | 120 | ||
121 | static void bfin_sir_hw_init(void) | 121 | static int bfin_sir_hw_init(void) |
122 | { | 122 | { |
123 | int ret = -ENODEV; | ||
123 | #ifdef CONFIG_BFIN_SIR0 | 124 | #ifdef CONFIG_BFIN_SIR0 |
124 | peripheral_request(P_UART0_TX, DRIVER_NAME); | 125 | ret = peripheral_request(P_UART0_TX, DRIVER_NAME); |
125 | peripheral_request(P_UART0_RX, DRIVER_NAME); | 126 | if (ret) |
127 | return ret; | ||
128 | ret = peripheral_request(P_UART0_RX, DRIVER_NAME); | ||
129 | if (ret) | ||
130 | return ret; | ||
126 | #endif | 131 | #endif |
127 | 132 | ||
128 | #ifdef CONFIG_BFIN_SIR1 | 133 | #ifdef CONFIG_BFIN_SIR1 |
129 | peripheral_request(P_UART1_TX, DRIVER_NAME); | 134 | ret = peripheral_request(P_UART1_TX, DRIVER_NAME); |
130 | peripheral_request(P_UART1_RX, DRIVER_NAME); | 135 | if (ret) |
136 | return ret; | ||
137 | ret = peripheral_request(P_UART1_RX, DRIVER_NAME); | ||
138 | if (ret) | ||
139 | return ret; | ||
131 | #endif | 140 | #endif |
132 | SSYNC(); | 141 | return ret; |
133 | } | 142 | } |
diff --git a/include/asm-blackfin/mach-bf537/defBF537.h b/include/asm-blackfin/mach-bf537/defBF537.h index 3f455909c418..abde24c6d3b1 100644 --- a/include/asm-blackfin/mach-bf537/defBF537.h +++ b/include/asm-blackfin/mach-bf537/defBF537.h | |||
@@ -290,6 +290,7 @@ | |||
290 | #define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */ | 290 | #define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */ |
291 | #define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */ | 291 | #define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */ |
292 | #define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */ | 292 | #define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */ |
293 | #define TXDWA 0x00000010 /* Transmit Frame DMA Word Alignment (Odd/Even*) */ | ||
293 | #define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */ | 294 | #define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */ |
294 | 295 | ||
295 | #define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */ | 296 | #define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */ |
diff --git a/include/asm-blackfin/mach-bf537/mem_init.h b/include/asm-blackfin/mach-bf537/mem_init.h index 9ad979d416c6..f67698f670ca 100644 --- a/include/asm-blackfin/mach-bf537/mem_init.h +++ b/include/asm-blackfin/mach-bf537/mem_init.h | |||
@@ -139,33 +139,6 @@ | |||
139 | #define SDRAM_CL CL_3 | 139 | #define SDRAM_CL CL_3 |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | #if (CONFIG_MEM_SIZE == 128) | ||
143 | #define SDRAM_SIZE EBSZ_128 | ||
144 | #endif | ||
145 | #if (CONFIG_MEM_SIZE == 64) | ||
146 | #define SDRAM_SIZE EBSZ_64 | ||
147 | #endif | ||
148 | #if (CONFIG_MEM_SIZE == 32) | ||
149 | #define SDRAM_SIZE EBSZ_32 | ||
150 | #endif | ||
151 | #if (CONFIG_MEM_SIZE == 16) | ||
152 | #define SDRAM_SIZE EBSZ_16 | ||
153 | #endif | ||
154 | #if (CONFIG_MEM_ADD_WIDTH == 11) | ||
155 | #define SDRAM_WIDTH EBCAW_11 | ||
156 | #endif | ||
157 | #if (CONFIG_MEM_ADD_WIDTH == 10) | ||
158 | #define SDRAM_WIDTH EBCAW_10 | ||
159 | #endif | ||
160 | #if (CONFIG_MEM_ADD_WIDTH == 9) | ||
161 | #define SDRAM_WIDTH EBCAW_9 | ||
162 | #endif | ||
163 | #if (CONFIG_MEM_ADD_WIDTH == 8) | ||
164 | #define SDRAM_WIDTH EBCAW_8 | ||
165 | #endif | ||
166 | |||
167 | #define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE) | ||
168 | |||
169 | /* Equation from section 17 (p17-46) of BF533 HRM */ | 142 | /* Equation from section 17 (p17-46) of BF533 HRM */ |
170 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) | 143 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) |
171 | 144 | ||
diff --git a/include/asm-blackfin/mach-bf548/bfin_sir.h b/include/asm-blackfin/mach-bf548/bfin_sir.h index 5e94271c7e3b..c41f9cf00268 100644 --- a/include/asm-blackfin/mach-bf548/bfin_sir.h +++ b/include/asm-blackfin/mach-bf548/bfin_sir.h | |||
@@ -124,26 +124,43 @@ struct bfin_sir_self { | |||
124 | 124 | ||
125 | #define DRIVER_NAME "bfin_sir" | 125 | #define DRIVER_NAME "bfin_sir" |
126 | 126 | ||
127 | static void bfin_sir_hw_init(void) | 127 | static int bfin_sir_hw_init(void) |
128 | { | 128 | { |
129 | int ret = -ENODEV; | ||
129 | #ifdef CONFIG_BFIN_SIR0 | 130 | #ifdef CONFIG_BFIN_SIR0 |
130 | peripheral_request(P_UART0_TX, DRIVER_NAME); | 131 | ret = peripheral_request(P_UART0_TX, DRIVER_NAME); |
131 | peripheral_request(P_UART0_RX, DRIVER_NAME); | 132 | if (ret) |
133 | return ret; | ||
134 | ret = peripheral_request(P_UART0_RX, DRIVER_NAME); | ||
135 | if (ret) | ||
136 | return ret; | ||
132 | #endif | 137 | #endif |
133 | 138 | ||
134 | #ifdef CONFIG_BFIN_SIR1 | 139 | #ifdef CONFIG_BFIN_SIR1 |
135 | peripheral_request(P_UART1_TX, DRIVER_NAME); | 140 | ret = peripheral_request(P_UART1_TX, DRIVER_NAME); |
136 | peripheral_request(P_UART1_RX, DRIVER_NAME); | 141 | if (ret) |
142 | return ret; | ||
143 | ret = peripheral_request(P_UART1_RX, DRIVER_NAME); | ||
144 | if (ret) | ||
145 | return ret; | ||
137 | #endif | 146 | #endif |
138 | 147 | ||
139 | #ifdef CONFIG_BFIN_SIR2 | 148 | #ifdef CONFIG_BFIN_SIR2 |
140 | peripheral_request(P_UART2_TX, DRIVER_NAME); | 149 | ret = peripheral_request(P_UART2_TX, DRIVER_NAME); |
141 | peripheral_request(P_UART2_RX, DRIVER_NAME); | 150 | if (ret) |
151 | return ret; | ||
152 | ret = peripheral_request(P_UART2_RX, DRIVER_NAME); | ||
153 | if (ret) | ||
154 | return ret; | ||
142 | #endif | 155 | #endif |
143 | 156 | ||
144 | #ifdef CONFIG_BFIN_SIR3 | 157 | #ifdef CONFIG_BFIN_SIR3 |
145 | peripheral_request(P_UART3_TX, DRIVER_NAME); | 158 | ret = peripheral_request(P_UART3_TX, DRIVER_NAME); |
146 | peripheral_request(P_UART3_RX, DRIVER_NAME); | 159 | if (ret) |
160 | return ret; | ||
161 | ret = peripheral_request(P_UART3_RX, DRIVER_NAME); | ||
162 | if (ret) | ||
163 | return ret; | ||
147 | #endif | 164 | #endif |
148 | SSYNC(); | 165 | return ret; |
149 | } | 166 | } |
diff --git a/include/asm-blackfin/mach-bf548/gpio.h b/include/asm-blackfin/mach-bf548/gpio.h index cb8b0f15c9a6..bba82dc75f16 100644 --- a/include/asm-blackfin/mach-bf548/gpio.h +++ b/include/asm-blackfin/mach-bf548/gpio.h | |||
@@ -209,3 +209,11 @@ struct gpio_port_t { | |||
209 | unsigned short dummy7; | 209 | unsigned short dummy7; |
210 | unsigned int port_mux; | 210 | unsigned int port_mux; |
211 | }; | 211 | }; |
212 | |||
213 | struct gpio_port_s { | ||
214 | unsigned short fer; | ||
215 | unsigned short data; | ||
216 | unsigned short dir; | ||
217 | unsigned short inen; | ||
218 | unsigned int mux; | ||
219 | }; | ||
diff --git a/include/asm-blackfin/mach-bf561/bfin_sir.h b/include/asm-blackfin/mach-bf561/bfin_sir.h index cefcf8bb505b..9bb87e9e2e9b 100644 --- a/include/asm-blackfin/mach-bf561/bfin_sir.h +++ b/include/asm-blackfin/mach-bf561/bfin_sir.h | |||
@@ -110,11 +110,16 @@ static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port) | |||
110 | 110 | ||
111 | #define DRIVER_NAME "bfin_sir" | 111 | #define DRIVER_NAME "bfin_sir" |
112 | 112 | ||
113 | static void bfin_sir_hw_init(void) | 113 | static int bfin_sir_hw_init(void) |
114 | { | 114 | { |
115 | int ret = -ENODEV; | ||
115 | #ifdef CONFIG_BFIN_SIR0 | 116 | #ifdef CONFIG_BFIN_SIR0 |
116 | peripheral_request(P_UART0_TX, DRIVER_NAME); | 117 | ret = peripheral_request(P_UART0_TX, DRIVER_NAME); |
117 | peripheral_request(P_UART0_RX, DRIVER_NAME); | 118 | if (ret) |
119 | return ret; | ||
120 | ret = peripheral_request(P_UART0_RX, DRIVER_NAME); | ||
121 | if (ret) | ||
122 | return ret; | ||
118 | #endif | 123 | #endif |
119 | SSYNC(); | 124 | return ret; |
120 | } | 125 | } |
diff --git a/include/asm-blackfin/mach-bf561/mem_init.h b/include/asm-blackfin/mach-bf561/mem_init.h index 439a5895b346..e163260bca18 100644 --- a/include/asm-blackfin/mach-bf561/mem_init.h +++ b/include/asm-blackfin/mach-bf561/mem_init.h | |||
@@ -131,33 +131,6 @@ | |||
131 | #define SDRAM_CL CL_3 | 131 | #define SDRAM_CL CL_3 |
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | #if (CONFIG_MEM_SIZE == 128) | ||
135 | #define SDRAM_SIZE EB0_SZ_128 | ||
136 | #endif | ||
137 | #if (CONFIG_MEM_SIZE == 64) | ||
138 | #define SDRAM_SIZE EB0_SZ_64 | ||
139 | #endif | ||
140 | #if ( CONFIG_MEM_SIZE == 32) | ||
141 | #define SDRAM_SIZE EB0_SZ_32 | ||
142 | #endif | ||
143 | #if (CONFIG_MEM_SIZE == 16) | ||
144 | #define SDRAM_SIZE EB0_SZ_16 | ||
145 | #endif | ||
146 | #if (CONFIG_MEM_ADD_WIDTH == 11) | ||
147 | #define SDRAM_WIDTH EB0_CAW_11 | ||
148 | #endif | ||
149 | #if (CONFIG_MEM_ADD_WIDTH == 10) | ||
150 | #define SDRAM_WIDTH EB0_CAW_10 | ||
151 | #endif | ||
152 | #if (CONFIG_MEM_ADD_WIDTH == 9) | ||
153 | #define SDRAM_WIDTH EB0_CAW_9 | ||
154 | #endif | ||
155 | #if (CONFIG_MEM_ADD_WIDTH == 8) | ||
156 | #define SDRAM_WIDTH EB0_CAW_8 | ||
157 | #endif | ||
158 | |||
159 | #define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EB0_E) | ||
160 | |||
161 | /* Equation from section 17 (p17-46) of BF533 HRM */ | 134 | /* Equation from section 17 (p17-46) of BF533 HRM */ |
162 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) | 135 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) |
163 | 136 | ||
diff --git a/include/asm-blackfin/module.h b/include/asm-blackfin/module.h index 3c7ce1644280..e3128df139d6 100644 --- a/include/asm-blackfin/module.h +++ b/include/asm-blackfin/module.h | |||
@@ -6,8 +6,6 @@ | |||
6 | #define Elf_Shdr Elf32_Shdr | 6 | #define Elf_Shdr Elf32_Shdr |
7 | #define Elf_Sym Elf32_Sym | 7 | #define Elf_Sym Elf32_Sym |
8 | #define Elf_Ehdr Elf32_Ehdr | 8 | #define Elf_Ehdr Elf32_Ehdr |
9 | #define FLG_CODE_IN_L1 0x10 | ||
10 | #define FLG_DATA_IN_L1 0x20 | ||
11 | 9 | ||
12 | struct mod_arch_specific { | 10 | struct mod_arch_specific { |
13 | Elf_Shdr *text_l1; | 11 | Elf_Shdr *text_l1; |
@@ -15,5 +13,8 @@ struct mod_arch_specific { | |||
15 | Elf_Shdr *bss_a_l1; | 13 | Elf_Shdr *bss_a_l1; |
16 | Elf_Shdr *data_b_l1; | 14 | Elf_Shdr *data_b_l1; |
17 | Elf_Shdr *bss_b_l1; | 15 | Elf_Shdr *bss_b_l1; |
16 | Elf_Shdr *text_l2; | ||
17 | Elf_Shdr *data_l2; | ||
18 | Elf_Shdr *bss_l2; | ||
18 | }; | 19 | }; |
19 | #endif /* _ASM_BFIN_MODULE_H */ | 20 | #endif /* _ASM_BFIN_MODULE_H */ |
diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h index 1c0040724612..6f3995b119d8 100644 --- a/include/asm-blackfin/processor.h +++ b/include/asm-blackfin/processor.h | |||
@@ -112,7 +112,26 @@ unsigned long get_wchan(struct task_struct *p); | |||
112 | static inline uint32_t __pure bfin_revid(void) | 112 | static inline uint32_t __pure bfin_revid(void) |
113 | { | 113 | { |
114 | /* stored in the upper 4 bits */ | 114 | /* stored in the upper 4 bits */ |
115 | return bfin_read_CHIPID() >> 28; | 115 | uint32_t revid = bfin_read_CHIPID() >> 28; |
116 | |||
117 | #ifdef CONFIG_BF52x | ||
118 | /* ANOMALY_05000357 | ||
119 | * Incorrect Revision Number in DSPID Register | ||
120 | */ | ||
121 | if (revid == 0) | ||
122 | switch (bfin_read16(_BOOTROM_GET_DXE_ADDRESS_TWI)) { | ||
123 | case 0x0010: | ||
124 | revid = 0; | ||
125 | break; | ||
126 | case 0x2796: | ||
127 | revid = 1; | ||
128 | break; | ||
129 | default: | ||
130 | revid = 0xFFFF; | ||
131 | break; | ||
132 | } | ||
133 | #endif | ||
134 | return revid; | ||
116 | } | 135 | } |
117 | 136 | ||
118 | static inline uint32_t __pure bfin_compiled_revid(void) | 137 | static inline uint32_t __pure bfin_compiled_revid(void) |
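With the ANOMALY_05000357 workaround folded into `bfin_revid()`, silicon-revision checks stay accurate on BF52x parts whose CHIPID reads back 0. An illustrative fragment of the usual consumer; the message text is invented.

```c
#include <linux/kernel.h>

/* Illustrative consumer of bfin_revid() vs. bfin_compiled_revid(). */
static void example_check_silicon_rev(void)
{
	if (bfin_compiled_revid() >= 0 && bfin_revid() != bfin_compiled_revid())
		printk(KERN_WARNING "silicon rev 0.%d differs from the 0.%d the kernel targets\n",
		       bfin_revid(), bfin_compiled_revid());
}
```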
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h index edc8d1bfaae2..cb2fb25ff8d9 100644 --- a/include/asm-cris/dma-mapping.h +++ b/include/asm-cris/dma-mapping.h | |||
@@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |||
120 | } | 120 | } |
121 | 121 | ||
122 | static inline int | 122 | static inline int |
123 | dma_mapping_error(dma_addr_t dma_addr) | 123 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
124 | { | 124 | { |
125 | return 0; | 125 | return 0; |
126 | } | 126 | } |
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h index 2e8966ca030d..b2898877c07b 100644 --- a/include/asm-frv/dma-mapping.h +++ b/include/asm-frv/dma-mapping.h | |||
@@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele | |||
126 | } | 126 | } |
127 | 127 | ||
128 | static inline | 128 | static inline |
129 | int dma_mapping_error(dma_addr_t dma_addr) | 129 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
130 | { | 130 | { |
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index e2468f894d2a..82cd0cb1c3fe 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h | |||
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |||
61 | #define dma_sync_sg_for_device dma_sync_sg_for_cpu | 61 | #define dma_sync_sg_for_device dma_sync_sg_for_cpu |
62 | 62 | ||
63 | extern int | 63 | extern int |
64 | dma_mapping_error(dma_addr_t dma_addr); | 64 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
65 | 65 | ||
66 | extern int | 66 | extern int |
67 | dma_supported(struct device *dev, u64 mask); | 67 | dma_supported(struct device *dev, u64 mask); |
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 783ab9944d70..189486c3f92e 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h | |||
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |||
144 | } | 144 | } |
145 | 145 | ||
146 | static inline int | 146 | static inline int |
147 | dma_mapping_error(dma_addr_t dma_addr) | 147 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
148 | { | 148 | { |
149 | return pci_dma_mapping_error(dma_addr); | 149 | return pci_dma_mapping_error(to_pci_dev(dev), dma_addr); |
150 | } | 150 | } |
151 | 151 | ||
152 | 152 | ||
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h index 25c10e96b2b7..37b3706226e7 100644 --- a/include/asm-generic/pci-dma-compat.h +++ b/include/asm-generic/pci-dma-compat.h | |||
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static inline int | 101 | static inline int |
102 | pci_dma_mapping_error(dma_addr_t dma_addr) | 102 | pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) |
103 | { | 103 | { |
104 | return dma_mapping_error(dma_addr); | 104 | return dma_mapping_error(&pdev->dev, dma_addr); |
105 | } | 105 | } |
106 | 106 | ||
107 | #endif | 107 | #endif |
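The PCI compat wrapper gains the `pci_dev` argument so it can forward to the device-aware `dma_mapping_error()`. A hedged caller-side sketch for a PCI driver; names are hypothetical, and the only point is the extra `pdev` argument.

```c
#include <linux/pci.h>

/* Hypothetical PCI-driver fragment using the updated wrapper. */
static int example_map_rx_buffer(struct pci_dev *pdev, void *buf, size_t len,
				 dma_addr_t *mapping)
{
	*mapping = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *mapping))	/* pdev is new here */
		return -EIO;
	return 0;
}
```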
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h new file mode 100644 index 000000000000..abcf34c2fdc7 --- /dev/null +++ b/include/asm-generic/syscall.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Access to user system call parameters and results | ||
3 | * | ||
4 | * Copyright (C) 2008 Red Hat, Inc. All rights reserved. | ||
5 | * | ||
6 | * This copyrighted material is made available to anyone wishing to use, | ||
7 | * modify, copy, or redistribute it subject to the terms and conditions | ||
8 | * of the GNU General Public License v.2. | ||
9 | * | ||
10 | * This file is a stub providing documentation for what functions | ||
11 | * asm-ARCH/syscall.h files need to define. Most arch definitions | ||
12 | * will be simple inlines. | ||
13 | * | ||
14 | * All of these functions expect to be called with no locks, | ||
15 | * and only when the caller is sure that the task of interest | ||
16 | * cannot return to user mode while we are looking at it. | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_SYSCALL_H | ||
20 | #define _ASM_SYSCALL_H 1 | ||
21 | |||
22 | struct task_struct; | ||
23 | struct pt_regs; | ||
24 | |||
25 | /** | ||
26 | * syscall_get_nr - find what system call a task is executing | ||
27 | * @task: task of interest, must be blocked | ||
28 | * @regs: task_pt_regs() of @task | ||
29 | * | ||
30 | * If @task is executing a system call or is at system call | ||
31 | * tracing about to attempt one, returns the system call number. | ||
32 | * If @task is not executing a system call, i.e. it's blocked | ||
33 | * inside the kernel for a fault or signal, returns -1. | ||
34 | * | ||
35 | * It's only valid to call this when @task is known to be blocked. | ||
36 | */ | ||
37 | long syscall_get_nr(struct task_struct *task, struct pt_regs *regs); | ||
38 | |||
39 | /** | ||
40 | * syscall_rollback - roll back registers after an aborted system call | ||
41 | * @task: task of interest, must be in system call exit tracing | ||
42 | * @regs: task_pt_regs() of @task | ||
43 | * | ||
44 | * It's only valid to call this when @task is stopped for system | ||
45 | * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), | ||
46 | * after tracehook_report_syscall_entry() returned nonzero to prevent | ||
47 | * the system call from taking place. | ||
48 | * | ||
49 | * This rolls back the register state in @regs so it's as if the | ||
50 | * system call instruction was a no-op. The registers containing | ||
51 | * the system call number and arguments are as they were before the | ||
52 | * system call instruction. This may not be the same as what the | ||
53 | * register state looked like at system call entry tracing. | ||
54 | */ | ||
55 | void syscall_rollback(struct task_struct *task, struct pt_regs *regs); | ||
56 | |||
57 | /** | ||
58 | * syscall_get_error - check result of traced system call | ||
59 | * @task: task of interest, must be blocked | ||
60 | * @regs: task_pt_regs() of @task | ||
61 | * | ||
62 | * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. | ||
63 | * | ||
64 | * It's only valid to call this when @task is stopped for tracing on exit | ||
65 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
66 | */ | ||
67 | long syscall_get_error(struct task_struct *task, struct pt_regs *regs); | ||
68 | |||
69 | /** | ||
70 | * syscall_get_return_value - get the return value of a traced system call | ||
71 | * @task: task of interest, must be blocked | ||
72 | * @regs: task_pt_regs() of @task | ||
73 | * | ||
74 | * Returns the return value of the successful system call. | ||
75 | * This value is meaningless if syscall_get_error() returned nonzero. | ||
76 | * | ||
77 | * It's only valid to call this when @task is stopped for tracing on exit | ||
78 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
79 | */ | ||
80 | long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); | ||
81 | |||
82 | /** | ||
83 | * syscall_set_return_value - change the return value of a traced system call | ||
84 | * @task: task of interest, must be blocked | ||
85 | * @regs: task_pt_regs() of @task | ||
86 | * @error: negative error code, or zero to indicate success | ||
87 | * @val: user return value if @error is zero | ||
88 | * | ||
89 | * This changes the results of the system call that user mode will see. | ||
90 | * If @error is zero, the user sees a successful system call with a | ||
91 | * return value of @val. If @error is nonzero, it's a negated errno | ||
92 | * code; the user sees a failed system call with this errno code. | ||
93 | * | ||
94 | * It's only valid to call this when @task is stopped for tracing on exit | ||
95 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
96 | */ | ||
97 | void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | ||
98 | int error, long val); | ||
99 | |||
100 | /** | ||
101 | * syscall_get_arguments - extract system call parameter values | ||
102 | * @task: task of interest, must be blocked | ||
103 | * @regs: task_pt_regs() of @task | ||
104 | * @i: argument index [0,5] | ||
105 | * @n: number of arguments; n+i must be [1,6]. | ||
106 | * @args: array filled with argument values | ||
107 | * | ||
108 | * Fetches @n arguments to the system call starting with the @i'th argument | ||
109 | * (from 0 through 5). Argument @i is stored in @args[0], and so on. | ||
110 | * An arch inline version is probably optimal when @i and @n are constants. | ||
111 | * | ||
112 | * It's only valid to call this when @task is stopped for tracing on | ||
113 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
114 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
115 | * taking up to 6 arguments. | ||
116 | */ | ||
117 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | ||
118 | unsigned int i, unsigned int n, unsigned long *args); | ||
119 | |||
120 | /** | ||
121 | * syscall_set_arguments - change system call parameter value | ||
122 | * @task: task of interest, must be in system call entry tracing | ||
123 | * @regs: task_pt_regs() of @task | ||
124 | * @i: argument index [0,5] | ||
125 | * @n: number of arguments; n+i must be [1,6]. | ||
126 | * @args: array of argument values to store | ||
127 | * | ||
128 | * Changes @n arguments to the system call starting with the @i'th argument. | ||
129 | * @n'th argument to @val. Argument @i gets value @args[0], and so on. | ||
130 | * An arch inline version is probably optimal when @i and @n are constants. | ||
131 | * | ||
132 | * It's only valid to call this when @task is stopped for tracing on | ||
133 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
134 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
135 | * taking up to 6 arguments. | ||
136 | */ | ||
137 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | ||
138 | unsigned int i, unsigned int n, | ||
139 | const unsigned long *args); | ||
140 | |||
141 | #endif /* _ASM_SYSCALL_H */ | ||
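The new asm-generic/syscall.h documents the tracing helpers each architecture must provide. A hedged sketch of how a tracer stopped at syscall entry might read the number and arguments; the reporting function is hypothetical, and the calls are only valid under the conditions stated in the kernel-doc above.

```c
#include <asm/syscall.h>

/* Hypothetical tracer fragment: @task is stopped at syscall entry
 * (TIF_SYSCALL_TRACE/TIF_SYSCALL_AUDIT), per the comments in the header. */
static void example_report_syscall_entry(struct task_struct *task,
					 struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1)
		return;		/* blocked on a fault or signal, not in a syscall */

	syscall_get_arguments(task, regs, 0, 6, args);
	/* ... record nr and args[0..5] ... */
}
```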
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 729f6b0a60e9..9cd44b162ba1 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -359,6 +359,8 @@ | |||
359 | } | 359 | } |
360 | 360 | ||
361 | #define INITCALLS \ | 361 | #define INITCALLS \ |
362 | *(.initcallearly.init) \ | ||
363 | __early_initcall_end = .; \ | ||
362 | *(.initcall0.init) \ | 364 | *(.initcall0.init) \ |
363 | *(.initcall0s.init) \ | 365 | *(.initcall0s.init) \ |
364 | *(.initcall1.init) \ | 366 | *(.initcall1.init) \ |
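The new `.initcallearly.init` section collects entries that run ahead of all numbered initcall levels. A hedged example of registering one, assuming the companion `early_initcall()` macro in `<linux/init.h>` that emits into this section.

```c
#include <linux/init.h>

/* Assumed companion macro: early_initcall() places its entry in
 * .initcallearly.init, so this runs before every numbered initcall level. */
static int __init example_early_setup(void)
{
	/* pre-level-0 initialisation goes here */
	return 0;
}
early_initcall(example_early_setup);
```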
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index 0721a5e8271e..a6d50c77b6bf 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h | |||
@@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_ | |||
54 | typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); | 54 | typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); |
55 | typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); | 55 | typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); |
56 | typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); | 56 | typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); |
57 | typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); | 57 | typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr); |
58 | typedef int ia64_mv_dma_supported (struct device *, u64); | 58 | typedef int ia64_mv_dma_supported (struct device *, u64); |
59 | 59 | ||
60 | typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); | 60 | typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); |
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h index a26cdeb46a57..91f7944333d4 100644 --- a/include/asm-m68k/dma-mapping.h +++ b/include/asm-m68k/dma-mapping.h | |||
@@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s | |||
84 | { | 84 | { |
85 | } | 85 | } |
86 | 86 | ||
87 | static inline int dma_mapping_error(dma_addr_t handle) | 87 | static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) |
88 | { | 88 | { |
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h index 230b3f1b69b1..c64afb40cd06 100644 --- a/include/asm-mips/dma-mapping.h +++ b/include/asm-mips/dma-mapping.h | |||
@@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
42 | int nelems, enum dma_data_direction direction); | 42 | int nelems, enum dma_data_direction direction); |
43 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 43 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
44 | int nelems, enum dma_data_direction direction); | 44 | int nelems, enum dma_data_direction direction); |
45 | extern int dma_mapping_error(dma_addr_t dma_addr); | 45 | extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
46 | extern int dma_supported(struct device *dev, u64 mask); | 46 | extern int dma_supported(struct device *dev, u64 mask); |
47 | 47 | ||
48 | static inline int | 48 | static inline int |
diff --git a/include/asm-mn10300/dma-mapping.h b/include/asm-mn10300/dma-mapping.h index 7c882fca9ec8..ccae8f6c6326 100644 --- a/include/asm-mn10300/dma-mapping.h +++ b/include/asm-mn10300/dma-mapping.h | |||
@@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
182 | } | 182 | } |
183 | 183 | ||
184 | static inline | 184 | static inline |
185 | int dma_mapping_error(dma_addr_t dma_addr) | 185 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
186 | { | 186 | { |
187 | return 0; | 187 | return 0; |
188 | } | 188 | } |
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h index 2f1e1b05440a..b7ca6dc7fddc 100644 --- a/include/asm-parisc/cacheflush.h +++ b/include/asm-parisc/cacheflush.h | |||
@@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm); | |||
45 | extern void flush_dcache_page(struct page *page); | 45 | extern void flush_dcache_page(struct page *page); |
46 | 46 | ||
47 | #define flush_dcache_mmap_lock(mapping) \ | 47 | #define flush_dcache_mmap_lock(mapping) \ |
48 | write_lock_irq(&(mapping)->tree_lock) | 48 | spin_lock_irq(&(mapping)->tree_lock) |
49 | #define flush_dcache_mmap_unlock(mapping) \ | 49 | #define flush_dcache_mmap_unlock(mapping) \ |
50 | write_unlock_irq(&(mapping)->tree_lock) | 50 | spin_unlock_irq(&(mapping)->tree_lock) |
51 | 51 | ||
52 | #define flush_icache_page(vma,page) do { \ | 52 | #define flush_icache_page(vma,page) do { \ |
53 | flush_kernel_dcache_page(page); \ | 53 | flush_kernel_dcache_page(page); \ |
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h index c6c0e9ff6bde..53af696f23d2 100644 --- a/include/asm-parisc/dma-mapping.h +++ b/include/asm-parisc/dma-mapping.h | |||
@@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev); | |||
248 | #endif | 248 | #endif |
249 | 249 | ||
250 | /* At the moment, we panic on error for IOMMU resource exaustion */ | 250 | /* At the moment, we panic on error for IOMMU resource exaustion */ |
251 | #define dma_mapping_error(x) 0 | 251 | #define dma_mapping_error(dev, x) 0 |
252 | 252 | ||
253 | #endif | 253 | #endif |
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h index 74c549780987..c7ca45f97dd2 100644 --- a/include/asm-powerpc/dma-mapping.h +++ b/include/asm-powerpc/dma-mapping.h | |||
@@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, | |||
415 | __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); | 415 | __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); |
416 | } | 416 | } |
417 | 417 | ||
418 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 418 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
419 | { | 419 | { |
420 | #ifdef CONFIG_PPC64 | 420 | #ifdef CONFIG_PPC64 |
421 | return (dma_addr == DMA_ERROR_CODE); | 421 | return (dma_addr == DMA_ERROR_CODE); |
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index 22cc419389fe..6c0b8a2de143 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h | |||
@@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void) | |||
171 | return L1_CACHE_BYTES; | 171 | return L1_CACHE_BYTES; |
172 | } | 172 | } |
173 | 173 | ||
174 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 174 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
175 | { | 175 | { |
176 | return dma_addr == 0; | 176 | return dma_addr == 0; |
177 | } | 177 | } |
diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h index 38cbec76a33f..bfa64f9702d5 100644 --- a/include/asm-sparc/dma-mapping_64.h +++ b/include/asm-sparc/dma-mapping_64.h | |||
@@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, | |||
135 | /* No flushing needed to sync cpu writes to the device. */ | 135 | /* No flushing needed to sync cpu writes to the device. */ |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 138 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
139 | { | 139 | { |
140 | return (dma_addr == DMA_ERROR_CODE); | 140 | return (dma_addr == DMA_ERROR_CODE); |
141 | } | 141 | } |
diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h index b93b6c79e08f..0ee949d220c0 100644 --- a/include/asm-sparc/pci_32.h +++ b/include/asm-sparc/pci_32.h | |||
@@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
154 | 154 | ||
155 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) | 155 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) |
156 | 156 | ||
157 | static inline int pci_dma_mapping_error(dma_addr_t dma_addr) | 157 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, |
158 | dma_addr_t dma_addr) | ||
158 | { | 159 | { |
159 | return (dma_addr == PCI_DMA_ERROR_CODE); | 160 | return (dma_addr == PCI_DMA_ERROR_CODE); |
160 | } | 161 | } |
diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h index f59f2571295b..4f79a54948f6 100644 --- a/include/asm-sparc/pci_64.h +++ b/include/asm-sparc/pci_64.h | |||
@@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | |||
140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) | 140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) |
141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL | 141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL |
142 | 142 | ||
143 | static inline int pci_dma_mapping_error(dma_addr_t dma_addr) | 143 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, |
144 | dma_addr_t dma_addr) | ||
144 | { | 145 | { |
145 | return dma_mapping_error(dma_addr); | 146 | return dma_mapping_error(&pdev->dev, dma_addr); |
146 | } | 147 | } |
147 | 148 | ||
148 | #ifdef CONFIG_PCI | 149 | #ifdef CONFIG_PCI |
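Editor's note: the hunks above (and the matching x86, xtensa and swiotlb ones below) all add a struct device / struct pci_dev argument to dma_mapping_error() and pci_dma_mapping_error(). A minimal caller-side sketch of the new signature; the function, buffer and error handling are illustrative and not taken from this diff.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch: check a streaming mapping with the two-argument dma_mapping_error(). */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr)) {	/* the device is now required */
		dev_err(dev, "DMA mapping failed\n");
		return -ENOMEM;
	}
	*handle = addr;
	return 0;
}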
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h index 87a715367a1b..3c034f48fdb0 100644 --- a/include/asm-x86/device.h +++ b/include/asm-x86/device.h | |||
@@ -5,6 +5,9 @@ struct dev_archdata { | |||
5 | #ifdef CONFIG_ACPI | 5 | #ifdef CONFIG_ACPI |
6 | void *acpi_handle; | 6 | void *acpi_handle; |
7 | #endif | 7 | #endif |
8 | #ifdef CONFIG_X86_64 | ||
9 | struct dma_mapping_ops *dma_ops; | ||
10 | #endif | ||
8 | #ifdef CONFIG_DMAR | 11 | #ifdef CONFIG_DMAR |
9 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
10 | #endif | 13 | #endif |
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index c2ddd3d1b883..0eaa9bf6011f 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h | |||
@@ -17,7 +17,8 @@ extern int panic_on_overflow; | |||
17 | extern int force_iommu; | 17 | extern int force_iommu; |
18 | 18 | ||
19 | struct dma_mapping_ops { | 19 | struct dma_mapping_ops { |
20 | int (*mapping_error)(dma_addr_t dma_addr); | 20 | int (*mapping_error)(struct device *dev, |
21 | dma_addr_t dma_addr); | ||
21 | void* (*alloc_coherent)(struct device *dev, size_t size, | 22 | void* (*alloc_coherent)(struct device *dev, size_t size, |
22 | dma_addr_t *dma_handle, gfp_t gfp); | 23 | dma_addr_t *dma_handle, gfp_t gfp); |
23 | void (*free_coherent)(struct device *dev, size_t size, | 24 | void (*free_coherent)(struct device *dev, size_t size, |
@@ -56,14 +57,32 @@ struct dma_mapping_ops { | |||
56 | int is_phys; | 57 | int is_phys; |
57 | }; | 58 | }; |
58 | 59 | ||
59 | extern const struct dma_mapping_ops *dma_ops; | 60 | extern struct dma_mapping_ops *dma_ops; |
60 | 61 | ||
61 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 62 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) |
62 | { | 63 | { |
63 | if (dma_ops->mapping_error) | 64 | #ifdef CONFIG_X86_32 |
64 | return dma_ops->mapping_error(dma_addr); | 65 | return dma_ops; |
66 | #else | ||
67 | if (unlikely(!dev) || !dev->archdata.dma_ops) | ||
68 | return dma_ops; | ||
69 | else | ||
70 | return dev->archdata.dma_ops; | ||
71 | #endif | ||
72 | } | ||
73 | |||
74 | /* Make sure we keep the same behaviour */ | ||
75 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
76 | { | ||
77 | #ifdef CONFIG_X86_32 | ||
78 | return 0; | ||
79 | #else | ||
80 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
81 | if (ops->mapping_error) | ||
82 | return ops->mapping_error(dev, dma_addr); | ||
65 | 83 | ||
66 | return (dma_addr == bad_dma_address); | 84 | return (dma_addr == bad_dma_address); |
85 | #endif | ||
67 | } | 86 | } |
68 | 87 | ||
69 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 88 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
@@ -83,44 +102,53 @@ static inline dma_addr_t | |||
83 | dma_map_single(struct device *hwdev, void *ptr, size_t size, | 102 | dma_map_single(struct device *hwdev, void *ptr, size_t size, |
84 | int direction) | 103 | int direction) |
85 | { | 104 | { |
105 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
106 | |||
86 | BUG_ON(!valid_dma_direction(direction)); | 107 | BUG_ON(!valid_dma_direction(direction)); |
87 | return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction); | 108 | return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); |
88 | } | 109 | } |
89 | 110 | ||
90 | static inline void | 111 | static inline void |
91 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, | 112 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, |
92 | int direction) | 113 | int direction) |
93 | { | 114 | { |
115 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
116 | |||
94 | BUG_ON(!valid_dma_direction(direction)); | 117 | BUG_ON(!valid_dma_direction(direction)); |
95 | if (dma_ops->unmap_single) | 118 | if (ops->unmap_single) |
96 | dma_ops->unmap_single(dev, addr, size, direction); | 119 | ops->unmap_single(dev, addr, size, direction); |
97 | } | 120 | } |
98 | 121 | ||
99 | static inline int | 122 | static inline int |
100 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, | 123 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, |
101 | int nents, int direction) | 124 | int nents, int direction) |
102 | { | 125 | { |
126 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
127 | |||
103 | BUG_ON(!valid_dma_direction(direction)); | 128 | BUG_ON(!valid_dma_direction(direction)); |
104 | return dma_ops->map_sg(hwdev, sg, nents, direction); | 129 | return ops->map_sg(hwdev, sg, nents, direction); |
105 | } | 130 | } |
106 | 131 | ||
107 | static inline void | 132 | static inline void |
108 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 133 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
109 | int direction) | 134 | int direction) |
110 | { | 135 | { |
136 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
137 | |||
111 | BUG_ON(!valid_dma_direction(direction)); | 138 | BUG_ON(!valid_dma_direction(direction)); |
112 | if (dma_ops->unmap_sg) | 139 | if (ops->unmap_sg) |
113 | dma_ops->unmap_sg(hwdev, sg, nents, direction); | 140 | ops->unmap_sg(hwdev, sg, nents, direction); |
114 | } | 141 | } |
115 | 142 | ||
116 | static inline void | 143 | static inline void |
117 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 144 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
118 | size_t size, int direction) | 145 | size_t size, int direction) |
119 | { | 146 | { |
147 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
148 | |||
120 | BUG_ON(!valid_dma_direction(direction)); | 149 | BUG_ON(!valid_dma_direction(direction)); |
121 | if (dma_ops->sync_single_for_cpu) | 150 | if (ops->sync_single_for_cpu) |
122 | dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, | 151 | ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); |
123 | direction); | ||
124 | flush_write_buffers(); | 152 | flush_write_buffers(); |
125 | } | 153 | } |
126 | 154 | ||
@@ -128,10 +156,11 @@ static inline void | |||
128 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, | 156 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, |
129 | size_t size, int direction) | 157 | size_t size, int direction) |
130 | { | 158 | { |
159 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
160 | |||
131 | BUG_ON(!valid_dma_direction(direction)); | 161 | BUG_ON(!valid_dma_direction(direction)); |
132 | if (dma_ops->sync_single_for_device) | 162 | if (ops->sync_single_for_device) |
133 | dma_ops->sync_single_for_device(hwdev, dma_handle, size, | 163 | ops->sync_single_for_device(hwdev, dma_handle, size, direction); |
134 | direction); | ||
135 | flush_write_buffers(); | 164 | flush_write_buffers(); |
136 | } | 165 | } |
137 | 166 | ||
@@ -139,11 +168,12 @@ static inline void | |||
139 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 168 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
140 | unsigned long offset, size_t size, int direction) | 169 | unsigned long offset, size_t size, int direction) |
141 | { | 170 | { |
142 | BUG_ON(!valid_dma_direction(direction)); | 171 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); |
143 | if (dma_ops->sync_single_range_for_cpu) | ||
144 | dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | ||
145 | size, direction); | ||
146 | 172 | ||
173 | BUG_ON(!valid_dma_direction(direction)); | ||
174 | if (ops->sync_single_range_for_cpu) | ||
175 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | ||
176 | size, direction); | ||
147 | flush_write_buffers(); | 177 | flush_write_buffers(); |
148 | } | 178 | } |
149 | 179 | ||
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, | |||
152 | unsigned long offset, size_t size, | 182 | unsigned long offset, size_t size, |
153 | int direction) | 183 | int direction) |
154 | { | 184 | { |
155 | BUG_ON(!valid_dma_direction(direction)); | 185 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); |
156 | if (dma_ops->sync_single_range_for_device) | ||
157 | dma_ops->sync_single_range_for_device(hwdev, dma_handle, | ||
158 | offset, size, direction); | ||
159 | 186 | ||
187 | BUG_ON(!valid_dma_direction(direction)); | ||
188 | if (ops->sync_single_range_for_device) | ||
189 | ops->sync_single_range_for_device(hwdev, dma_handle, | ||
190 | offset, size, direction); | ||
160 | flush_write_buffers(); | 191 | flush_write_buffers(); |
161 | } | 192 | } |
162 | 193 | ||
@@ -164,9 +195,11 @@ static inline void | |||
164 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 195 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
165 | int nelems, int direction) | 196 | int nelems, int direction) |
166 | { | 197 | { |
198 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
199 | |||
167 | BUG_ON(!valid_dma_direction(direction)); | 200 | BUG_ON(!valid_dma_direction(direction)); |
168 | if (dma_ops->sync_sg_for_cpu) | 201 | if (ops->sync_sg_for_cpu) |
169 | dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); | 202 | ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); |
170 | flush_write_buffers(); | 203 | flush_write_buffers(); |
171 | } | 204 | } |
172 | 205 | ||
@@ -174,9 +207,11 @@ static inline void | |||
174 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 207 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
175 | int nelems, int direction) | 208 | int nelems, int direction) |
176 | { | 209 | { |
210 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
211 | |||
177 | BUG_ON(!valid_dma_direction(direction)); | 212 | BUG_ON(!valid_dma_direction(direction)); |
178 | if (dma_ops->sync_sg_for_device) | 213 | if (ops->sync_sg_for_device) |
179 | dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); | 214 | ops->sync_sg_for_device(hwdev, sg, nelems, direction); |
180 | 215 | ||
181 | flush_write_buffers(); | 216 | flush_write_buffers(); |
182 | } | 217 | } |
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | |||
185 | size_t offset, size_t size, | 220 | size_t offset, size_t size, |
186 | int direction) | 221 | int direction) |
187 | { | 222 | { |
223 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
224 | |||
188 | BUG_ON(!valid_dma_direction(direction)); | 225 | BUG_ON(!valid_dma_direction(direction)); |
189 | return dma_ops->map_single(dev, page_to_phys(page)+offset, | 226 | return ops->map_single(dev, page_to_phys(page) + offset, |
190 | size, direction); | 227 | size, direction); |
191 | } | 228 | } |
192 | 229 | ||
193 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | 230 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, |
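Editor's note: with the change above, every x86 dma_* helper goes through get_dma_ops(), which prefers dev->archdata.dma_ops on 64-bit and falls back to the global dma_ops. A hedged sketch of how an IOMMU driver might install per-device ops; my_iommu_dma_ops and the attach function are hypothetical.

#include <linux/device.h>
#include <linux/dma-mapping.h>

extern struct dma_mapping_ops my_iommu_dma_ops;	/* assumed to be defined elsewhere */

/* Sketch (x86_64 only): route one device's DMA calls through private ops. */
static void example_attach_iommu_ops(struct device *dev)
{
#ifdef CONFIG_X86_64
	dev->archdata.dma_ops = &my_iommu_dma_ops;
#endif
	/* dma_map_single(dev, ...) etc. now reach these ops via get_dma_ops(dev);
	 * devices without archdata.dma_ops keep using the global dma_ops. */
}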
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h index 116e9147fe66..c4c91b37c104 100644 --- a/include/asm-x86/gpio.h +++ b/include/asm-x86/gpio.h | |||
@@ -16,10 +16,6 @@ | |||
16 | #ifndef _ASM_I386_GPIO_H | 16 | #ifndef _ASM_I386_GPIO_H |
17 | #define _ASM_I386_GPIO_H | 17 | #define _ASM_I386_GPIO_H |
18 | 18 | ||
19 | #ifdef CONFIG_X86_RDC321X | ||
20 | #include <gpio.h> | ||
21 | #else /* CONFIG_X86_RDC321X */ | ||
22 | |||
23 | #include <asm-generic/gpio.h> | 19 | #include <asm-generic/gpio.h> |
24 | 20 | ||
25 | #ifdef CONFIG_GPIOLIB | 21 | #ifdef CONFIG_GPIOLIB |
@@ -57,6 +53,4 @@ static inline int irq_to_gpio(unsigned int irq) | |||
57 | 53 | ||
58 | #endif /* CONFIG_GPIOLIB */ | 54 | #endif /* CONFIG_GPIOLIB */ |
59 | 55 | ||
60 | #endif /* CONFIG_X86_RDC321X */ | ||
61 | |||
62 | #endif /* _ASM_I386_GPIO_H */ | 56 | #endif /* _ASM_I386_GPIO_H */ |
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h index d63166fb3ab7..ecc8061904a9 100644 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | 4 | extern void pci_iommu_shutdown(void); |
5 | extern void no_iommu_init(void); | 5 | extern void no_iommu_init(void); |
6 | extern struct dma_mapping_ops nommu_dma_ops; | ||
6 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
7 | extern int iommu_detected; | 8 | extern int iommu_detected; |
8 | 9 | ||
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index 8f855a15f64d..c0e52a14fd4d 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h | |||
@@ -10,14 +10,15 @@ | |||
10 | # define VA_PTE_0 5 | 10 | # define VA_PTE_0 5 |
11 | # define PA_PTE_1 6 | 11 | # define PA_PTE_1 6 |
12 | # define VA_PTE_1 7 | 12 | # define VA_PTE_1 7 |
13 | # define PA_SWAP_PAGE 8 | ||
13 | # ifdef CONFIG_X86_PAE | 14 | # ifdef CONFIG_X86_PAE |
14 | # define PA_PMD_0 8 | 15 | # define PA_PMD_0 9 |
15 | # define VA_PMD_0 9 | 16 | # define VA_PMD_0 10 |
16 | # define PA_PMD_1 10 | 17 | # define PA_PMD_1 11 |
17 | # define VA_PMD_1 11 | 18 | # define VA_PMD_1 12 |
18 | # define PAGES_NR 12 | 19 | # define PAGES_NR 13 |
19 | # else | 20 | # else |
20 | # define PAGES_NR 8 | 21 | # define PAGES_NR 9 |
21 | # endif | 22 | # endif |
22 | #else | 23 | #else |
23 | # define PA_CONTROL_PAGE 0 | 24 | # define PA_CONTROL_PAGE 0 |
@@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs, | |||
152 | } | 153 | } |
153 | 154 | ||
154 | #ifdef CONFIG_X86_32 | 155 | #ifdef CONFIG_X86_32 |
155 | asmlinkage NORET_TYPE void | 156 | asmlinkage unsigned long |
156 | relocate_kernel(unsigned long indirection_page, | 157 | relocate_kernel(unsigned long indirection_page, |
157 | unsigned long control_page, | 158 | unsigned long control_page, |
158 | unsigned long start_address, | 159 | unsigned long start_address, |
159 | unsigned int has_pae) ATTRIB_NORET; | 160 | unsigned int has_pae, |
161 | unsigned int preserve_context); | ||
160 | #else | 162 | #else |
161 | NORET_TYPE void | 163 | NORET_TYPE void |
162 | relocate_kernel(unsigned long indirection_page, | 164 | relocate_kernel(unsigned long indirection_page, |
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 75d2c95005d7..c47e2ab5c5ca 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h | |||
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) | |||
122 | 122 | ||
123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | 123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) |
124 | { | 124 | { |
125 | return physid_mask_of_physid(0); | 125 | return physid_mask_of_physid(apicid); |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 3e5dbc4195f4..04caa2f544df 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #define _PAGE_BIT_UNUSED2 10 | 18 | #define _PAGE_BIT_UNUSED2 10 |
19 | #define _PAGE_BIT_UNUSED3 11 | 19 | #define _PAGE_BIT_UNUSED3 11 |
20 | #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ | 20 | #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ |
21 | #define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 | ||
21 | #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ | 22 | #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ |
22 | 23 | ||
23 | #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) | 24 | #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) |
@@ -34,6 +35,8 @@ | |||
34 | #define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) | 35 | #define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) |
35 | #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) | 36 | #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) |
36 | #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) | 37 | #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) |
38 | #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) | ||
39 | #define __HAVE_ARCH_PTE_SPECIAL | ||
37 | 40 | ||
38 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 41 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
39 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 42 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
@@ -54,7 +57,7 @@ | |||
54 | 57 | ||
55 | /* Set of bits not changed in pte_modify */ | 58 | /* Set of bits not changed in pte_modify */ |
56 | #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ | 59 | #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ |
57 | _PAGE_ACCESSED | _PAGE_DIRTY) | 60 | _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) |
58 | 61 | ||
59 | #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) | 62 | #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) |
60 | #define _PAGE_CACHE_WB (0) | 63 | #define _PAGE_CACHE_WB (0) |
@@ -180,7 +183,7 @@ static inline int pte_exec(pte_t pte) | |||
180 | 183 | ||
181 | static inline int pte_special(pte_t pte) | 184 | static inline int pte_special(pte_t pte) |
182 | { | 185 | { |
183 | return 0; | 186 | return pte_val(pte) & _PAGE_SPECIAL; |
184 | } | 187 | } |
185 | 188 | ||
186 | static inline int pmd_large(pmd_t pte) | 189 | static inline int pmd_large(pmd_t pte) |
@@ -246,7 +249,7 @@ static inline pte_t pte_clrglobal(pte_t pte) | |||
246 | 249 | ||
247 | static inline pte_t pte_mkspecial(pte_t pte) | 250 | static inline pte_t pte_mkspecial(pte_t pte) |
248 | { | 251 | { |
249 | return pte; | 252 | return __pte(pte_val(pte) | _PAGE_SPECIAL); |
250 | } | 253 | } |
251 | 254 | ||
252 | extern pteval_t __supported_pte_mask; | 255 | extern pteval_t __supported_pte_mask; |
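Editor's note: with _PAGE_SPECIAL wired to a real PTE bit, pte_special() now reports what pte_mkspecial() set instead of always returning 0. A small sketch of the kind of test a page-table walker can now make; the helper is illustrative.

#include <asm/pgtable.h>

/* Sketch: "special" mappings carry no struct page to take a reference on. */
static int example_pte_has_refcountable_page(pte_t pte)
{
	if (!pte_present(pte))
		return 0;
	if (pte_special(pte))	/* set via pte_mkspecial(), e.g. raw PFN mappings */
		return 0;
	return 1;
}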
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index c706a7442633..2730b351afcf 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h | |||
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | |||
35 | int nents, int direction); | 35 | int nents, int direction); |
36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | 36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, |
37 | int nents, int direction); | 37 | int nents, int direction); |
38 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); | 38 | extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); |
39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, | 39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, |
40 | void *vaddr, dma_addr_t dma_handle); | 40 | void *vaddr, dma_addr_t dma_handle); |
41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | 41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); |
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h index f6fa4d841bbc..5f702d1d5218 100644 --- a/include/asm-x86/uaccess.h +++ b/include/asm-x86/uaccess.h | |||
@@ -451,3 +451,4 @@ extern struct movsl_mask { | |||
451 | #endif | 451 | #endif |
452 | 452 | ||
453 | #endif | 453 | #endif |
454 | |||
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h index 3c7d537dd15d..51882ae3db4d 100644 --- a/include/asm-xtensa/dma-mapping.h +++ b/include/asm-xtensa/dma-mapping.h | |||
@@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |||
139 | consistent_sync(sg_virt(sg), sg->length, dir); | 139 | consistent_sync(sg_virt(sg), sg->length, dir); |
140 | } | 140 | } |
141 | static inline int | 141 | static inline int |
142 | dma_mapping_error(dma_addr_t dma_addr) | 142 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
143 | { | 143 | { |
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
diff --git a/include/linux/aio.h b/include/linux/aio.h index b51ddd28444e..09b276c35227 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <linux/uio.h> | 7 | #include <linux/uio.h> |
8 | 8 | ||
9 | #include <asm/atomic.h> | 9 | #include <asm/atomic.h> |
10 | #include <linux/uio.h> | ||
11 | 10 | ||
12 | #define AIO_MAXSEGS 4 | 11 | #define AIO_MAXSEGS 4 |
13 | #define AIO_KIOGRP_NR_ATOMIC 8 | 12 | #define AIO_KIOGRP_NR_ATOMIC 8 |
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 6cd39a927e1f..025e4f575103 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h | |||
@@ -8,7 +8,13 @@ | |||
8 | #include <linux/proc_fs.h> | 8 | #include <linux/proc_fs.h> |
9 | 9 | ||
10 | #define ELFCORE_ADDR_MAX (-1ULL) | 10 | #define ELFCORE_ADDR_MAX (-1ULL) |
11 | |||
12 | #ifdef CONFIG_PROC_VMCORE | ||
11 | extern unsigned long long elfcorehdr_addr; | 13 | extern unsigned long long elfcorehdr_addr; |
14 | #else | ||
15 | static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; | ||
16 | #endif | ||
17 | |||
12 | extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, | 18 | extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, |
13 | unsigned long, int); | 19 | unsigned long, int); |
14 | extern const struct file_operations proc_vmcore_operations; | 20 | extern const struct file_operations proc_vmcore_operations; |
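Editor's note: because elfcorehdr_addr is now a constant ELFCORE_ADDR_MAX when CONFIG_PROC_VMCORE is off, callers can test it without an #ifdef. A minimal sketch; the helper name is mine, not part of this diff.

#include <linux/crash_dump.h>

/* Sketch: detect whether this boot is a kdump capture kernel. */
static inline int example_is_kdump_kernel(void)
{
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}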
diff --git a/include/linux/fs.h b/include/linux/fs.h index 49d8eb7a71be..53d2edb709b3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -499,7 +499,7 @@ struct backing_dev_info; | |||
499 | struct address_space { | 499 | struct address_space { |
500 | struct inode *host; /* owner: inode, block_device */ | 500 | struct inode *host; /* owner: inode, block_device */ |
501 | struct radix_tree_root page_tree; /* radix tree of all pages */ | 501 | struct radix_tree_root page_tree; /* radix tree of all pages */ |
502 | rwlock_t tree_lock; /* and rwlock protecting it */ | 502 | spinlock_t tree_lock; /* and lock protecting it */ |
503 | unsigned int i_mmap_writable;/* count VM_SHARED mappings */ | 503 | unsigned int i_mmap_writable;/* count VM_SHARED mappings */ |
504 | struct prio_tree_root i_mmap; /* tree of private and shared mappings */ | 504 | struct prio_tree_root i_mmap; /* tree of private and shared mappings */ |
505 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ | 505 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ |
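Editor's note: mapping->tree_lock becoming a plain spinlock is part of the lockless page cache work: readers go RCU-only (see the pagemap.h hunk further down) while writers take spin_lock_irq() instead of write_lock_irq(). A hedged sketch of the write-side pattern; real insertion also handles radix_tree_preload() and page flags via add_to_page_cache_locked().

#include <linux/fs.h>
#include <linux/radix-tree.h>

/* Sketch: write side of the page-cache radix tree after the lock change. */
static int example_insert_page_slot(struct address_space *mapping,
				    pgoff_t index, struct page *page)
{
	int err;

	spin_lock_irq(&mapping->tree_lock);	/* was write_lock_irq() */
	err = radix_tree_insert(&mapping->page_tree, index, page);
	spin_unlock_irq(&mapping->tree_lock);
	return err;
}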
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 7d51cbca49ab..75ae6d8aba4f 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | |||
758 | } | 758 | } |
759 | 759 | ||
760 | dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); | 760 | dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); |
761 | if (!dma_mapping_error(dma_addr)) { | 761 | if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { |
762 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 762 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 |
763 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | 763 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { |
764 | *mptr++ = cpu_to_le32(0x7C020002); | 764 | *mptr++ = cpu_to_le32(0x7C020002); |
diff --git a/include/linux/init.h b/include/linux/init.h index 42ae95411a93..11b84e106053 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -170,6 +170,13 @@ extern void (*late_time_init)(void); | |||
170 | __attribute__((__section__(".initcall" level ".init"))) = fn | 170 | __attribute__((__section__(".initcall" level ".init"))) = fn |
171 | 171 | ||
172 | /* | 172 | /* |
173 | * Early initcalls run before initializing SMP. | ||
174 | * | ||
175 | * Only for built-in code, not modules. | ||
176 | */ | ||
177 | #define early_initcall(fn) __define_initcall("early",fn,early) | ||
178 | |||
179 | /* | ||
173 | * A "pure" initcall has no dependencies on anything else, and purely | 180 | * A "pure" initcall has no dependencies on anything else, and purely |
174 | * initializes variables that couldn't be statically initialized. | 181 | * initializes variables that couldn't be statically initialized. |
175 | * | 182 | * |
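Editor's note: a minimal sketch of the new early initcall level added above; the function body is illustrative, and as the comment says it applies to built-in code only.

#include <linux/init.h>
#include <linux/kernel.h>

/* Sketch: runs during boot, before SMP is brought up. */
static int __init example_early_setup(void)
{
	printk(KERN_INFO "early_initcall example\n");
	return 0;
}
early_initcall(example_early_setup);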
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 3265968cd2cd..82f88a8a827b 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -83,6 +83,7 @@ struct kimage { | |||
83 | 83 | ||
84 | unsigned long start; | 84 | unsigned long start; |
85 | struct page *control_code_page; | 85 | struct page *control_code_page; |
86 | struct page *swap_page; | ||
86 | 87 | ||
87 | unsigned long nr_segments; | 88 | unsigned long nr_segments; |
88 | struct kexec_segment segment[KEXEC_SEGMENT_MAX]; | 89 | struct kexec_segment segment[KEXEC_SEGMENT_MAX]; |
@@ -98,18 +99,20 @@ struct kimage { | |||
98 | unsigned int type : 1; | 99 | unsigned int type : 1; |
99 | #define KEXEC_TYPE_DEFAULT 0 | 100 | #define KEXEC_TYPE_DEFAULT 0 |
100 | #define KEXEC_TYPE_CRASH 1 | 101 | #define KEXEC_TYPE_CRASH 1 |
102 | unsigned int preserve_context : 1; | ||
101 | }; | 103 | }; |
102 | 104 | ||
103 | 105 | ||
104 | 106 | ||
105 | /* kexec interface functions */ | 107 | /* kexec interface functions */ |
106 | extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET; | 108 | extern void machine_kexec(struct kimage *image); |
107 | extern int machine_kexec_prepare(struct kimage *image); | 109 | extern int machine_kexec_prepare(struct kimage *image); |
108 | extern void machine_kexec_cleanup(struct kimage *image); | 110 | extern void machine_kexec_cleanup(struct kimage *image); |
109 | extern asmlinkage long sys_kexec_load(unsigned long entry, | 111 | extern asmlinkage long sys_kexec_load(unsigned long entry, |
110 | unsigned long nr_segments, | 112 | unsigned long nr_segments, |
111 | struct kexec_segment __user *segments, | 113 | struct kexec_segment __user *segments, |
112 | unsigned long flags); | 114 | unsigned long flags); |
115 | extern int kernel_kexec(void); | ||
113 | #ifdef CONFIG_COMPAT | 116 | #ifdef CONFIG_COMPAT |
114 | extern asmlinkage long compat_sys_kexec_load(unsigned long entry, | 117 | extern asmlinkage long compat_sys_kexec_load(unsigned long entry, |
115 | unsigned long nr_segments, | 118 | unsigned long nr_segments, |
@@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image; | |||
156 | #define kexec_flush_icache_page(page) | 159 | #define kexec_flush_icache_page(page) |
157 | #endif | 160 | #endif |
158 | 161 | ||
159 | #define KEXEC_ON_CRASH 0x00000001 | 162 | #define KEXEC_ON_CRASH 0x00000001 |
160 | #define KEXEC_ARCH_MASK 0xffff0000 | 163 | #define KEXEC_PRESERVE_CONTEXT 0x00000002 |
164 | #define KEXEC_ARCH_MASK 0xffff0000 | ||
161 | 165 | ||
162 | /* These values match the ELF architecture values. | 166 | /* These values match the ELF architecture values. |
163 | * Unless there is a good reason that should continue to be the case. | 167 | * Unless there is a good reason that should continue to be the case. |
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image; | |||
174 | #define KEXEC_ARCH_MIPS_LE (10 << 16) | 178 | #define KEXEC_ARCH_MIPS_LE (10 << 16) |
175 | #define KEXEC_ARCH_MIPS ( 8 << 16) | 179 | #define KEXEC_ARCH_MIPS ( 8 << 16) |
176 | 180 | ||
177 | #define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */ | 181 | /* List of defined/legal kexec flags */ |
182 | #ifndef CONFIG_KEXEC_JUMP | ||
183 | #define KEXEC_FLAGS KEXEC_ON_CRASH | ||
184 | #else | ||
185 | #define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) | ||
186 | #endif | ||
178 | 187 | ||
179 | #define VMCOREINFO_BYTES (4096) | 188 | #define VMCOREINFO_BYTES (4096) |
180 | #define VMCOREINFO_NOTE_NAME "VMCOREINFO" | 189 | #define VMCOREINFO_NOTE_NAME "VMCOREINFO" |
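Editor's note: KEXEC_PRESERVE_CONTEXT is only part of the legal flag set when CONFIG_KEXEC_JUMP widens KEXEC_FLAGS. A hedged sketch of the flag validation sys_kexec_load()-style code performs with these macros; the wrapper function is illustrative.

#include <linux/kexec.h>
#include <linux/errno.h>

/* Sketch: reject flags outside the arch mask plus the legal flag set. */
static int example_check_kexec_flags(unsigned long flags)
{
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;	/* e.g. PRESERVE_CONTEXT without CONFIG_KEXEC_JUMP */
	return 0;
}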
diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 37a5cdb03918..a9f998a3f48b 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h | |||
@@ -263,6 +263,10 @@ struct memstick_dev { | |||
263 | /* Get next request from the media driver. */ | 263 | /* Get next request from the media driver. */ |
264 | int (*next_request)(struct memstick_dev *card, | 264 | int (*next_request)(struct memstick_dev *card, |
265 | struct memstick_request **mrq); | 265 | struct memstick_request **mrq); |
266 | /* Tell the media driver to stop doing things */ | ||
267 | void (*stop)(struct memstick_dev *card); | ||
268 | /* Allow the media driver to continue */ | ||
269 | void (*start)(struct memstick_dev *card); | ||
266 | 270 | ||
267 | struct device dev; | 271 | struct device dev; |
268 | }; | 272 | }; |
@@ -284,7 +288,7 @@ struct memstick_host { | |||
284 | /* Notify the host that some requests are pending. */ | 288 | /* Notify the host that some requests are pending. */ |
285 | void (*request)(struct memstick_host *host); | 289 | void (*request)(struct memstick_host *host); |
286 | /* Set host IO parameters (power, clock, etc). */ | 290 | /* Set host IO parameters (power, clock, etc). */ |
287 | void (*set_param)(struct memstick_host *host, | 291 | int (*set_param)(struct memstick_host *host, |
288 | enum memstick_param param, | 292 | enum memstick_param param, |
289 | int value); | 293 | int value); |
290 | unsigned long private[0] ____cacheline_aligned; | 294 | unsigned long private[0] ____cacheline_aligned; |
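Editor's note: set_param() now returns an int, so a host can refuse a setting instead of silently ignoring it. A hedged sketch of a host callback, assuming the MEMSTICK_POWER / MEMSTICK_INTERFACE parameters and MEMSTICK_SERIAL value from this header; the host itself is hypothetical.

#include <linux/memstick.h>
#include <linux/errno.h>

/* Sketch: a host's set_param() using the new int return type. */
static int example_host_set_param(struct memstick_host *host,
				  enum memstick_param param, int value)
{
	switch (param) {
	case MEMSTICK_POWER:
		/* apply the power setting here */
		return 0;
	case MEMSTICK_INTERFACE:
		return value == MEMSTICK_SERIAL ? 0 : -EINVAL;	/* serial only */
	default:
		return -EINVAL;
	}
}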
diff --git a/include/linux/mm.h b/include/linux/mm.h index d87a5a5fe87d..6e695eaab4ce 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -810,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * | |||
810 | 810 | ||
811 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, | 811 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, |
812 | int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); | 812 | int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); |
813 | void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long); | ||
814 | 813 | ||
815 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); | 814 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); |
816 | extern void do_invalidatepage(struct page *page, unsigned long offset); | 815 | extern void do_invalidatepage(struct page *page, unsigned long offset); |
@@ -833,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma, | |||
833 | struct vm_area_struct **pprev, unsigned long start, | 832 | struct vm_area_struct **pprev, unsigned long start, |
834 | unsigned long end, unsigned long newflags); | 833 | unsigned long end, unsigned long newflags); |
835 | 834 | ||
835 | #ifdef CONFIG_HAVE_GET_USER_PAGES_FAST | ||
836 | /* | ||
837 | * get_user_pages_fast provides equivalent functionality to get_user_pages, | ||
838 | * operating on current and current->mm (force=0 and doesn't return any vmas). | ||
839 | * | ||
840 | * get_user_pages_fast may take mmap_sem and page tables, so no assumptions | ||
841 | * can be made about locking. get_user_pages_fast is to be implemented in a | ||
842 | * way that is advantageous (vs get_user_pages()) when the user memory area is | ||
843 | * already faulted in and present in ptes. However, if the pages have to be | ||
844 | * faulted in, it may turn out to be slightly slower. | ||
845 | */ | ||
846 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | ||
847 | struct page **pages); | ||
848 | |||
849 | #else | ||
850 | /* | ||
851 | * Should probably be moved to asm-generic, and architectures can include it if | ||
852 | * they don't implement their own get_user_pages_fast. | ||
853 | */ | ||
854 | #define get_user_pages_fast(start, nr_pages, write, pages) \ | ||
855 | ({ \ | ||
856 | struct mm_struct *mm = current->mm; \ | ||
857 | int ret; \ | ||
858 | \ | ||
859 | down_read(&mm->mmap_sem); \ | ||
860 | ret = get_user_pages(current, mm, start, nr_pages, \ | ||
861 | write, 0, pages, NULL); \ | ||
862 | up_read(&mm->mmap_sem); \ | ||
863 | \ | ||
864 | ret; \ | ||
865 | }) | ||
866 | #endif | ||
867 | |||
836 | /* | 868 | /* |
837 | * A callback you can register to apply pressure to ageable caches. | 869 | * A callback you can register to apply pressure to ageable caches. |
838 | * | 870 | * |
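Editor's note: the fallback macro above spells out the semantics: pin nr_pages of current's user address space, optionally for write, with no vmas returned. A minimal sketch of a caller; the single-page use and error handling are illustrative.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/errno.h>

/* Sketch: pin one user page with get_user_pages_fast(). */
static int example_pin_user_page(unsigned long uaddr, struct page **page)
{
	int ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1 /* write */, page);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;
	/* caller must page_cache_release(*page) once it is done with it */
	return 0;
}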
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ee1ec2c7723c..a81d81890422 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
13 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
14 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
15 | #include <linux/hardirq.h> /* for in_interrupt() */ | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page | 18 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page |
@@ -62,6 +63,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) | |||
62 | #define page_cache_release(page) put_page(page) | 63 | #define page_cache_release(page) put_page(page) |
63 | void release_pages(struct page **pages, int nr, int cold); | 64 | void release_pages(struct page **pages, int nr, int cold); |
64 | 65 | ||
66 | /* | ||
67 | * speculatively take a reference to a page. | ||
68 | * If the page is free (_count == 0), then _count is untouched, and 0 | ||
69 | * is returned. Otherwise, _count is incremented by 1 and 1 is returned. | ||
70 | * | ||
71 | * This function must be called inside the same rcu_read_lock() section as has | ||
72 | * been used to lookup the page in the pagecache radix-tree (or page table): | ||
73 | * this allows allocators to use a synchronize_rcu() to stabilize _count. | ||
74 | * | ||
75 | * Unless an RCU grace period has passed, the count of all pages coming out | ||
76 | * of the allocator must be considered unstable. page_count may return higher | ||
77 | * than expected, and put_page must be able to do the right thing when the | ||
78 | * page has been finished with, no matter what it is subsequently allocated | ||
79 | * for (because put_page is what is used here to drop an invalid speculative | ||
80 | * reference). | ||
81 | * | ||
82 | * This is the interesting part of the lockless pagecache (and lockless | ||
83 | * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) | ||
84 | * has the following pattern: | ||
85 | * 1. find page in radix tree | ||
86 | * 2. conditionally increment refcount | ||
87 | * 3. check the page is still in pagecache (if no, goto 1) | ||
88 | * | ||
89 | * Remove-side that cares about stability of _count (eg. reclaim) has the | ||
90 | * following (with tree_lock held for write): | ||
91 | * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) | ||
92 | * B. remove page from pagecache | ||
93 | * C. free the page | ||
94 | * | ||
95 | * There are 2 critical interleavings that matter: | ||
96 | * - 2 runs before A: in this case, A sees elevated refcount and bails out | ||
97 | * - A runs before 2: in this case, 2 sees zero refcount and retries; | ||
98 | * subsequently, B will complete and 1 will find no page, causing the | ||
99 | * lookup to return NULL. | ||
100 | * | ||
101 | * It is possible that between 1 and 2, the page is removed then the exact same | ||
102 | * page is inserted into the same position in pagecache. That's OK: the | ||
103 | * old find_get_page using tree_lock could equally have run before or after | ||
104 | * such a re-insertion, depending on order that locks are granted. | ||
105 | * | ||
106 | * Lookups racing against pagecache insertion isn't a big problem: either 1 | ||
107 | * will find the page or it will not. Likewise, the old find_get_page could run | ||
108 | * either before the insertion or afterwards, depending on timing. | ||
109 | */ | ||
110 | static inline int page_cache_get_speculative(struct page *page) | ||
111 | { | ||
112 | VM_BUG_ON(in_interrupt()); | ||
113 | |||
114 | #if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) | ||
115 | # ifdef CONFIG_PREEMPT | ||
116 | VM_BUG_ON(!in_atomic()); | ||
117 | # endif | ||
118 | /* | ||
119 | * Preempt must be disabled here - we rely on rcu_read_lock doing | ||
120 | * this for us. | ||
121 | * | ||
122 | * Pagecache won't be truncated from interrupt context, so if we have | ||
123 | * found a page in the radix tree here, we have pinned its refcount by | ||
124 | * disabling preempt, and hence no need for the "speculative get" that | ||
125 | * SMP requires. | ||
126 | */ | ||
127 | VM_BUG_ON(page_count(page) == 0); | ||
128 | atomic_inc(&page->_count); | ||
129 | |||
130 | #else | ||
131 | if (unlikely(!get_page_unless_zero(page))) { | ||
132 | /* | ||
133 | * Either the page has been freed, or will be freed. | ||
134 | * In either case, retry here and the caller should | ||
135 | * do the right thing (see comments above). | ||
136 | */ | ||
137 | return 0; | ||
138 | } | ||
139 | #endif | ||
140 | VM_BUG_ON(PageTail(page)); | ||
141 | |||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | static inline int page_freeze_refs(struct page *page, int count) | ||
146 | { | ||
147 | return likely(atomic_cmpxchg(&page->_count, count, 0) == count); | ||
148 | } | ||
149 | |||
150 | static inline void page_unfreeze_refs(struct page *page, int count) | ||
151 | { | ||
152 | VM_BUG_ON(page_count(page) != 0); | ||
153 | VM_BUG_ON(count == 0); | ||
154 | |||
155 | atomic_set(&page->_count, count); | ||
156 | } | ||
157 | |||
65 | #ifdef CONFIG_NUMA | 158 | #ifdef CONFIG_NUMA |
66 | extern struct page *__page_cache_alloc(gfp_t gfp); | 159 | extern struct page *__page_cache_alloc(gfp_t gfp); |
67 | #else | 160 | #else |
@@ -133,7 +226,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping, | |||
133 | return read_cache_page(mapping, index, filler, data); | 226 | return read_cache_page(mapping, index, filler, data); |
134 | } | 227 | } |
135 | 228 | ||
136 | int add_to_page_cache(struct page *page, struct address_space *mapping, | 229 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, |
137 | pgoff_t index, gfp_t gfp_mask); | 230 | pgoff_t index, gfp_t gfp_mask); |
138 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | 231 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, |
139 | pgoff_t index, gfp_t gfp_mask); | 232 | pgoff_t index, gfp_t gfp_mask); |
@@ -141,6 +234,22 @@ extern void remove_from_page_cache(struct page *page); | |||
141 | extern void __remove_from_page_cache(struct page *page); | 234 | extern void __remove_from_page_cache(struct page *page); |
142 | 235 | ||
143 | /* | 236 | /* |
237 | * Like add_to_page_cache_locked, but used to add newly allocated pages: | ||
238 | * the page is new, so we can just run SetPageLocked() against it. | ||
239 | */ | ||
240 | static inline int add_to_page_cache(struct page *page, | ||
241 | struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | ||
242 | { | ||
243 | int error; | ||
244 | |||
245 | SetPageLocked(page); | ||
246 | error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); | ||
247 | if (unlikely(error)) | ||
248 | ClearPageLocked(page); | ||
249 | return error; | ||
250 | } | ||
251 | |||
252 | /* | ||
144 | * Return byte-offset into filesystem object for page. | 253 | * Return byte-offset into filesystem object for page. |
145 | */ | 254 | */ |
146 | static inline loff_t page_offset(struct page *page) | 255 | static inline loff_t page_offset(struct page *page) |
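Editor's note: the block comment above lays out the lookup-side protocol: find the page in the radix tree, take a speculative reference, then re-check that it is still the page at that index. A hedged sketch of that find_get_page-style loop; the real implementation also uses slot-dereferencing helpers omitted here.

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Sketch of the lockless lookup pattern documented above. */
static struct page *example_lockless_lookup(struct address_space *mapping,
					    pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, index);		/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))			/* step 2 */
			goto repeat;
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       index))) {	/* step 3 */
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;	/* NULL, or a page with an elevated refcount */
}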
diff --git a/include/linux/parport.h b/include/linux/parport.h index dcb9e01a69ca..6a0d7cdb5774 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h | |||
@@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device); | |||
560 | 560 | ||
561 | #endif /* !CONFIG_PARPORT_NOT_PC */ | 561 | #endif /* !CONFIG_PARPORT_NOT_PC */ |
562 | 562 | ||
563 | extern unsigned long parport_default_timeslice; | ||
564 | extern int parport_default_spintime; | ||
565 | |||
563 | #endif /* __KERNEL__ */ | 566 | #endif /* __KERNEL__ */ |
564 | #endif /* _PARPORT_H_ */ | 567 | #endif /* _PARPORT_H_ */ |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4cdd393e71e1..fac3337547eb 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -74,11 +74,6 @@ struct percpu_data { | |||
74 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | 74 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ |
75 | }) | 75 | }) |
76 | 76 | ||
77 | extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); | ||
78 | extern void percpu_depopulate(void *__pdata, int cpu); | ||
79 | extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | ||
80 | cpumask_t *mask); | ||
81 | extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); | ||
82 | extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); | 77 | extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); |
83 | extern void percpu_free(void *__pdata); | 78 | extern void percpu_free(void *__pdata); |
84 | 79 | ||
@@ -86,26 +81,6 @@ extern void percpu_free(void *__pdata); | |||
86 | 81 | ||
87 | #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) | 82 | #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) |
88 | 83 | ||
89 | static inline void percpu_depopulate(void *__pdata, int cpu) | ||
90 | { | ||
91 | } | ||
92 | |||
93 | static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) | ||
94 | { | ||
95 | } | ||
96 | |||
97 | static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, | ||
98 | int cpu) | ||
99 | { | ||
100 | return percpu_ptr(__pdata, cpu); | ||
101 | } | ||
102 | |||
103 | static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | ||
104 | cpumask_t *mask) | ||
105 | { | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) | 84 | static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) |
110 | { | 85 | { |
111 | return kzalloc(size, gfp); | 86 | return kzalloc(size, gfp); |
@@ -118,10 +93,6 @@ static inline void percpu_free(void *__pdata) | |||
118 | 93 | ||
119 | #endif /* CONFIG_SMP */ | 94 | #endif /* CONFIG_SMP */ |
120 | 95 | ||
121 | #define percpu_populate_mask(__pdata, size, gfp, mask) \ | ||
122 | __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) | ||
123 | #define percpu_depopulate_mask(__pdata, mask) \ | ||
124 | __percpu_depopulate_mask((__pdata), &(mask)) | ||
125 | #define percpu_alloc_mask(size, gfp, mask) \ | 96 | #define percpu_alloc_mask(size, gfp, mask) \ |
126 | __percpu_alloc_mask((size), (gfp), &(mask)) | 97 | __percpu_alloc_mask((size), (gfp), &(mask)) |
127 | 98 | ||
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index c6f5f9dd0cee..fd31756e1a00 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child) | |||
121 | int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); | 121 | int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); |
122 | int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); | 122 | int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); |
123 | 123 | ||
124 | /** | ||
125 | * task_ptrace - return %PT_* flags that apply to a task | ||
126 | * @task: pointer to &task_struct in question | ||
127 | * | ||
128 | * Returns the %PT_* flags that apply to @task. | ||
129 | */ | ||
130 | static inline int task_ptrace(struct task_struct *task) | ||
131 | { | ||
132 | return task->ptrace; | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * ptrace_event - possibly stop for a ptrace event notification | ||
137 | * @mask: %PT_* bit to check in @current->ptrace | ||
138 | * @event: %PTRACE_EVENT_* value to report if @mask is set | ||
139 | * @message: value for %PTRACE_GETEVENTMSG to return | ||
140 | * | ||
141 | * This checks the @mask bit to see if ptrace wants stops for this event. | ||
142 | * If so we stop, reporting @event and @message to the ptrace parent. | ||
143 | * | ||
144 | * Returns nonzero if we did a ptrace notification, zero if not. | ||
145 | * | ||
146 | * Called without locks. | ||
147 | */ | ||
148 | static inline int ptrace_event(int mask, int event, unsigned long message) | ||
149 | { | ||
150 | if (mask && likely(!(current->ptrace & mask))) | ||
151 | return 0; | ||
152 | current->ptrace_message = message; | ||
153 | ptrace_notify((event << 8) | SIGTRAP); | ||
154 | return 1; | ||
155 | } | ||
156 | |||
157 | /** | ||
158 | * ptrace_init_task - initialize ptrace state for a new child | ||
159 | * @child: new child task | ||
160 | * @ptrace: true if child should be ptrace'd by parent's tracer | ||
161 | * | ||
162 | * This is called immediately after adding @child to its parent's children | ||
163 | * list. @ptrace is false in the normal case, and true to ptrace @child. | ||
164 | * | ||
165 | * Called with current's siglock and write_lock_irq(&tasklist_lock) held. | ||
166 | */ | ||
167 | static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | ||
168 | { | ||
169 | INIT_LIST_HEAD(&child->ptrace_entry); | ||
170 | INIT_LIST_HEAD(&child->ptraced); | ||
171 | child->parent = child->real_parent; | ||
172 | child->ptrace = 0; | ||
173 | if (unlikely(ptrace)) { | ||
174 | child->ptrace = current->ptrace; | ||
175 | __ptrace_link(child, current->parent); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped | ||
181 | * @task: task in %EXIT_DEAD state | ||
182 | * | ||
183 | * Called with write_lock(&tasklist_lock) held. | ||
184 | */ | ||
185 | static inline void ptrace_release_task(struct task_struct *task) | ||
186 | { | ||
187 | BUG_ON(!list_empty(&task->ptraced)); | ||
188 | ptrace_unlink(task); | ||
189 | BUG_ON(!list_empty(&task->ptrace_entry)); | ||
190 | } | ||
191 | |||
124 | #ifndef force_successful_syscall_return | 192 | #ifndef force_successful_syscall_return |
125 | /* | 193 | /* |
126 | * System call handlers that, upon successful completion, need to return a | 194 | * System call handlers that, upon successful completion, need to return a |
@@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task) | |||
246 | #define arch_ptrace_stop(code, info) do { } while (0) | 314 | #define arch_ptrace_stop(code, info) do { } while (0) |
247 | #endif | 315 | #endif |
248 | 316 | ||
317 | extern int task_current_syscall(struct task_struct *target, long *callno, | ||
318 | unsigned long args[6], unsigned int maxargs, | ||
319 | unsigned long *sp, unsigned long *pc); | ||
320 | |||
249 | #endif | 321 | #endif |
250 | 322 | ||
251 | #endif | 323 | #endif |
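Editor's note: ptrace_event() bundles the "check the PT_* bit, then stop and report" sequence that fork/exec/exit paths need. A hedged sketch of a fork-reporting call; the surrounding function is illustrative, the PT_TRACE_FORK / PTRACE_EVENT_FORK pairing is the existing convention.

#include <linux/ptrace.h>

/* Sketch: notify the tracer of a fork via the new helper. */
static void example_report_fork(unsigned long child_pid)
{
	/* Stops only if PT_TRACE_FORK is set in current->ptrace; the tracer
	 * retrieves child_pid with PTRACE_GETEVENTMSG. */
	ptrace_event(PT_TRACE_FORK, PTRACE_EVENT_FORK, child_pid);
}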
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index b8ce2b444bb5..a916c6660dfa 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -99,12 +99,15 @@ do { \ | |||
99 | * | 99 | * |
100 | * The notable exceptions to this rule are the following functions: | 100 | * The notable exceptions to this rule are the following functions: |
101 | * radix_tree_lookup | 101 | * radix_tree_lookup |
102 | * radix_tree_lookup_slot | ||
102 | * radix_tree_tag_get | 103 | * radix_tree_tag_get |
103 | * radix_tree_gang_lookup | 104 | * radix_tree_gang_lookup |
105 | * radix_tree_gang_lookup_slot | ||
104 | * radix_tree_gang_lookup_tag | 106 | * radix_tree_gang_lookup_tag |
107 | * radix_tree_gang_lookup_tag_slot | ||
105 | * radix_tree_tagged | 108 | * radix_tree_tagged |
106 | * | 109 | * |
107 | * The first 4 functions are able to be called locklessly, using RCU. The | 110 | * The first 7 functions are able to be called locklessly, using RCU. The |
108 | * caller must ensure calls to these functions are made within rcu_read_lock() | 111 | * caller must ensure calls to these functions are made within rcu_read_lock() |
109 | * regions. Other readers (lock-free or otherwise) and modifications may be | 112 | * regions. Other readers (lock-free or otherwise) and modifications may be |
110 | * running concurrently. | 113 | * running concurrently. |
@@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); | |||
159 | unsigned int | 162 | unsigned int |
160 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | 163 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, |
161 | unsigned long first_index, unsigned int max_items); | 164 | unsigned long first_index, unsigned int max_items); |
165 | unsigned int | ||
166 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | ||
167 | unsigned long first_index, unsigned int max_items); | ||
162 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, | 168 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, |
163 | unsigned long index, unsigned long max_scan); | 169 | unsigned long index, unsigned long max_scan); |
164 | int radix_tree_preload(gfp_t gfp_mask); | 170 | int radix_tree_preload(gfp_t gfp_mask); |
@@ -173,6 +179,10 @@ unsigned int | |||
173 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | 179 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, |
174 | unsigned long first_index, unsigned int max_items, | 180 | unsigned long first_index, unsigned int max_items, |
175 | unsigned int tag); | 181 | unsigned int tag); |
182 | unsigned int | ||
183 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | ||
184 | unsigned long first_index, unsigned int max_items, | ||
185 | unsigned int tag); | ||
176 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); | 186 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); |
177 | 187 | ||
178 | static inline void radix_tree_preload_end(void) | 188 | static inline void radix_tree_preload_end(void) |
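Editor's note: the new *_slot variants hand back pointers to radix-tree slots rather than the items themselves, and like the other lockless lookups they must run inside rcu_read_lock(). A hedged sketch; the per-slot handling is illustrative.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Sketch: RCU-side scan with radix_tree_gang_lookup_slot(). */
static unsigned int example_scan_slots(struct radix_tree_root *root,
				       unsigned long start)
{
	void **slots[16];
	unsigned int nr, i;

	rcu_read_lock();
	nr = radix_tree_gang_lookup_slot(root, slots, start, 16);
	for (i = 0; i < nr; i++) {
		void *item = rcu_dereference(*slots[i]);
		/* inspect item; a concurrent delete may have cleared the slot */
		if (!item)
			continue;
	}
	rcu_read_unlock();
	return nr;
}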
diff --git a/include/linux/relay.h b/include/linux/relay.h index 6cd8c4425fc7..953fc055e875 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h | |||
@@ -48,6 +48,7 @@ struct rchan_buf | |||
48 | size_t *padding; /* padding counts per sub-buffer */ | 48 | size_t *padding; /* padding counts per sub-buffer */ |
49 | size_t prev_padding; /* temporary variable */ | 49 | size_t prev_padding; /* temporary variable */ |
50 | size_t bytes_consumed; /* bytes consumed in cur read subbuf */ | 50 | size_t bytes_consumed; /* bytes consumed in cur read subbuf */ |
51 | size_t early_bytes; /* bytes consumed before VFS inited */ | ||
51 | unsigned int cpu; /* this buf's cpu */ | 52 | unsigned int cpu; /* this buf's cpu */ |
52 | } ____cacheline_aligned; | 53 | } ____cacheline_aligned; |
53 | 54 | ||
@@ -68,6 +69,7 @@ struct rchan | |||
68 | int is_global; /* One global buffer ? */ | 69 | int is_global; /* One global buffer ? */ |
69 | struct list_head list; /* for channel list */ | 70 | struct list_head list; /* for channel list */ |
70 | struct dentry *parent; /* parent dentry passed to open */ | 71 | struct dentry *parent; /* parent dentry passed to open */ |
72 | int has_base_filename; /* has a filename associated? */ | ||
71 | char base_filename[NAME_MAX]; /* saved base filename */ | 73 | char base_filename[NAME_MAX]; /* saved base filename */ |
72 | }; | 74 | }; |
73 | 75 | ||
@@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename, | |||
169 | size_t n_subbufs, | 171 | size_t n_subbufs, |
170 | struct rchan_callbacks *cb, | 172 | struct rchan_callbacks *cb, |
171 | void *private_data); | 173 | void *private_data); |
174 | extern int relay_late_setup_files(struct rchan *chan, | ||
175 | const char *base_filename, | ||
176 | struct dentry *parent); | ||
172 | extern void relay_close(struct rchan *chan); | 177 | extern void relay_close(struct rchan *chan); |
173 | extern void relay_flush(struct rchan *chan); | 178 | extern void relay_flush(struct rchan *chan); |
174 | extern void relay_subbufs_consumed(struct rchan *chan, | 179 | extern void relay_subbufs_consumed(struct rchan *chan, |
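Editor's note: relay_late_setup_files() exists so a channel can buffer data before its debugfs/VFS files can be created (has_base_filename and early_bytes above support this). A hedged sketch, assuming relay_open() accepts a NULL base filename for such a buffer-only channel, as this series intends; the callbacks, sizes and names are illustrative.

#include <linux/relay.h>
#include <linux/errno.h>

extern struct rchan_callbacks example_relay_cb;	/* assumed defined elsewhere */
static struct rchan *example_chan;

/* Sketch: open the channel early, attach its files once the VFS is ready. */
static int example_open_early(void)
{
	example_chan = relay_open(NULL, NULL, 64 * 1024, 8,
				  &example_relay_cb, NULL);
	return example_chan ? 0 : -ENOMEM;
}

static int example_attach_files(struct dentry *parent)
{
	return relay_late_setup_files(example_chan, "example", parent);
}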
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b01fe004cb5e..91f597ad6acc 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -225,8 +225,6 @@ typedef struct rtc_task { | |||
225 | int rtc_register(rtc_task_t *task); | 225 | int rtc_register(rtc_task_t *task); |
226 | int rtc_unregister(rtc_task_t *task); | 226 | int rtc_unregister(rtc_task_t *task); |
227 | int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); | 227 | int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); |
228 | void rtc_get_rtc_time(struct rtc_time *rtc_tm); | ||
229 | irqreturn_t rtc_interrupt(int irq, void *dev_id); | ||
230 | 228 | ||
231 | #endif /* __KERNEL__ */ | 229 | #endif /* __KERNEL__ */ |
232 | 230 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 42036ffe6b00..f59318a0099b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -292,7 +292,6 @@ extern void sched_show_task(struct task_struct *p); | |||
292 | 292 | ||
293 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 293 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
294 | extern void softlockup_tick(void); | 294 | extern void softlockup_tick(void); |
295 | extern void spawn_softlockup_task(void); | ||
296 | extern void touch_softlockup_watchdog(void); | 295 | extern void touch_softlockup_watchdog(void); |
297 | extern void touch_all_softlockup_watchdogs(void); | 296 | extern void touch_all_softlockup_watchdogs(void); |
298 | extern unsigned int softlockup_panic; | 297 | extern unsigned int softlockup_panic; |
@@ -1797,7 +1796,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_ | |||
1797 | extern int kill_pgrp(struct pid *pid, int sig, int priv); | 1796 | extern int kill_pgrp(struct pid *pid, int sig, int priv); |
1798 | extern int kill_pid(struct pid *pid, int sig, int priv); | 1797 | extern int kill_pid(struct pid *pid, int sig, int priv); |
1799 | extern int kill_proc_info(int, struct siginfo *, pid_t); | 1798 | extern int kill_proc_info(int, struct siginfo *, pid_t); |
1800 | extern void do_notify_parent(struct task_struct *, int); | 1799 | extern int do_notify_parent(struct task_struct *, int); |
1801 | extern void force_sig(int, struct task_struct *); | 1800 | extern void force_sig(int, struct task_struct *); |
1802 | extern void force_sig_specific(int, struct task_struct *); | 1801 | extern void force_sig_specific(int, struct task_struct *); |
1803 | extern int send_sig(int, struct task_struct *, int); | 1802 | extern int send_sig(int, struct task_struct *, int); |
@@ -1883,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
1883 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 1882 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
1884 | 1883 | ||
1885 | #ifdef CONFIG_SMP | 1884 | #ifdef CONFIG_SMP |
1886 | extern void wait_task_inactive(struct task_struct * p); | 1885 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
1887 | #else | 1886 | #else |
1888 | #define wait_task_inactive(p) do { } while (0) | 1887 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
1888 | long match_state) | ||
1889 | { | ||
1890 | return 1; | ||
1891 | } | ||
1889 | #endif | 1892 | #endif |
1890 | 1893 | ||
1891 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) | 1894 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) |
@@ -2139,16 +2142,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
2139 | 2142 | ||
2140 | #endif /* CONFIG_SMP */ | 2143 | #endif /* CONFIG_SMP */ |
2141 | 2144 | ||
2142 | #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT | ||
2143 | extern void arch_pick_mmap_layout(struct mm_struct *mm); | 2145 | extern void arch_pick_mmap_layout(struct mm_struct *mm); |
2144 | #else | ||
2145 | static inline void arch_pick_mmap_layout(struct mm_struct *mm) | ||
2146 | { | ||
2147 | mm->mmap_base = TASK_UNMAPPED_BASE; | ||
2148 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
2149 | mm->unmap_area = arch_unmap_area; | ||
2150 | } | ||
2151 | #endif | ||
2152 | 2146 | ||
2153 | #ifdef CONFIG_TRACING | 2147 | #ifdef CONFIG_TRACING |
2154 | extern void | 2148 | extern void |
@@ -2231,14 +2225,6 @@ static inline void inc_syscw(struct task_struct *tsk) | |||
2231 | } | 2225 | } |
2232 | #endif | 2226 | #endif |
2233 | 2227 | ||
2234 | #ifdef CONFIG_SMP | ||
2235 | void migration_init(void); | ||
2236 | #else | ||
2237 | static inline void migration_init(void) | ||
2238 | { | ||
2239 | } | ||
2240 | #endif | ||
2241 | |||
2242 | #ifndef TASK_SIZE_OF | 2228 | #ifndef TASK_SIZE_OF |
2243 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2229 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
2244 | #endif | 2230 | #endif |
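The wait_task_inactive() change above makes the helper take the state the caller expects the task to remain in, and report (by returning zero) when that state was left in the meantime. A minimal sketch of a caller against the new prototype; the helper name and error code are illustrative, not from the tree:

/* Illustrative helper: wait until @child is off its runqueue while it
 * is still in TASK_TRACED.  With the new interface, a zero return means
 * the task left the requested state before it went inactive. */
static int wait_for_traced_child(struct task_struct *child)
{
	if (!wait_task_inactive(child, TASK_TRACED))
		return -ESRCH;	/* state changed under us */

	return 0;		/* child is inactive and still TASK_TRACED */
}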
diff --git a/include/linux/slab.h b/include/linux/slab.h index 41103910f8a2..9ff8e8499403 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -58,7 +58,7 @@ int slab_is_available(void); | |||
58 | 58 | ||
59 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | 59 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, |
60 | unsigned long, | 60 | unsigned long, |
61 | void (*)(struct kmem_cache *, void *)); | 61 | void (*)(void *)); |
62 | void kmem_cache_destroy(struct kmem_cache *); | 62 | void kmem_cache_destroy(struct kmem_cache *); |
63 | int kmem_cache_shrink(struct kmem_cache *); | 63 | int kmem_cache_shrink(struct kmem_cache *); |
64 | void kmem_cache_free(struct kmem_cache *, void *); | 64 | void kmem_cache_free(struct kmem_cache *, void *); |
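The kmem_cache_create() prototype above drops the struct kmem_cache argument from the constructor, which now receives only the object pointer. A hedged sketch of a cache user against the new signature; struct my_object and the cache name are made-up examples:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_object {
	int id;
	void *payload;
};

/* The constructor now gets only the object being initialized. */
static void my_object_ctor(void *obj)
{
	struct my_object *o = obj;

	o->id = 0;
	o->payload = NULL;
}

static struct kmem_cache *my_object_cache;

static int __init my_cache_init(void)
{
	my_object_cache = kmem_cache_create("my_object_cache",
					    sizeof(struct my_object), 0,
					    SLAB_HWCACHE_ALIGN,
					    my_object_ctor);
	return my_object_cache ? 0 : -ENOMEM;
}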
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d117ea2825a9..5bad61a93f65 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -85,7 +85,7 @@ struct kmem_cache { | |||
85 | struct kmem_cache_order_objects min; | 85 | struct kmem_cache_order_objects min; |
86 | gfp_t allocflags; /* gfp flags to use on each alloc */ | 86 | gfp_t allocflags; /* gfp flags to use on each alloc */ |
87 | int refcount; /* Refcount for slab cache destroy */ | 87 | int refcount; /* Refcount for slab cache destroy */ |
88 | void (*ctor)(struct kmem_cache *, void *); | 88 | void (*ctor)(void *); |
89 | int inuse; /* Offset to metadata */ | 89 | int inuse; /* Offset to metadata */ |
90 | int align; /* Alignment */ | 90 | int align; /* Alignment */ |
91 | const char *name; /* Name (only for display!) */ | 91 | const char *name; /* Name (only for display!) */ |
diff --git a/include/linux/smp.h b/include/linux/smp.h index 48262f86c969..66484d4a8459 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data); | |||
74 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | 74 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS |
75 | void generic_smp_call_function_single_interrupt(void); | 75 | void generic_smp_call_function_single_interrupt(void); |
76 | void generic_smp_call_function_interrupt(void); | 76 | void generic_smp_call_function_interrupt(void); |
77 | void init_call_single_data(void); | ||
78 | void ipi_call_lock(void); | 77 | void ipi_call_lock(void); |
79 | void ipi_call_unlock(void); | 78 | void ipi_call_unlock(void); |
80 | void ipi_call_lock_irq(void); | 79 | void ipi_call_lock_irq(void); |
81 | void ipi_call_unlock_irq(void); | 80 | void ipi_call_unlock_irq(void); |
82 | #else | ||
83 | static inline void init_call_single_data(void) | ||
84 | { | ||
85 | } | ||
86 | #endif | 81 | #endif |
87 | 82 | ||
88 | /* | 83 | /* |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4bf8cade9dbc..e530026eedf7 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) | |||
427 | { | 427 | { |
428 | switch (dev->bus->bustype) { | 428 | switch (dev->bus->bustype) { |
429 | case SSB_BUSTYPE_PCI: | 429 | case SSB_BUSTYPE_PCI: |
430 | return pci_dma_mapping_error(addr); | 430 | return pci_dma_mapping_error(dev->bus->host_pci, addr); |
431 | case SSB_BUSTYPE_SSB: | 431 | case SSB_BUSTYPE_SSB: |
432 | return dma_mapping_error(addr); | 432 | return dma_mapping_error(dev->dev, addr); |
433 | default: | 433 | default: |
434 | __ssb_dma_not_implemented(dev); | 434 | __ssb_dma_not_implemented(dev); |
435 | } | 435 | } |
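The ssb helper simply follows the generic DMA API, where dma_mapping_error() now takes the struct device as its first argument. A hedged driver-side sketch of the resulting calling convention (the helper and buffer names are hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical helper: map a transmit buffer and check the mapping with
 * the two-argument dma_mapping_error(). */
static dma_addr_t map_tx_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return 0;	/* callers treat 0 as "mapping failed" */

	return addr;
}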
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index e8e69159af71..c63435095970 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e) | |||
278 | } | 278 | } |
279 | #endif | 279 | #endif |
280 | 280 | ||
281 | extern struct mutex pm_mutex; | ||
282 | |||
281 | #endif /* _LINUX_SUSPEND_H */ | 283 | #endif /* _LINUX_SUSPEND_H */ |
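With pm_mutex now declared in the header, code outside kernel/power can serialize against suspend/hibernation transitions. A purely illustrative sketch (the function name is made up):

#include <linux/suspend.h>
#include <linux/mutex.h>

/* Illustrative only: keep this work from overlapping a suspend transition. */
static void do_work_not_during_suspend(void)
{
	mutex_lock(&pm_mutex);
	/* ... work that must not race with entering suspend ... */
	mutex_unlock(&pm_mutex);
}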
diff --git a/include/linux/swap.h b/include/linux/swap.h index 0b3377650c85..de40f169a4e4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, | |||
237 | 237 | ||
238 | /* linux/mm/swapfile.c */ | 238 | /* linux/mm/swapfile.c */ |
239 | extern long total_swap_pages; | 239 | extern long total_swap_pages; |
240 | extern unsigned int nr_swapfiles; | ||
241 | extern void si_swapinfo(struct sysinfo *); | 240 | extern void si_swapinfo(struct sysinfo *); |
242 | extern swp_entry_t get_swap_page(void); | 241 | extern swp_entry_t get_swap_page(void); |
243 | extern swp_entry_t get_swap_page_of_type(int); | 242 | extern swp_entry_t get_swap_page_of_type(int); |
@@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *); | |||
254 | extern int remove_exclusive_swap_page(struct page *); | 253 | extern int remove_exclusive_swap_page(struct page *); |
255 | struct backing_dev_info; | 254 | struct backing_dev_info; |
256 | 255 | ||
257 | extern spinlock_t swap_lock; | ||
258 | |||
259 | /* linux/mm/thrash.c */ | 256 | /* linux/mm/thrash.c */ |
260 | extern struct mm_struct * swap_token_mm; | 257 | extern struct mm_struct * swap_token_mm; |
261 | extern void grab_swap_token(void); | 258 | extern void grab_swap_token(void); |
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h new file mode 100644 index 000000000000..589f429619c9 --- /dev/null +++ b/include/linux/tracehook.h | |||
@@ -0,0 +1,575 @@ | |||
1 | /* | ||
2 | * Tracing hooks | ||
3 | * | ||
4 | * Copyright (C) 2008 Red Hat, Inc. All rights reserved. | ||
5 | * | ||
6 | * This copyrighted material is made available to anyone wishing to use, | ||
7 | * modify, copy, or redistribute it subject to the terms and conditions | ||
8 | * of the GNU General Public License v.2. | ||
9 | * | ||
10 | * This file defines hook entry points called by core code where | ||
11 | * user tracing/debugging support might need to do something. These | ||
12 | * entry points are called tracehook_*(). Each hook declared below | ||
13 | * has a detailed kerneldoc comment giving the context (locking et | ||
14 | * al) from which it is called, and the meaning of its return value. | ||
15 | * | ||
16 | * Each function here typically has only one call site, so it is ok | ||
17 | * to have some nontrivial tracehook_*() inlines. In all cases, the | ||
18 | * fast path when no tracing is enabled should be very short. | ||
19 | * | ||
20 | * The purpose of this file and the tracehook_* layer is to consolidate | ||
21 | * the interface that the kernel core and arch code uses to enable any | ||
22 | * user debugging or tracing facility (such as ptrace). The interfaces | ||
23 | * here are carefully documented so that maintainers of core and arch | ||
24 | * code do not need to think about the implementation details of the | ||
25 | * tracing facilities. Likewise, maintainers of the tracing code do not | ||
26 | * need to understand all the calling core or arch code in detail, just | ||
27 | * documented circumstances of each call, such as locking conditions. | ||
28 | * | ||
29 | * If the calling core code changes so that locking is different, then | ||
30 | * it is ok to change the interface documented here. The maintainer of | ||
31 | * core code changing should notify the maintainers of the tracing code | ||
32 | * that they need to work out the change. | ||
33 | * | ||
34 | * Some tracehook_*() inlines take arguments that the current tracing | ||
35 | * implementations might not necessarily use. These function signatures | ||
36 | * are chosen to pass in all the information that is on hand in the | ||
37 | * caller and might conceivably be relevant to a tracer, so that the | ||
38 | * core code won't have to be updated when tracing adds more features. | ||
39 | * If a call site changes so that some of those parameters are no longer | ||
40 | * already on hand without extra work, then the tracehook_* interface | ||
41 | * can change so there is no make-work burden on the core code. The | ||
42 | * maintainer of core code changing should notify the maintainers of the | ||
43 | * tracing code that they need to work out the change. | ||
44 | */ | ||
45 | |||
46 | #ifndef _LINUX_TRACEHOOK_H | ||
47 | #define _LINUX_TRACEHOOK_H 1 | ||
48 | |||
49 | #include <linux/sched.h> | ||
50 | #include <linux/ptrace.h> | ||
51 | #include <linux/security.h> | ||
52 | struct linux_binprm; | ||
53 | |||
54 | /** | ||
55 | * tracehook_expect_breakpoints - guess if task memory might be touched | ||
56 | * @task: current task, making a new mapping | ||
57 | * | ||
58 | * Return nonzero if @task is expected to want breakpoint insertion in | ||
59 | * its memory at some point. A zero return is no guarantee it won't | ||
60 | * be done, but this is a hint that it's known to be likely. | ||
61 | * | ||
62 | * May be called with @task->mm->mmap_sem held for writing. | ||
63 | */ | ||
64 | static inline int tracehook_expect_breakpoints(struct task_struct *task) | ||
65 | { | ||
66 | return (task_ptrace(task) & PT_PTRACED) != 0; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * ptrace report for syscall entry and exit looks identical. | ||
71 | */ | ||
72 | static inline void ptrace_report_syscall(struct pt_regs *regs) | ||
73 | { | ||
74 | int ptrace = task_ptrace(current); | ||
75 | |||
76 | if (!(ptrace & PT_PTRACED)) | ||
77 | return; | ||
78 | |||
79 | ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | ||
80 | |||
81 | /* | ||
82 | * this isn't the same as continuing with a signal, but it will do | ||
83 | * for normal use. strace only continues with a signal if the | ||
84 | * stopping signal is not SIGTRAP. -brl | ||
85 | */ | ||
86 | if (current->exit_code) { | ||
87 | send_sig(current->exit_code, current, 1); | ||
88 | current->exit_code = 0; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | /** | ||
93 | * tracehook_report_syscall_entry - task is about to attempt a system call | ||
94 | * @regs: user register state of current task | ||
95 | * | ||
96 | * This will be called if %TIF_SYSCALL_TRACE has been set, when the | ||
97 | * current task has just entered the kernel for a system call. | ||
98 | * Full user register state is available here. Changing the values | ||
99 | * in @regs can affect the system call number and arguments to be tried. | ||
100 | * It is safe to block here, preventing the system call from beginning. | ||
101 | * | ||
102 | * Returns zero normally, or nonzero if the calling arch code should abort | ||
103 | * the system call. That must prevent normal entry so no system call is | ||
104 | * made. If @task ever returns to user mode after this, its register state | ||
105 | * is unspecified, but should be something harmless like an %ENOSYS error | ||
106 | * return. It should preserve enough information so that syscall_rollback() | ||
107 | * can work (see asm-generic/syscall.h). | ||
108 | * | ||
109 | * Called without locks, just after entering kernel mode. | ||
110 | */ | ||
111 | static inline __must_check int tracehook_report_syscall_entry( | ||
112 | struct pt_regs *regs) | ||
113 | { | ||
114 | ptrace_report_syscall(regs); | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * tracehook_report_syscall_exit - task has just finished a system call | ||
120 | * @regs: user register state of current task | ||
121 | * @step: nonzero if simulating single-step or block-step | ||
122 | * | ||
123 | * This will be called if %TIF_SYSCALL_TRACE has been set, when the | ||
124 | * current task has just finished an attempted system call. Full | ||
125 | * user register state is available here. It is safe to block here, | ||
126 | * preventing signals from being processed. | ||
127 | * | ||
128 | * If @step is nonzero, this report is also in lieu of the normal | ||
129 | * trap that would follow the system call instruction because | ||
130 | * user_enable_block_step() or user_enable_single_step() was used. | ||
131 | * In this case, %TIF_SYSCALL_TRACE might not be set. | ||
132 | * | ||
133 | * Called without locks, just before checking for pending signals. | ||
134 | */ | ||
135 | static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) | ||
136 | { | ||
137 | ptrace_report_syscall(regs); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * tracehook_unsafe_exec - check for exec declared unsafe due to tracing | ||
142 | * @task: current task doing exec | ||
143 | * | ||
144 | * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. | ||
145 | * | ||
146 | * Called with task_lock() held on @task. | ||
147 | */ | ||
148 | static inline int tracehook_unsafe_exec(struct task_struct *task) | ||
149 | { | ||
150 | int unsafe = 0; | ||
151 | int ptrace = task_ptrace(task); | ||
152 | if (ptrace & PT_PTRACED) { | ||
153 | if (ptrace & PT_PTRACE_CAP) | ||
154 | unsafe |= LSM_UNSAFE_PTRACE_CAP; | ||
155 | else | ||
156 | unsafe |= LSM_UNSAFE_PTRACE; | ||
157 | } | ||
158 | return unsafe; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * tracehook_tracer_task - return the task that is tracing the given task | ||
163 | * @tsk: task to consider | ||
164 | * | ||
165 | * Returns NULL if no one is tracing @tsk, or the &struct task_struct | ||
166 | * pointer to its tracer. | ||
167 | * | ||
168 | * Must be called under rcu_read_lock(). The pointer returned might be kept | ||
169 | * live only by RCU. During exec, this may be called with task_lock() | ||
170 | * held on @tsk, still held from when tracehook_unsafe_exec() was called. | ||
171 | */ | ||
172 | static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) | ||
173 | { | ||
174 | if (task_ptrace(tsk) & PT_PTRACED) | ||
175 | return rcu_dereference(tsk->parent); | ||
176 | return NULL; | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * tracehook_report_exec - a successful exec was completed | ||
181 | * @fmt: &struct linux_binfmt that performed the exec | ||
182 | * @bprm: &struct linux_binprm containing exec details | ||
183 | * @regs: user-mode register state | ||
184 | * | ||
185 | * An exec just completed; we are shortly going to return to user mode. | ||
186 | * The freshly initialized register state can be seen and changed in @regs. | ||
187 | * The name, file and other pointers in @bprm are still on hand to be | ||
188 | * inspected, but will be freed as soon as this returns. | ||
189 | * | ||
190 | * Called with no locks, but with some kernel resources held live | ||
191 | * and a reference on @fmt->module. | ||
192 | */ | ||
193 | static inline void tracehook_report_exec(struct linux_binfmt *fmt, | ||
194 | struct linux_binprm *bprm, | ||
195 | struct pt_regs *regs) | ||
196 | { | ||
197 | if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && | ||
198 | unlikely(task_ptrace(current) & PT_PTRACED)) | ||
199 | send_sig(SIGTRAP, current, 0); | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * tracehook_report_exit - task has begun to exit | ||
204 | * @exit_code: pointer to value destined for @current->exit_code | ||
205 | * | ||
206 | * @exit_code points to the value passed to do_exit(), which tracing | ||
207 | * might change here. This is almost the first thing in do_exit(), | ||
208 | * before freeing any resources or setting the %PF_EXITING flag. | ||
209 | * | ||
210 | * Called with no locks held. | ||
211 | */ | ||
212 | static inline void tracehook_report_exit(long *exit_code) | ||
213 | { | ||
214 | ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * tracehook_prepare_clone - prepare for new child to be cloned | ||
219 | * @clone_flags: %CLONE_* flags from clone/fork/vfork system call | ||
220 | * | ||
221 | * This is called before a new user task is to be cloned. | ||
222 | * Its return value will be passed to tracehook_finish_clone(). | ||
223 | * | ||
224 | * Called with no locks held. | ||
225 | */ | ||
226 | static inline int tracehook_prepare_clone(unsigned clone_flags) | ||
227 | { | ||
228 | if (clone_flags & CLONE_UNTRACED) | ||
229 | return 0; | ||
230 | |||
231 | if (clone_flags & CLONE_VFORK) { | ||
232 | if (current->ptrace & PT_TRACE_VFORK) | ||
233 | return PTRACE_EVENT_VFORK; | ||
234 | } else if ((clone_flags & CSIGNAL) != SIGCHLD) { | ||
235 | if (current->ptrace & PT_TRACE_CLONE) | ||
236 | return PTRACE_EVENT_CLONE; | ||
237 | } else if (current->ptrace & PT_TRACE_FORK) | ||
238 | return PTRACE_EVENT_FORK; | ||
239 | |||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | /** | ||
244 | * tracehook_finish_clone - new child created and being attached | ||
245 | * @child: new child task | ||
246 | * @clone_flags: %CLONE_* flags from clone/fork/vfork system call | ||
247 | * @trace: return value from tracehook_prepare_clone() | ||
248 | * | ||
249 | * This is called immediately after adding @child to its parent's children list. | ||
250 | * The @trace value is that returned by tracehook_prepare_clone(). | ||
251 | * | ||
252 | * Called with current's siglock and write_lock_irq(&tasklist_lock) held. | ||
253 | */ | ||
254 | static inline void tracehook_finish_clone(struct task_struct *child, | ||
255 | unsigned long clone_flags, int trace) | ||
256 | { | ||
257 | ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); | ||
258 | } | ||
259 | |||
260 | /** | ||
261 | * tracehook_report_clone - in parent, new child is about to start running | ||
262 | * @trace: return value from tracehook_prepare_clone() | ||
263 | * @regs: parent's user register state | ||
264 | * @clone_flags: flags from parent's system call | ||
265 | * @pid: new child's PID in the parent's namespace | ||
266 | * @child: new child task | ||
267 | * | ||
268 | * Called after a child is set up, but before it has been started running. | ||
269 | * The @trace value is that returned by tracehook_prepare_clone(). | ||
270 | * This is not a good place to block, because the child has not started yet. | ||
271 | * Suspend the child here if desired, and block in tracehook_report_clone_complete(). | ||
272 | * This must prevent the child from self-reaping if tracehook_report_clone_complete() | ||
273 | * uses the @child pointer; otherwise it might have died and been released by | ||
274 | * the time tracehook_report_clone_complete() is called. | ||
275 | * | ||
276 | * Called with no locks held, but the child cannot run until this returns. | ||
277 | */ | ||
278 | static inline void tracehook_report_clone(int trace, struct pt_regs *regs, | ||
279 | unsigned long clone_flags, | ||
280 | pid_t pid, struct task_struct *child) | ||
281 | { | ||
282 | if (unlikely(trace)) { | ||
283 | /* | ||
284 | * The child starts up with an immediate SIGSTOP. | ||
285 | */ | ||
286 | sigaddset(&child->pending.signal, SIGSTOP); | ||
287 | set_tsk_thread_flag(child, TIF_SIGPENDING); | ||
288 | } | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * tracehook_report_clone_complete - new child is running | ||
293 | * @trace: return value from tracehook_prepare_clone() | ||
294 | * @regs: parent's user register state | ||
295 | * @clone_flags: flags from parent's system call | ||
296 | * @pid: new child's PID in the parent's namespace | ||
297 | * @child: child task, already running | ||
298 | * | ||
299 | * This is called just after the child has started running. This is | ||
300 | * just before the clone/fork syscall returns, or blocks for vfork | ||
301 | * child completion if @clone_flags has the %CLONE_VFORK bit set. | ||
302 | * The @child pointer may be invalid if a self-reaping child died and | ||
303 | * tracehook_report_clone() took no action to prevent it from self-reaping. | ||
304 | * | ||
305 | * Called with no locks held. | ||
306 | */ | ||
307 | static inline void tracehook_report_clone_complete(int trace, | ||
308 | struct pt_regs *regs, | ||
309 | unsigned long clone_flags, | ||
310 | pid_t pid, | ||
311 | struct task_struct *child) | ||
312 | { | ||
313 | if (unlikely(trace)) | ||
314 | ptrace_event(0, trace, pid); | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * tracehook_report_vfork_done - vfork parent's child has exited or exec'd | ||
319 | * @child: child task, already running | ||
320 | * @pid: new child's PID in the parent's namespace | ||
321 | * | ||
322 | * Called after a %CLONE_VFORK parent has waited for the child to complete. | ||
323 | * The clone/vfork system call will return immediately after this. | ||
324 | * The @child pointer may be invalid if a self-reaping child died and | ||
325 | * tracehook_report_clone() took no action to prevent it from self-reaping. | ||
326 | * | ||
327 | * Called with no locks held. | ||
328 | */ | ||
329 | static inline void tracehook_report_vfork_done(struct task_struct *child, | ||
330 | pid_t pid) | ||
331 | { | ||
332 | ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid); | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * tracehook_prepare_release_task - task is being reaped, clean up tracing | ||
337 | * @task: task in %EXIT_DEAD state | ||
338 | * | ||
339 | * This is called in release_task() just before @task gets finally reaped | ||
340 | * and freed. This would be the ideal place to remove and clean up any | ||
341 | * tracing-related state for @task. | ||
342 | * | ||
343 | * Called with no locks held. | ||
344 | */ | ||
345 | static inline void tracehook_prepare_release_task(struct task_struct *task) | ||
346 | { | ||
347 | } | ||
348 | |||
349 | /** | ||
350 | * tracehook_finish_release_task - task is being reaped, clean up tracing | ||
351 | * @task: task in %EXIT_DEAD state | ||
352 | * | ||
353 | * This is called in release_task() when @task is in the middle of | ||
354 | * being reaped. After this, there must be no tracing entanglements. | ||
355 | * | ||
356 | * Called with write_lock_irq(&tasklist_lock) held. | ||
357 | */ | ||
358 | static inline void tracehook_finish_release_task(struct task_struct *task) | ||
359 | { | ||
360 | ptrace_release_task(task); | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * tracehook_signal_handler - signal handler setup is complete | ||
365 | * @sig: number of signal being delivered | ||
366 | * @info: siginfo_t of signal being delivered | ||
367 | * @ka: sigaction setting that chose the handler | ||
368 | * @regs: user register state | ||
369 | * @stepping: nonzero if debugger single-step or block-step in use | ||
370 | * | ||
371 | * Called by the arch code after a signal handler has been set up. | ||
372 | * Register and stack state reflects the user handler about to run. | ||
373 | * Signal mask changes have already been made. | ||
374 | * | ||
375 | * Called without locks, shortly before returning to user mode | ||
376 | * (or handling more signals). | ||
377 | */ | ||
378 | static inline void tracehook_signal_handler(int sig, siginfo_t *info, | ||
379 | const struct k_sigaction *ka, | ||
380 | struct pt_regs *regs, int stepping) | ||
381 | { | ||
382 | if (stepping) | ||
383 | ptrace_notify(SIGTRAP); | ||
384 | } | ||
385 | |||
386 | /** | ||
387 | * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal | ||
388 | * @task: task receiving the signal | ||
389 | * @sig: signal number being sent | ||
390 | * @handler: %SIG_IGN or %SIG_DFL | ||
391 | * | ||
392 | * Return zero iff tracing doesn't care to examine this ignored signal, | ||
393 | * so it can short-circuit normal delivery and never even get queued. | ||
394 | * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. | ||
395 | * | ||
396 | * Called with @task->sighand->siglock held. | ||
397 | */ | ||
398 | static inline int tracehook_consider_ignored_signal(struct task_struct *task, | ||
399 | int sig, | ||
400 | void __user *handler) | ||
401 | { | ||
402 | return (task_ptrace(task) & PT_PTRACED) != 0; | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * tracehook_consider_fatal_signal - suppress special handling of fatal signal | ||
407 | * @task: task receiving the signal | ||
408 | * @sig: signal number being sent | ||
409 | * @handler: %SIG_DFL or %SIG_IGN | ||
410 | * | ||
411 | * Return nonzero to prevent special handling of this termination signal. | ||
412 | * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, | ||
413 | * in which case force_sig() is about to reset it to %SIG_DFL. | ||
414 | * When this returns zero, this signal might cause a quick termination | ||
415 | * that does not give the debugger a chance to intercept the signal. | ||
416 | * | ||
417 | * Called with or without @task->sighand->siglock held. | ||
418 | */ | ||
419 | static inline int tracehook_consider_fatal_signal(struct task_struct *task, | ||
420 | int sig, | ||
421 | void __user *handler) | ||
422 | { | ||
423 | return (task_ptrace(task) & PT_PTRACED) != 0; | ||
424 | } | ||
425 | |||
426 | /** | ||
427 | * tracehook_force_sigpending - let tracing force signal_pending(current) on | ||
428 | * | ||
429 | * Called when recomputing our signal_pending() flag. Return nonzero | ||
430 | * to force the signal_pending() flag on, so that tracehook_get_signal() | ||
431 | * will be called before the next return to user mode. | ||
432 | * | ||
433 | * Called with @current->sighand->siglock held. | ||
434 | */ | ||
435 | static inline int tracehook_force_sigpending(void) | ||
436 | { | ||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * tracehook_get_signal - deliver synthetic signal to traced task | ||
442 | * @task: @current | ||
443 | * @regs: task_pt_regs(@current) | ||
444 | * @info: details of synthetic signal | ||
445 | * @return_ka: sigaction for synthetic signal | ||
446 | * | ||
447 | * Return zero to check for a real pending signal normally. | ||
448 | * Return -1 after releasing the siglock to repeat the check. | ||
449 | * Return a signal number to induce an artificial signal delivery, | ||
450 | * setting *@info and *@return_ka to specify its details and behavior. | ||
451 | * | ||
452 | * The @return_ka->sa_handler value controls the disposition of the | ||
453 | * signal, no matter the signal number. For %SIG_DFL, the return value | ||
454 | * is a representative signal to indicate the behavior (e.g. %SIGTERM | ||
455 | * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop, | ||
456 | * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number | ||
457 | * reported will be @info->si_signo instead. | ||
458 | * | ||
459 | * Called with @task->sighand->siglock held, before dequeuing pending signals. | ||
460 | */ | ||
461 | static inline int tracehook_get_signal(struct task_struct *task, | ||
462 | struct pt_regs *regs, | ||
463 | siginfo_t *info, | ||
464 | struct k_sigaction *return_ka) | ||
465 | { | ||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * tracehook_notify_jctl - report about job control stop/continue | ||
471 | * @notify: nonzero if this is the last thread in the group to stop | ||
472 | * @why: %CLD_STOPPED or %CLD_CONTINUED | ||
473 | * | ||
474 | * This is called when we might call do_notify_parent_cldstop(). | ||
475 | * It's called when about to stop for job control; we are already in | ||
476 | * %TASK_STOPPED state, about to call schedule(). It's also called when | ||
477 | * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made. | ||
478 | * | ||
479 | * Return nonzero to generate a %SIGCHLD with @why, which is | ||
480 | * normal if @notify is nonzero. | ||
481 | * | ||
482 | * Called with no locks held. | ||
483 | */ | ||
484 | static inline int tracehook_notify_jctl(int notify, int why) | ||
485 | { | ||
486 | return notify || (current->ptrace & PT_PTRACED); | ||
487 | } | ||
488 | |||
489 | /** | ||
490 | * tracehook_notify_death - task is dead, ready to notify parent | ||
491 | * @task: @current task now exiting | ||
492 | * @death_cookie: value to pass to tracehook_report_death() | ||
493 | * @group_dead: nonzero if this was the last thread in the group to die | ||
494 | * | ||
495 | * Return the signal number to send our parent with do_notify_parent(), or | ||
496 | * zero to send no signal and leave a zombie, or -1 to self-reap right now. | ||
497 | * | ||
498 | * Called with write_lock_irq(&tasklist_lock) held. | ||
499 | */ | ||
500 | static inline int tracehook_notify_death(struct task_struct *task, | ||
501 | void **death_cookie, int group_dead) | ||
502 | { | ||
503 | if (task->exit_signal == -1) | ||
504 | return task->ptrace ? SIGCHLD : -1; | ||
505 | |||
506 | /* | ||
507 | * If something other than our normal parent is ptracing us, then | ||
508 | * send it a SIGCHLD instead of honoring exit_signal. exit_signal | ||
509 | * only has special meaning to our real parent. | ||
510 | */ | ||
511 | if (thread_group_empty(task) && !ptrace_reparented(task)) | ||
512 | return task->exit_signal; | ||
513 | |||
514 | return task->ptrace ? SIGCHLD : 0; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * tracehook_report_death - task is dead and ready to be reaped | ||
519 | * @task: @current task now exiting | ||
520 | * @signal: signal number sent to parent, or 0 or -1 | ||
521 | * @death_cookie: value passed back from tracehook_notify_death() | ||
522 | * @group_dead: nonzero if this was the last thread in the group to die | ||
523 | * | ||
524 | * Thread has just become a zombie or is about to self-reap. If positive, | ||
525 | * @signal is the signal number just sent to the parent (usually %SIGCHLD). | ||
526 | * If @signal is -1, this thread will self-reap. If @signal is 0, this is | ||
527 | * a delayed_group_leader() zombie. The @death_cookie was passed back by | ||
528 | * tracehook_notify_death(). | ||
529 | * | ||
530 | * If normal reaping is not inhibited, @task->exit_state might be changing | ||
531 | * in parallel. | ||
532 | * | ||
533 | * Called without locks. | ||
534 | */ | ||
535 | static inline void tracehook_report_death(struct task_struct *task, | ||
536 | int signal, void *death_cookie, | ||
537 | int group_dead) | ||
538 | { | ||
539 | } | ||
540 | |||
541 | #ifdef TIF_NOTIFY_RESUME | ||
542 | /** | ||
543 | * set_notify_resume - cause tracehook_notify_resume() to be called | ||
544 | * @task: task that will call tracehook_notify_resume() | ||
545 | * | ||
546 | * Calling this arranges that @task will call tracehook_notify_resume() | ||
547 | * before returning to user mode. If it's already running in user mode, | ||
548 | * it will enter the kernel and call tracehook_notify_resume() soon. | ||
549 | * If it's blocked, it will not be woken. | ||
550 | */ | ||
551 | static inline void set_notify_resume(struct task_struct *task) | ||
552 | { | ||
553 | if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) | ||
554 | kick_process(task); | ||
555 | } | ||
556 | |||
557 | /** | ||
558 | * tracehook_notify_resume - report when about to return to user mode | ||
559 | * @regs: user-mode registers of @current task | ||
560 | * | ||
561 | * This is called when %TIF_NOTIFY_RESUME has been set. Now we are | ||
562 | * about to return to user mode, and the user state in @regs can be | ||
563 | * inspected or adjusted. The caller in arch code has cleared | ||
564 | * %TIF_NOTIFY_RESUME before the call. If the flag gets set again | ||
565 | * asynchronously, this will be called again before we return to | ||
566 | * user mode. | ||
567 | * | ||
568 | * Called without locks. | ||
569 | */ | ||
570 | static inline void tracehook_notify_resume(struct pt_regs *regs) | ||
571 | { | ||
572 | } | ||
573 | #endif /* TIF_NOTIFY_RESUME */ | ||
574 | |||
575 | #endif /* <linux/tracehook.h> */ | ||
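To make the hook contract concrete, here is a hedged sketch of the arch-side syscall-tracing glue these entry points expect; the function names and the exact thread-flag checks are illustrative rather than any particular architecture's code:

/* Illustrative arch glue, not taken from a real port. */
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1;	/* arch code must then skip the syscall */

	return ret;
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs, int step)
{
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}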
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 90b529f7a154..936e333e7ce5 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) | |||
1590 | { | 1590 | { |
1591 | if (dev->dma_ops) | 1591 | if (dev->dma_ops) |
1592 | return dev->dma_ops->mapping_error(dev, dma_addr); | 1592 | return dev->dma_ops->mapping_error(dev, dma_addr); |
1593 | return dma_mapping_error(dma_addr); | 1593 | return dma_mapping_error(dev->dma_device, dma_addr); |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | /** | 1596 | /** |
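RDMA consumers see the same pattern through the ib_dma_* wrappers. A hedged sketch of mapping a buffer and checking the result (the helper name is hypothetical):

/* Hypothetical: map a send buffer on an IB device and verify the mapping. */
static u64 map_send_buffer(struct ib_device *ibdev, void *buf, size_t len)
{
	u64 addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(ibdev, addr))
		return 0;	/* 0 used here as the failure marker */

	return addr;
}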
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 1ccf462b433a..613173b5db69 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #ifndef __ATMEL_LCDC_H__ | 22 | #ifndef __ATMEL_LCDC_H__ |
23 | #define __ATMEL_LCDC_H__ | 23 | #define __ATMEL_LCDC_H__ |
24 | 24 | ||
25 | #include <linux/workqueue.h> | ||
25 | 26 | ||
26 | /* Way LCD wires are connected to the chip: | 27 | /* Way LCD wires are connected to the chip: |
27 | * Some Atmel chips use BGR color mode (instead of standard RGB) | 28 | * Some Atmel chips use BGR color mode (instead of standard RGB) |