 300 files changed, 10056 insertions(+), 5896 deletions(-)
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 7e81e37c0b1e..b245d524d568 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -23,7 +23,8 @@ interface.
 Using sysfs
 ~~~~~~~~~~~
 
-sysfs is always compiled in. You can access it by doing:
+sysfs is always compiled in if CONFIG_SYSFS is defined. You can access
+it by doing:
 
 mount -t sysfs sysfs /sys
 
diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/alsa/Procfile.txt
index 381908d8ca42..719a819f8cc2 100644
--- a/Documentation/sound/alsa/Procfile.txt
+++ b/Documentation/sound/alsa/Procfile.txt
@@ -101,6 +101,8 @@ card*/pcm*/xrun_debug
 bit 0 = Enable XRUN/jiffies debug messages
 bit 1 = Show stack trace at XRUN / jiffies check
 bit 2 = Enable additional jiffies check
+bit 3 = Log hwptr update at each period interrupt
+bit 4 = Log hwptr update at each snd_pcm_update_hw_ptr()
 
 When the bit 0 is set, the driver will show the messages to
 kernel log when an xrun is detected. The debug message is
@@ -117,6 +119,9 @@ card*/pcm*/xrun_debug
 buggy) hardware that doesn't give smooth pointer updates.
 This feature is enabled via the bit 2.
 
+Bits 3 and 4 are for logging the hwptr records. Note that
+these will give flood of kernel messages.
+
 card*/pcm*/sub*/info
 The general information of this PCM sub-stream.
 
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 014d255231fc..68c236c01846 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -20,7 +20,7 @@
 19 -> EM2860/SAA711X Reference Design (em2860)
 20 -> AMD ATI TV Wonder HD 600 (em2880) [0438:b002]
 21 -> eMPIA Technology, Inc. GrabBeeX+ Video Encoder (em2800) [eb1a:2801]
-22 -> Unknown EM2750/EM2751 webcam grabber (em2750) [eb1a:2750,eb1a:2751]
+22 -> EM2710/EM2750/EM2751 webcam grabber (em2750) [eb1a:2750,eb1a:2751]
 23 -> Huaqi DLCW-130 (em2750)
 24 -> D-Link DUB-T210 TV Tuner (em2820/em2840) [2001:f112]
 25 -> Gadmei UTV310 (em2820/em2840)
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 2bcf78896e22..573f95b58807 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -44,7 +44,9 @@ zc3xx 0458:7007 Genius VideoCam V2
 zc3xx 0458:700c Genius VideoCam V3
 zc3xx 0458:700f Genius VideoCam Web V2
 sonixj 0458:7025 Genius Eye 311Q
+sn9c20x 0458:7029 Genius Look 320s
 sonixj 0458:702e Genius Slim 310 NB
+sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
 sonixj 045e:00f5 MicroSoft VX3000
 sonixj 045e:00f7 MicroSoft VX1000
 ov519 045e:028c Micro$oft xbox cam
@@ -282,6 +284,28 @@ sonixj 0c45:613a Microdia Sonix PC Camera
 sonixj 0c45:613b Surfer SN-206
 sonixj 0c45:613c Sonix Pccam168
 sonixj 0c45:6143 Sonix Pccam168
+sn9c20x 0c45:6240 PC Camera (SN9C201 + MT9M001)
+sn9c20x 0c45:6242 PC Camera (SN9C201 + MT9M111)
+sn9c20x 0c45:6248 PC Camera (SN9C201 + OV9655)
+sn9c20x 0c45:624e PC Camera (SN9C201 + SOI968)
+sn9c20x 0c45:624f PC Camera (SN9C201 + OV9650)
+sn9c20x 0c45:6251 PC Camera (SN9C201 + OV9650)
+sn9c20x 0c45:6253 PC Camera (SN9C201 + OV9650)
+sn9c20x 0c45:6260 PC Camera (SN9C201 + OV7670)
+sn9c20x 0c45:6270 PC Camera (SN9C201 + MT9V011/MT9V111/MT9V112)
+sn9c20x 0c45:627b PC Camera (SN9C201 + OV7660)
+sn9c20x 0c45:627c PC Camera (SN9C201 + HV7131R)
+sn9c20x 0c45:627f PC Camera (SN9C201 + OV9650)
+sn9c20x 0c45:6280 PC Camera (SN9C202 + MT9M001)
+sn9c20x 0c45:6282 PC Camera (SN9C202 + MT9M111)
+sn9c20x 0c45:6288 PC Camera (SN9C202 + OV9655)
+sn9c20x 0c45:628e PC Camera (SN9C202 + SOI968)
+sn9c20x 0c45:628f PC Camera (SN9C202 + OV9650)
+sn9c20x 0c45:62a0 PC Camera (SN9C202 + OV7670)
+sn9c20x 0c45:62b0 PC Camera (SN9C202 + MT9V011/MT9V111/MT9V112)
+sn9c20x 0c45:62b3 PC Camera (SN9C202 + OV9655)
+sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660)
+sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R)
 sunplus 0d64:0303 Sunplus FashionCam DXG
 etoms 102c:6151 Qcam Sangha CIF
 etoms 102c:6251 Qcam xxxxxx VGA
@@ -290,6 +314,7 @@ spca561 10fd:7e50 FlyCam Usb 100
 zc3xx 10fd:8050 Typhoon Webshot II USB 300k
 ov534 1415:2000 Sony HD Eye for PS3 (SLEH 00201)
 pac207 145f:013a Trust WB-1300N
+sn9c20x 145f:013d Trust WB-3600R
 vc032x 15b8:6001 HP 2.0 Megapixel
 vc032x 15b8:6002 HP 2.0 Megapixel rz406aa
 spca501 1776:501c Arowana 300K CMOS Camera
@@ -300,4 +325,11 @@ spca500 2899:012c Toptro Industrial
 spca508 8086:0110 Intel Easy PC Camera
 spca500 8086:0630 Intel Pocket PC Camera
 spca506 99fa:8988 Grandtec V.cap
+sn9c20x a168:0610 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x a168:0611 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x a168:0613 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x a168:0618 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x a168:0614 Dino-Lite Digital Microscope (SN9C201 + MT9M111)
+sn9c20x a168:0615 Dino-Lite Digital Microscope (SN9C201 + MT9M111)
+sn9c20x a168:0617 Dino-Lite Digital Microscope (SN9C201 + MT9M111)
 spca561 abcd:cdee Petcam
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h
index c13636575fba..42866759f3fa 100644
--- a/arch/alpha/include/asm/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
@@ -9,7 +9,7 @@
 
 #include <asm-generic/tlb.h>
 
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
 
 #endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 321c83e43a1e..f41a6f57cd12 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 }
 
 #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-core.h b/arch/arm/mach-s3c2410/include/mach/gpio-core.h
index 8fe192081d3a..f8b879a7973c 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-core.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-core.h
@@ -28,7 +28,7 @@ static inline struct s3c_gpio_chip *s3c_gpiolib_getchip(unsigned int pin)
 return NULL;
 
 chip = &s3c24xx_gpios[pin/32];
-return (S3C2410_GPIO_OFFSET(pin) > chip->chip.ngpio) ? chip : NULL;
+return (S3C2410_GPIO_OFFSET(pin) < chip->chip.ngpio) ? chip : NULL;
 }
 
 #endif /* __ASM_ARCH_GPIO_CORE_H */
diff --git a/arch/arm/plat-s3c64xx/s3c6400-clock.c b/arch/arm/plat-s3c64xx/s3c6400-clock.c
index 1debc1f9f987..febac1950d8e 100644
--- a/arch/arm/plat-s3c64xx/s3c6400-clock.c
+++ b/arch/arm/plat-s3c64xx/s3c6400-clock.c
@@ -153,7 +153,7 @@ static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk,
 u32 div;
 
 if (parent < rate)
-return rate;
+return parent;
 
 div = (parent / rate) - 1;
 if (div > armclk_mask)
@@ -175,7 +175,7 @@ static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate)
 div = clk_get_rate(clk->parent) / rate;
 
 val = __raw_readl(S3C_CLK_DIV0);
-val &= armclk_mask;
+val &= ~armclk_mask;
 val |= (div - 1);
 __raw_writel(val, S3C_CLK_DIV0);
 
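A note on the second hunk above: AND-ing CLK_DIV0 with armclk_mask keeps only the ARM divider field and zeroes every other divider in the register, while the intended read-modify-write clears just that field before inserting the new value. A minimal sketch of the corrected idiom, reusing the register and mask names from the driver (illustrative only, not an addition to the patch):

    u32 val = __raw_readl(S3C_CLK_DIV0);

    val &= ~armclk_mask;    /* clear only the ARM clock divider bits */
    val |= (div - 1);       /* insert the new divider setting */
    __raw_writel(val, S3C_CLK_DIV0);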
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 640821323943..92ecd8446ef8 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -83,7 +83,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 quicklist_free_page(QUICK_PT, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,addr) \
 do { \
 pgtable_page_dtor(pte); \
 tlb_remove_page((tlb), pte); \
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index a1ba761d0573..6da975db112f 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,address) \
 do { \
 pgtable_page_dtor(pte); \
 tlb_remove_page((tlb), pte); \
diff --git a/arch/frv/include/asm/pgalloc.h b/arch/frv/include/asm/pgalloc.h
index 971e6addb009..416d19a632f2 100644
--- a/arch/frv/include/asm/pgalloc.h
+++ b/arch/frv/include/asm/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,address) \
 do { \
 pgtable_page_dtor(pte); \
 tlb_remove_page((tlb),(pte)); \
@@ -62,7 +62,7 @@ do { \
 */
 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
 #define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
 
 #endif /* CONFIG_MMU */
 
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 33233011b1c1..22c60692b551 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -225,7 +225,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
 */
 #define pud_alloc_one(mm, address) NULL
 #define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, address) do { } while (0)
 
 /*
 * The "pud_xxx()" functions here are trivial for a folded two-level
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index b9ac1a6fc216..96a8d927db28 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
 quicklist_free(0, NULL, pud);
 }
-#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_4 */
 
 static inline void
@@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 quicklist_free(0, NULL, pmd);
 }
 
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
 
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
 quicklist_trim(0, NULL, 25, 16);
 }
 
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
 
 #endif /* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 20d8a39680c2..85d965cb19a0 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -236,22 +236,22 @@ do { \
 __tlb_remove_tlb_entry(tlb, ptep, addr); \
 } while (0)
 
-#define pte_free_tlb(tlb, ptep) \
+#define pte_free_tlb(tlb, ptep, address) \
 do { \
 tlb->need_flush = 1; \
-__pte_free_tlb(tlb, ptep); \
+__pte_free_tlb(tlb, ptep, address); \
 } while (0)
 
-#define pmd_free_tlb(tlb, ptep) \
+#define pmd_free_tlb(tlb, ptep, address) \
 do { \
 tlb->need_flush = 1; \
-__pmd_free_tlb(tlb, ptep); \
+__pmd_free_tlb(tlb, ptep, address); \
 } while (0)
 
-#define pud_free_tlb(tlb, pudp) \
+#define pud_free_tlb(tlb, pudp, address) \
 do { \
 tlb->need_flush = 1; \
-__pud_free_tlb(tlb, pudp); \
+__pud_free_tlb(tlb, pudp, address); \
 } while (0)
 
 #endif /* _ASM_IA64_TLB_H */
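The page-table freeing hooks patched above and in the surrounding architecture headers all gain the same extra parameter: the virtual address covered by the table being freed. A rough sketch of how a generic caller threads that address through, modelled on free_pte_range() in mm/memory.c (the body here is illustrative and not taken from this diff):

    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                               unsigned long addr)
    {
        pgtable_t token = pmd_pgtable(*pmd);

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);   /* addr now reaches __pte_free_tlb() */
        tlb->mm->nr_ptes--;
    }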
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index f11a2b909cdb..0fc736198979 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -58,7 +58,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
 
 /*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
@@ -68,7 +68,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 
 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
 #define pgd_populate(mm, pmd, pte) BUG()
 
 #define check_pgt_cache() do { } while (0)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index d08bf6261df8..15ee4c74a9f0 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -54,7 +54,8 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
 __free_page(page);
 }
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+unsigned long address)
 {
 pgtable_page_dtor(page);
 cache_page(kmap(page));
@@ -73,7 +74,8 @@ static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
 return free_pointer_table(pmd);
 }
 
-static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+unsigned long address)
 {
 return free_pointer_table(pmd);
 }
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index d4c83f143816..48d80d5a666f 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -32,7 +32,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
 __free_page(page);
 }
 
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,addr) \
 do { \
 pgtable_page_dtor(pte); \
 tlb_remove_page((tlb), pte); \
@@ -80,7 +80,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
 * inside the pgd, so has no extra memory associated with it.
 */
 #define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d0bcf80a1136..8439598d4655 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -6,14 +6,16 @@ endif
 
 # What CPU vesion are we building for, and crack it open
 # as major.minor.rev
-CPU_VER=$(subst ",,$(CONFIG_XILINX_MICROBLAZE0_HW_VER) )
-CPU_MAJOR=$(shell echo $(CPU_VER) | cut -d '.' -f 1)
-CPU_MINOR=$(shell echo $(CPU_VER) | cut -d '.' -f 2)
-CPU_REV=$(shell echo $(CPU_VER) | cut -d '.' -f 3)
+CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
+CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
+CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2)
+CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3)
 
 export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
 
 # Use cpu-related CONFIG_ vars to set compile options.
+# The various CONFIG_XILINX cpu features options are integers 0/1/2...
+# rather than bools y/n
 
 # Work out HW multipler support. This is icky.
 # 1. Spartan2 has no HW multiplers.
@@ -34,30 +36,29 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
 
 CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
 
-# The various CONFIG_XILINX cpu features options are integers 0/1/2...
-# rather than bools y/n
-
 # r31 holds current when in kernel mode
-CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
+KBUILD_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
 
 LDFLAGS :=
 LDFLAGS_vmlinux :=
-LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
 
-LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name)
+LIBGCC := $(shell $(CC) $(KBUILD_KERNEL) -print-libgcc-file-name)
 
 head-y := arch/microblaze/kernel/head.o
-libs-y += arch/microblaze/lib/ $(LIBGCC)
-core-y += arch/microblaze/kernel/ arch/microblaze/mm/ \
-arch/microblaze/platform/
+libs-y += arch/microblaze/lib/
+libs-y += $(LIBGCC)
+core-y += arch/microblaze/kernel/
+core-y += arch/microblaze/mm/
+core-y += arch/microblaze/platform/
 
-boot := arch/$(ARCH)/boot
+boot := arch/microblaze/boot
 
 # defines filename extension depending memory management type
 ifeq ($(CONFIG_MMU),)
-MMUEXT := -nommu
+MMU := -nommu
 endif
-export MMUEXT
+
+export MMU
 
 all: linux.bin
 
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 5c173424d074..7c3ec13b44d8 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -14,7 +14,6 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <linux/types.h>
-#include <asm/byteorder.h>
 #include <linux/mm.h> /* Get struct page {...} */
 
 
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 59a757e46ba5..b0131da1387b 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -180,7 +180,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
 __free_page(ptepage);
 }
 
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
 
 #define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
 
@@ -193,7 +193,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
 */
 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
 /*#define pmd_free(mm, x) do { } while (0)*/
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
 #define pgd_populate(mm, pmd, pte) BUG()
 
 extern int do_check_pgt_cache(int, int);
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4c57a586a989..cc3a4dfc3eaa 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -185,6 +185,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 /* Definitions for MicroBlaze. */
 #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
 #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
 #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
 #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
@@ -320,8 +321,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
 static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-/* FIXME */
-static inline int pte_file(pte_t pte) { return 0; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -488,7 +488,7 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS 29
 #define pte_to_pgoff(pte) (pte_val(pte) >> 3)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
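With the hunks above, a not-present PTE that encodes a nonlinear file mapping carries the _PAGE_FILE tag, so pte_file() can tell it apart from a swap entry and the file offset survives an encode/decode round trip. A small illustrative check built only from the macros shown here (hypothetical test code, not part of the patch):

    pte_t pte = pgoff_to_pte(pgoff);        /* (pgoff << 3) | _PAGE_FILE */

    BUG_ON(!pte_file(pte));                 /* the _PAGE_FILE bit is set */
    BUG_ON(pte_to_pgoff(pte) != pgoff);     /* >> 3 recovers the offset */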
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 20f7b3a926e8..37e6f305a68e 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -16,6 +16,18 @@
 #define _ASM_MICROBLAZE_PROM_H
 #ifdef __KERNEL__
 
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size, content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/platform_device.h>
@@ -29,16 +41,6 @@
 #define of_prop_cmp(s1, s2) strcmp((s1), (s2))
 #define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
 
-/* Definitions used by the flattened device tree */
-#define OF_DT_HEADER 0xd00dfeed /* marker */
-#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
-#define OF_DT_END_NODE 0x2 /* End node */
-#define OF_DT_PROP 0x3 /* Property: name off, size, content */
-#define OF_DT_NOP 0x4 /* nop */
-#define OF_DT_END 0x9
-
-#define OF_DT_VERSION 0x10
-
 /*
 * This is what gets passed to the kernel by prom_init or kexec
 *
@@ -309,5 +311,6 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
 */
 #include <linux/of.h>
 
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_MICROBLAZE_PROM_H */
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index c472d2801132..e8abd4a0349c 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -11,7 +11,7 @@
 #ifndef _ASM_MICROBLAZE_TLB_H
 #define _ASM_MICROBLAZE_TLB_H
 
-#define tlb_flush(tlb) do {} while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
 
 #include <asm-generic/tlb.h>
 
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 65adad61e7e9..5431b4631a7a 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -189,7 +189,7 @@ extern long strnlen_user(const char *src, long count);
 
 #define __put_user(x, ptr) \
 ({ \
-__typeof__(*(ptr)) __gu_val = x; \
+__typeof__(*(ptr)) volatile __gu_val = (x); \
 long __gu_err = 0; \
 switch (sizeof(__gu_val)) { \
 case 1: \
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index f4a5e19a20eb..d487729683de 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -17,4 +17,4 @@ obj-$(CONFIG_HEART_BEAT) += heartbeat.o
 obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
 obj-$(CONFIG_MMU) += misc.o
 
-obj-y += entry$(MMUEXT).o
+obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 153f57c57b6d..c259786e7faa 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -22,7 +22,7 @@
 
 #define CI(c, p) { ci->c = PVR_##p(pvr); }
 #define err_printk(x) \
-early_printk("ERROR: Microblaze " x " - different for PVR and DTS\n");
+early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
 
 void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
 {
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 450ca6bb828d..adb448f93d5f 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
 static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
 
 #define err_printk(x) \
-early_printk("ERROR: Microblaze " x "- different for kernel and DTS\n");
+early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
 
 void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
 {
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index a10bea119b94..c411c6757deb 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -26,6 +26,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
 {"7.10.b", 0x09},
 {"7.10.c", 0x0a},
 {"7.10.d", 0x0b},
+{"7.20.a", 0x0c},
+{"7.20.b", 0x0d},
 /* FIXME There is no keycode defined in MBV for these versions */
 {"2.10.a", 0x10},
 {"3.00.a", 0x20},
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index e568d6ec621b..e41c6ce2a7be 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -31,6 +31,7 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/prom.h> /* for OF_DT_HEADER */
 
 #ifdef CONFIG_MMU
 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -54,11 +55,19 @@ ENTRY(_start)
 andi r1, r1, ~2
 mts rmsr, r1
 
-/* save fdt to kernel location */
-/* r7 stores pointer to fdt blob */
-beqi r7, no_fdt_arg
+/* r7 may point to an FDT, or there may be one linked in.
+   if it's in r7, we've got to save it away ASAP.
+   We ensure r7 points to a valid FDT, just in case the bootloader
+   is broken or non-existent */
+beqi r7, no_fdt_arg /* NULL pointer? don't copy */
+lw r11, r0, r7 /* Does r7 point to a */
+rsubi r11, r11, OF_DT_HEADER /* valid FDT? */
+beqi r11, _prepare_copy_fdt
+or r7, r0, r0 /* clear R7 when not valid DTB */
+bnei r11, no_fdt_arg /* No - get out of here */
+_prepare_copy_fdt:
 or r11, r0, r0 /* incremment */
-ori r4, r0, TOPHYS(_fdt_start) /* save bram context */
+ori r4, r0, TOPHYS(_fdt_start)
 ori r3, r0, (0x4000 - 4)
 _copy_fdt:
 lw r12, r7, r11 /* r12 = r7 + r11 */
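The new head.S logic only copies the blob handed over in r7 when its first word matches the flattened-device-tree magic from asm/prom.h; otherwise r7 is cleared and the kernel falls back to any FDT linked into the image. Expressed in C terms purely for illustration (names follow the assembly above, and the copy size mirrors the 0x4000-byte loop):

    /* accept the bootloader-provided pointer only if it looks like an FDT */
    if (fdt && *(u32 *)fdt == OF_DT_HEADER)      /* 0xd00dfeed */
        memcpy(_fdt_start, (void *)fdt, 0x4000); /* stash it inside the kernel image */
    else
        fdt = 0;                                 /* use the compiled-in FDT instead */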
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 9d591cd74fc2..3288c9737671 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -74,6 +74,7 @@
 
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
+#include <asm/signal.h>
 #include <asm/asm-offsets.h>
 
 /* Helpful Macros */
@@ -428,19 +429,9 @@ handle_unaligned_ex:
 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
 nop
 _no_delayslot:
-#endif
-
-#ifdef CONFIG_MMU
-/* Check if unaligned address is last on a 4k page */
-andi r5, r4, 0xffc
-xori r5, r5, 0xffc
-bnei r5, _unaligned_ex2
-_unaligned_ex1:
-RESTORE_STATE;
-/* Another page must be accessed or physical address not in page table */
-bri unaligned_data_trap
-
-_unaligned_ex2:
+/* jump to high level unaligned handler */
+RESTORE_STATE;
+bri unaligned_data_trap
 #endif
 andi r6, r3, 0x3E0; /* Mask and extract the register operand */
 srl r6, r6; /* r6 >> 5 */
@@ -450,45 +441,6 @@ _no_delayslot:
 srl r6, r6;
 /* Store the register operand in a temporary location */
 sbi r6, r0, TOPHYS(ex_reg_op);
-#ifdef CONFIG_MMU
-/* Get physical address */
-/* If we are faulting a kernel address, we have to use the
-* kernel page tables.
-*/
-ori r5, r0, CONFIG_KERNEL_START
-cmpu r5, r4, r5
-bgti r5, _unaligned_ex3
-ori r5, r0, swapper_pg_dir
-bri _unaligned_ex4
-
-/* Get the PGD for the current thread. */
-_unaligned_ex3: /* user thread */
-addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */
-lwi r5, r5, TASK_THREAD + PGDIR
-_unaligned_ex4:
-tophys(r5,r5)
-BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
-andi r6, r6, 0xffc
-/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
-or r5, r5, r6
-lwi r6, r5, 0 /* Get L1 entry */
-andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
-beqi r5, _unaligned_ex1 /* Bail if no table */
-
-tophys(r5,r5)
-BSRLI(r6,r4,10) /* Compute PTE address */
-andi r6, r6, 0xffc
-andi r5, r5, 0xfffff003
-or r5, r5, r6
-lwi r5, r5, 0 /* Get Linux PTE */
-
-andi r6, r5, _PAGE_PRESENT
-beqi r6, _unaligned_ex1 /* Bail if no page */
-
-andi r5, r5, 0xfffff000 /* Extract RPN */
-andi r4, r4, 0x00000fff /* Extract offset */
-or r4, r4, r5 /* Create physical address */
-#endif /* CONFIG_MMU */
 
 andi r6, r3, 0x400; /* Extract ESR[S] */
 bnei r6, ex_sw;
@@ -959,15 +911,15 @@ _unaligned_data_exception:
 andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
 ex_lw_vm:
 beqid r6, ex_lhw_vm;
-lbui r5, r4, 0; /* Exception address in r4 - delay slot */
+load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
 /* Load a word, byte-by-byte from destination address and save it in tmp space*/
 la r6, r0, ex_tmp_data_loc_0;
 sbi r5, r6, 0;
-lbui r5, r4, 1;
+load2: lbui r5, r4, 1;
 sbi r5, r6, 1;
-lbui r5, r4, 2;
+load3: lbui r5, r4, 2;
 sbi r5, r6, 2;
-lbui r5, r4, 3;
+load4: lbui r5, r4, 3;
 sbi r5, r6, 3;
 brid ex_lw_tail_vm;
 /* Get the destination register value into r3 - delay slot */
@@ -977,7 +929,7 @@ ex_lhw_vm:
 * save it in tmp space */
 la r6, r0, ex_tmp_data_loc_0;
 sbi r5, r6, 0;
-lbui r5, r4, 1;
+load5: lbui r5, r4, 1;
 sbi r5, r6, 1;
 lhui r3, r6, 0; /* Get the destination register value into r3 */
 ex_lw_tail_vm:
@@ -996,22 +948,53 @@ ex_sw_tail_vm:
 swi r3, r5, 0; /* Get the word - delay slot */
 /* Store the word, byte-by-byte into destination address */
 lbui r3, r5, 0;
-sbi r3, r4, 0;
+store1: sbi r3, r4, 0;
 lbui r3, r5, 1;
-sbi r3, r4, 1;
+store2: sbi r3, r4, 1;
 lbui r3, r5, 2;
-sbi r3, r4, 2;
+store3: sbi r3, r4, 2;
 lbui r3, r5, 3;
 brid ret_from_exc;
-sbi r3, r4, 3; /* Delay slot */
+store4: sbi r3, r4, 3; /* Delay slot */
 ex_shw_vm:
 /* Store the lower half-word, byte-by-byte into destination address */
 lbui r3, r5, 2;
-sbi r3, r4, 0;
+store5: sbi r3, r4, 0;
 lbui r3, r5, 3;
 brid ret_from_exc;
-sbi r3, r4, 1; /* Delay slot */
+store6: sbi r3, r4, 1; /* Delay slot */
 ex_sw_end_vm: /* Exception handling of store word, ends. */
+
+/* We have to prevent cases that get/put_user macros get unaligned pointer
+ * to bad page area. We have to find out which origin instruction caused it
+ * and called fixup for that origin instruction not instruction in unaligned
+ * handler */
+ex_unaligned_fixup:
+ori r5, r7, 0 /* setup pointer to pt_regs */
+lwi r6, r7, PT_PC; /* faulting address is one instruction above */
+addik r6, r6, -4 /* for finding proper fixup */
+swi r6, r7, PT_PC; /* a save back it to PT_PC */
+addik r7, r0, SIGSEGV
+/* call bad_page_fault for finding aligned fixup, fixup address is saved
+ * in PT_PC which is used as return address from exception */
+la r15, r0, ret_from_exc-8 /* setup return address */
+brid bad_page_fault
+nop
+
+/* We prevent all load/store because it could failed any attempt to access */
+.section __ex_table,"a";
+.word load1,ex_unaligned_fixup;
+.word load2,ex_unaligned_fixup;
+.word load3,ex_unaligned_fixup;
+.word load4,ex_unaligned_fixup;
+.word load5,ex_unaligned_fixup;
+.word store1,ex_unaligned_fixup;
+.word store2,ex_unaligned_fixup;
+.word store3,ex_unaligned_fixup;
+.word store4,ex_unaligned_fixup;
+.word store5,ex_unaligned_fixup;
+.word store6,ex_unaligned_fixup;
+.previous;
 .end _unaligned_data_exception
 #endif /* CONFIG_MMU */
 
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 51414171326f..5a45b1adfef1 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -57,7 +57,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
 Elf32_Sym *sym;
 unsigned long int *location;
-unsigned long int locoffs;
 unsigned long int value;
 #if __GNUC__ < 4
 unsigned long int old_value;
@@ -113,10 +112,12 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 break;
 
 case R_MICROBLAZE_64_PCREL:
-locoffs = (location[0] & 0xFFFF) << 16 |
+#if __GNUC__ < 4
+old_value = (location[0] & 0xFFFF) << 16 |
 (location[1] & 0xFFFF);
-value -= (unsigned long int)(location) + 4 +
-locoffs;
+value -= old_value;
+#endif
+value -= (unsigned long int)(location) + 4;
 location[0] = (location[0] & 0xFFFF0000) |
 (value >> 16);
 location[1] = (location[1] & 0xFFFF0000) |
@@ -125,6 +126,14 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 value);
 break;
 
+case R_MICROBLAZE_32_PCREL_LO:
+pr_debug("R_MICROBLAZE_32_PCREL_LO\n");
+break;
+
+case R_MICROBLAZE_64_NONE:
+pr_debug("R_MICROBLAZE_NONE\n");
+break;
+
 case R_MICROBLAZE_NONE:
 pr_debug("R_MICROBLAZE_NONE\n");
 break;
@@ -133,7 +142,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 printk(KERN_ERR "module %s: "
 "Unknown relocation: %u\n",
 module->name,
-ELF32_R_TYPE(rela->r_info));
+ELF32_R_TYPE(rela[i].r_info));
 return -ENOEXEC;
 }
 }
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8709bea09604..2a97bf513b64 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -138,8 +138,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
 setup_early_printk(NULL);
 #endif
 
-early_printk("Ramdisk addr 0x%08x, FDT 0x%08x\n", ram, fdt);
-printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
+early_printk("Ramdisk addr 0x%08x, ", ram);
+if (fdt)
+early_printk("FDT at 0x%08x\n", fdt);
+else
+early_printk("Compiled-in FDT at 0x%08x\n",
+(unsigned int)_fdt_start);
 
 #ifdef CONFIG_MTD_UCLINUX
 early_printk("Found romfs @ 0x%08x (0x%08x)\n",
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index e000bce09b2b..b96f1682bb24 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -33,105 +33,6 @@
 #include <linux/unistd.h>
 
 #include <asm/syscalls.h>
-/*
-* sys_ipc() is the de-multiplexer for the SysV IPC calls..
-*
-* This is really horribly ugly. This will be remove with new toolchain.
-*/
-asmlinkage long
-sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth)
-{
-int version, ret;
-
-version = call >> 16; /* hack for backward compatibility */
-call &= 0xffff;
-
-ret = -EINVAL;
-switch (call) {
-case SEMOP:
-ret = sys_semop(first, (struct sembuf *)ptr, second);
-break;
-case SEMGET:
-ret = sys_semget(first, second, third);
-break;
-case SEMCTL:
-{
-union semun fourth;
-
-if (!ptr)
-break;
-ret = (access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
-|| (get_user(fourth.__pad, (void **)ptr)) ;
-if (ret)
-break;
-ret = sys_semctl(first, second, third, fourth);
-break;
-}
-case MSGSND:
-ret = sys_msgsnd(first, (struct msgbuf *) ptr, second, third);
-break;
-case MSGRCV:
-switch (version) {
-case 0: {
-struct ipc_kludge tmp;
-
-if (!ptr)
-break;
-ret = (access_ok(VERIFY_READ, ptr, sizeof(tmp))
-? 0 : -EFAULT) || copy_from_user(&tmp,
-(struct ipc_kludge *) ptr, sizeof(tmp));
-if (ret)
-break;
-ret = sys_msgrcv(first, tmp.msgp, second, tmp.msgtyp,
-third);
-break;
-}
-default:
-ret = sys_msgrcv(first, (struct msgbuf *) ptr,
-second, fifth, third);
-break;
-}
-break;
-case MSGGET:
-ret = sys_msgget((key_t) first, second);
-break;
-case MSGCTL:
-ret = sys_msgctl(first, second, (struct msqid_ds *) ptr);
-break;
-case SHMAT:
-switch (version) {
-default: {
-ulong raddr;
-ret = access_ok(VERIFY_WRITE, (ulong *) third,
-sizeof(ulong)) ? 0 : -EFAULT;
-if (ret)
-break;
-ret = do_shmat(first, (char *) ptr, second, &raddr);
-if (ret)
-break;
-ret = put_user(raddr, (ulong *) third);
-break;
-}
-case 1: /* iBCS2 emulator entry point */
-if (!segment_eq(get_fs(), get_ds()))
-break;
-ret = do_shmat(first, (char *) ptr, second,
-(ulong *) third);
-break;
-}
-break;
-case SHMDT:
-ret = sys_shmdt((char *)ptr);
-break;
-case SHMGET:
-ret = sys_shmget(first, second, third);
-break;
-case SHMCTL:
-ret = sys_shmctl(first, second, (struct shmid_ds *) ptr);
-break;
-}
-return ret;
-}
 
 asmlinkage long microblaze_vfork(struct pt_regs *regs)
 {
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 31b32a6c5f4e..216db817beb6 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -121,7 +121,7 @@ ENTRY(sys_call_table)
 .long sys_wait4
 .long sys_swapoff /* 115 */
 .long sys_sysinfo
-.long sys_ipc
+.long sys_ni_syscall /* old sys_ipc */
 .long sys_fsync
 .long sys_ni_syscall /* sys_sigreturn_wrapper */
 .long sys_clone /* 120 */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 956607a63f4c..d9d249a66ff2 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -69,7 +69,7 @@ static int store_updates_sp(struct pt_regs *regs)
 * It is called from do_page_fault above and from some of the procedures
 * in traps.c.
 */
-static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 {
 const struct exception_table_entry *fixup;
 /* MS: no context */
@@ -122,15 +122,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 }
 #endif /* CONFIG_KGDB */
 
-if (in_atomic() || mm == NULL) {
-/* FIXME */
-if (kernel_mode(regs)) {
-printk(KERN_EMERG
-"Page fault in kernel mode - Oooou!!! pid %d\n",
-current->pid);
-_exception(SIGSEGV, regs, code, address);
-return;
-}
+if (in_atomic() || !mm) {
+if (kernel_mode(regs))
+goto bad_area_nosemaphore;
+
 /* in_atomic() in user mode is really bad,
 as is current->mm == NULL. */
 printk(KERN_EMERG "Page fault in user mode with "
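bad_page_fault() loses its static qualifier above because the new ex_unaligned_fixup path in hw_exception_handler.S branches into it: a faulting unaligned access that originated in get_user()/put_user() is redirected through the kernel exception table instead of oopsing. Roughly, the relevant part of such a handler looks like this (a simplified sketch, not the exact microblaze implementation):

    void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
    {
        const struct exception_table_entry *fixup;

        fixup = search_exception_tables(regs->pc);
        if (fixup) {
            regs->pc = fixup->fixup;    /* resume at the registered fixup */
            return;
        }
        /* no fixup entry: a genuine kernel fault, treat it as fatal */
        die("kernel access of bad area", regs, sig);
    }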
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 1275831dda29..3738f4b48cbd 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h | |||
@@ -98,23 +98,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | |||
98 | __free_pages(pte, PTE_ORDER); | 98 | __free_pages(pte, PTE_ORDER); |
99 | } | 99 | } |
100 | 100 | ||
101 | #define __pte_free_tlb(tlb,pte) \ | 101 | #define __pte_free_tlb(tlb,pte,address) \ |
102 | do { \ | 102 | do { \ |
103 | pgtable_page_dtor(pte); \ | 103 | pgtable_page_dtor(pte); \ |
104 | tlb_remove_page((tlb), pte); \ | 104 | tlb_remove_page((tlb), pte); \ |
105 | } while (0) | 105 | } while (0) |
106 | 106 | ||
107 | #ifdef CONFIG_32BIT | ||
108 | |||
109 | /* | ||
110 | * allocating and freeing a pmd is trivial: the 1-entry pmd is | ||
111 | * inside the pgd, so has no extra memory associated with it. | ||
112 | */ | ||
113 | #define pmd_free(mm, x) do { } while (0) | ||
114 | #define __pmd_free_tlb(tlb, x) do { } while (0) | ||
115 | |||
116 | #endif | ||
117 | |||
118 | #ifdef CONFIG_64BIT | 107 | #ifdef CONFIG_64BIT |
119 | 108 | ||
120 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 109 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) |
@@ -132,7 +121,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
132 | free_pages((unsigned long)pmd, PMD_ORDER); | 121 | free_pages((unsigned long)pmd, PMD_ORDER); |
133 | } | 122 | } |
134 | 123 | ||
135 | #define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x) | 124 | #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) |
136 | 125 | ||
137 | #endif | 126 | #endif |
138 | 127 | ||
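The change repeated across the pgalloc and tlb headers in this series is that __pte_free_tlb(), __pmd_free_tlb() and __pud_free_tlb() gain a third argument carrying the virtual address of the range whose page table is being freed; most architectures, as here, simply ignore it, but it lets mmu_gather implementations flush by address. On the caller side in generic mm code the address is threaded through roughly as follows (a simplified sketch, not a verbatim copy of mm/memory.c):

/* sketch: the unmap path hands the address down to the arch hook */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                           unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);         /* third argument is new */
        tlb->mm->nr_ptes--;
}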
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h index ec057e1bd4cf..a19f11327cd8 100644 --- a/arch/mn10300/include/asm/pgalloc.h +++ b/arch/mn10300/include/asm/pgalloc.h | |||
@@ -51,6 +51,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | 53 | ||
54 | #define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) | 54 | #define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte)) |
55 | 55 | ||
56 | #endif /* _ASM_PGALLOC_H */ | 56 | #endif /* _ASM_PGALLOC_H */ |
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h index 383b1db310ee..07924903989e 100644 --- a/arch/parisc/include/asm/tlb.h +++ b/arch/parisc/include/asm/tlb.h | |||
@@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \ | |||
21 | 21 | ||
22 | #include <asm-generic/tlb.h> | 22 | #include <asm-generic/tlb.h> |
23 | 23 | ||
24 | #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) | 24 | #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) |
25 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) | 25 | #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) |
26 | 26 | ||
27 | #endif | 27 | #endif |
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 0815eb40acae..c9500d666a1d 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h | |||
@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | |||
16 | */ | 16 | */ |
17 | /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ | 17 | /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ |
18 | #define pmd_free(mm, x) do { } while (0) | 18 | #define pmd_free(mm, x) do { } while (0) |
19 | #define __pmd_free_tlb(tlb,x) do { } while (0) | 19 | #define __pmd_free_tlb(tlb,x,a) do { } while (0) |
20 | /* #define pgd_populate(mm, pmd, pte) BUG() */ | 20 | /* #define pgd_populate(mm, pmd, pte) BUG() */ |
21 | 21 | ||
22 | #ifndef CONFIG_BOOKE | 22 | #ifndef CONFIG_BOOKE |
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index afda2bdd860f..e6f069c4f713 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h | |||
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf) | |||
118 | kmem_cache_free(pgtable_cache[cachenum], p); | 118 | kmem_cache_free(pgtable_cache[cachenum], p); |
119 | } | 119 | } |
120 | 120 | ||
121 | #define __pmd_free_tlb(tlb, pmd) \ | 121 | #define __pmd_free_tlb(tlb, pmd,addr) \ |
122 | pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ | 122 | pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ |
123 | PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) | 123 | PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) |
124 | #ifndef CONFIG_PPC_64K_PAGES | 124 | #ifndef CONFIG_PPC_64K_PAGES |
125 | #define __pud_free_tlb(tlb, pud) \ | 125 | #define __pud_free_tlb(tlb, pud, addr) \ |
126 | pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ | 126 | pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ |
127 | PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) | 127 | PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) |
128 | #endif /* CONFIG_PPC_64K_PAGES */ | 128 | #endif /* CONFIG_PPC_64K_PAGES */ |
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index 5d8480265a77..1730e5e298d6 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h | |||
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, | |||
38 | extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); | 38 | extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); |
39 | 39 | ||
40 | #ifdef CONFIG_SMP | 40 | #ifdef CONFIG_SMP |
41 | #define __pte_free_tlb(tlb,ptepage) \ | 41 | #define __pte_free_tlb(tlb,ptepage,address) \ |
42 | do { \ | 42 | do { \ |
43 | pgtable_page_dtor(ptepage); \ | 43 | pgtable_page_dtor(ptepage); \ |
44 | pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ | 44 | pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ |
45 | PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ | 45 | PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ |
46 | } while (0) | 46 | } while (0) |
47 | #else | 47 | #else |
48 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) | 48 | #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte)) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | 51 | ||
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 9920d6a7cf29..c46ef2ffa3d9 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
305 | 305 | ||
306 | pmd = pmd_offset(pud, start); | 306 | pmd = pmd_offset(pud, start); |
307 | pud_clear(pud); | 307 | pud_clear(pud); |
308 | pmd_free_tlb(tlb, pmd); | 308 | pmd_free_tlb(tlb, pmd, start); |
309 | } | 309 | } |
310 | 310 | ||
311 | static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | 311 | static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, |
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
348 | 348 | ||
349 | pud = pud_offset(pgd, start); | 349 | pud = pud_offset(pgd, start); |
350 | pgd_clear(pgd); | 350 | pgd_clear(pgd); |
351 | pud_free_tlb(tlb, pud); | 351 | pud_free_tlb(tlb, pud, start); |
352 | } | 352 | } |
353 | 353 | ||
354 | /* | 354 | /* |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 3d8a96d39d9d..81150b053689 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
96 | * pte_free_tlb frees a pte table and clears the CRSTE for the | 96 | * pte_free_tlb frees a pte table and clears the CRSTE for the |
97 | * page table from the tlb. | 97 | * page table from the tlb. |
98 | */ | 98 | */ |
99 | static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) | 99 | static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |
100 | unsigned long address) | ||
100 | { | 101 | { |
101 | if (!tlb->fullmm) { | 102 | if (!tlb->fullmm) { |
102 | tlb->array[tlb->nr_ptes++] = pte; | 103 | tlb->array[tlb->nr_ptes++] = pte; |
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) | |||
113 | * as the pgd. pmd_free_tlb checks the asce_limit against 2GB | 114 | * as the pgd. pmd_free_tlb checks the asce_limit against 2GB |
114 | * to avoid the double free of the pmd in this case. | 115 | * to avoid the double free of the pmd in this case. |
115 | */ | 116 | */ |
116 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) | 117 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, |
118 | unsigned long address) | ||
117 | { | 119 | { |
118 | #ifdef __s390x__ | 120 | #ifdef __s390x__ |
119 | if (tlb->mm->context.asce_limit <= (1UL << 31)) | 121 | if (tlb->mm->context.asce_limit <= (1UL << 31)) |
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) | |||
134 | * as the pgd. pud_free_tlb checks the asce_limit against 4TB | 136 | * as the pgd. pud_free_tlb checks the asce_limit against 4TB |
135 | * to avoid the double free of the pud in this case. | 137 | * to avoid the double free of the pud in this case. |
136 | */ | 138 | */ |
137 | static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) | 139 | static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, |
140 | unsigned long address) | ||
138 | { | 141 | { |
139 | #ifdef __s390x__ | 142 | #ifdef __s390x__ |
140 | if (tlb->mm->context.asce_limit <= (1UL << 42)) | 143 | if (tlb->mm->context.asce_limit <= (1UL << 42)) |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index f9b144049dc9..8d15314381e0 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void) | |||
210 | machine_flags |= MACHINE_FLAG_VM; | 210 | machine_flags |= MACHINE_FLAG_VM; |
211 | } | 211 | } |
212 | 212 | ||
213 | static void early_pgm_check_handler(void) | 213 | static __init void early_pgm_check_handler(void) |
214 | { | 214 | { |
215 | unsigned long addr; | 215 | unsigned long addr; |
216 | const struct exception_table_entry *fixup; | 216 | const struct exception_table_entry *fixup; |
@@ -222,7 +222,7 @@ static void early_pgm_check_handler(void) | |||
222 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; | 222 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; |
223 | } | 223 | } |
224 | 224 | ||
225 | void setup_lowcore_early(void) | 225 | static noinline __init void setup_lowcore_early(void) |
226 | { | 226 | { |
227 | psw_t psw; | 227 | psw_t psw; |
228 | 228 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2270730f5354..be2cae083406 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
687 | #ifndef CONFIG_64BIT | 687 | #ifndef CONFIG_64BIT |
688 | if (MACHINE_HAS_IEEE) | 688 | if (MACHINE_HAS_IEEE) |
689 | lowcore->extended_save_area_addr = (u32) save_area; | 689 | lowcore->extended_save_area_addr = (u32) save_area; |
690 | #else | ||
691 | if (vdso_alloc_per_cpu(smp_processor_id(), lowcore)) | ||
692 | BUG(); | ||
693 | #endif | 690 | #endif |
694 | set_prefix((u32)(unsigned long) lowcore); | 691 | set_prefix((u32)(unsigned long) lowcore); |
695 | local_mcck_enable(); | 692 | local_mcck_enable(); |
696 | local_irq_enable(); | 693 | local_irq_enable(); |
694 | #ifdef CONFIG_64BIT | ||
695 | if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) | ||
696 | BUG(); | ||
697 | #endif | ||
697 | for_each_possible_cpu(cpu) | 698 | for_each_possible_cpu(cpu) |
698 | if (cpu != smp_processor_id()) | 699 | if (cpu != smp_processor_id()) |
699 | smp_create_idle(cpu); | 700 | smp_create_idle(cpu); |
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 79dbfee831ec..49106c6e6f88 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -88,10 +88,17 @@ __kernel_clock_gettime: | |||
88 | llilh %r4,0x0100 | 88 | llilh %r4,0x0100 |
89 | sar %a4,%r4 | 89 | sar %a4,%r4 |
90 | lghi %r4,0 | 90 | lghi %r4,0 |
91 | epsw %r5,0 | ||
91 | sacf 512 /* Magic ectg instruction */ | 92 | sacf 512 /* Magic ectg instruction */ |
92 | .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4 | 93 | .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4 |
93 | sacf 0 | 94 | tml %r5,0x4000 |
94 | sar %a4,%r2 | 95 | jo 11f |
96 | tml %r5,0x8000 | ||
97 | jno 10f | ||
98 | sacf 256 | ||
99 | j 11f | ||
100 | 10: sacf 0 | ||
101 | 11: sar %a4,%r2 | ||
95 | algr %r1,%r0 /* r1 = cputime as TOD value */ | 102 | algr %r1,%r0 /* r1 = cputime as TOD value */ |
96 | mghi %r1,1000 /* convert to nanoseconds */ | 103 | mghi %r1,1000 /* convert to nanoseconds */ |
97 | srlg %r1,%r1,12 /* r1 = cputime in nanosec */ | 104 | srlg %r1,%r1,12 /* r1 = cputime in nanosec */ |
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c index e6a4fe9f5f24..bd1f5c6b0b8c 100644 --- a/arch/s390/power/swsusp.c +++ b/arch/s390/power/swsusp.c | |||
@@ -7,24 +7,36 @@ | |||
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <asm/system.h> | ||
10 | 11 | ||
11 | /* | ||
12 | * save CPU registers before creating a hibernation image and before | ||
13 | * restoring the memory state from it | ||
14 | */ | ||
15 | void save_processor_state(void) | 12 | void save_processor_state(void) |
16 | { | 13 | { |
17 | /* implentation contained in the | 14 | /* swsusp_arch_suspend() actually saves all cpu register contents. |
18 | * swsusp_arch_suspend function | 15 | * Machine checks must be disabled since swsusp_arch_suspend() stores |
16 | * register contents to their lowcore save areas. That's the same | ||
17 | * place where register contents on machine checks would be saved. | ||
18 | * To avoid register corruption disable machine checks. | ||
19 | * We must also disable machine checks in the new psw mask for | ||
20 | * program checks, since swsusp_arch_suspend() may generate program | ||
21 | * checks. Disabling machine checks for all other new psw masks is | ||
22 | * just paranoia. | ||
19 | */ | 23 | */ |
24 | local_mcck_disable(); | ||
25 | /* Disable lowcore protection */ | ||
26 | __ctl_clear_bit(0,28); | ||
27 | S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK; | ||
28 | S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK; | ||
29 | S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK; | ||
30 | S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK; | ||
20 | } | 31 | } |
21 | 32 | ||
22 | /* | ||
23 | * restore the contents of CPU registers | ||
24 | */ | ||
25 | void restore_processor_state(void) | 33 | void restore_processor_state(void) |
26 | { | 34 | { |
27 | /* implentation contained in the | 35 | S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK; |
28 | * swsusp_arch_resume function | 36 | S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK; |
29 | */ | 37 | S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK; |
38 | S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK; | ||
39 | /* Enable lowcore protection */ | ||
40 | __ctl_set_bit(0,28); | ||
41 | local_mcck_enable(); | ||
30 | } | 42 | } |
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S index 76d688da32fa..b26df5c5933e 100644 --- a/arch/s390/power/swsusp_asm64.S +++ b/arch/s390/power/swsusp_asm64.S | |||
@@ -32,19 +32,14 @@ swsusp_arch_suspend: | |||
32 | /* Deactivate DAT */ | 32 | /* Deactivate DAT */ |
33 | stnsm __SF_EMPTY(%r15),0xfb | 33 | stnsm __SF_EMPTY(%r15),0xfb |
34 | 34 | ||
35 | /* Switch off lowcore protection */ | ||
36 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
37 | ni __SF_EMPTY+4(%r15),0xef | ||
38 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
39 | |||
40 | /* Store prefix register on stack */ | 35 | /* Store prefix register on stack */ |
41 | stpx __SF_EMPTY(%r15) | 36 | stpx __SF_EMPTY(%r15) |
42 | 37 | ||
43 | /* Setup base register for lowcore (absolute 0) */ | 38 | /* Save prefix register contents for lowcore */ |
44 | llgf %r1,__SF_EMPTY(%r15) | 39 | llgf %r4,__SF_EMPTY(%r15) |
45 | 40 | ||
46 | /* Get pointer to save area */ | 41 | /* Get pointer to save area */ |
47 | aghi %r1,0x1000 | 42 | lghi %r1,0x1000 |
48 | 43 | ||
49 | /* Store registers */ | 44 | /* Store registers */ |
50 | mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ | 45 | mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ |
@@ -79,17 +74,15 @@ swsusp_arch_suspend: | |||
79 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | 74 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) |
80 | spx __SF_EMPTY(%r15) | 75 | spx __SF_EMPTY(%r15) |
81 | 76 | ||
82 | /* Setup lowcore */ | 77 | lghi %r2,0 |
83 | brasl %r14,setup_lowcore_early | 78 | lghi %r3,2*PAGE_SIZE |
79 | lghi %r5,2*PAGE_SIZE | ||
80 | 1: mvcle %r2,%r4,0 | ||
81 | jo 1b | ||
84 | 82 | ||
85 | /* Save image */ | 83 | /* Save image */ |
86 | brasl %r14,swsusp_save | 84 | brasl %r14,swsusp_save |
87 | 85 | ||
88 | /* Switch on lowcore protection */ | ||
89 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
90 | oi __SF_EMPTY+4(%r15),0x10 | ||
91 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
92 | |||
93 | /* Restore prefix register and return */ | 86 | /* Restore prefix register and return */ |
94 | lghi %r1,0x1000 | 87 | lghi %r1,0x1000 |
95 | spx 0x318(%r1) | 88 | spx 0x318(%r1) |
@@ -117,11 +110,6 @@ swsusp_arch_resume: | |||
117 | /* Deactivate DAT */ | 110 | /* Deactivate DAT */ |
118 | stnsm __SF_EMPTY(%r15),0xfb | 111 | stnsm __SF_EMPTY(%r15),0xfb |
119 | 112 | ||
120 | /* Switch off lowcore protection */ | ||
121 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
122 | ni __SF_EMPTY+4(%r15),0xef | ||
123 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
124 | |||
125 | /* Set prefix page to zero */ | 113 | /* Set prefix page to zero */ |
126 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | 114 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) |
127 | spx __SF_EMPTY(%r15) | 115 | spx __SF_EMPTY(%r15) |
@@ -175,7 +163,7 @@ swsusp_arch_resume: | |||
175 | /* Load old stack */ | 163 | /* Load old stack */ |
176 | lg %r15,0x2f8(%r13) | 164 | lg %r15,0x2f8(%r13) |
177 | 165 | ||
178 | /* Pointer to save arae */ | 166 | /* Pointer to save area */ |
179 | lghi %r13,0x1000 | 167 | lghi %r13,0x1000 |
180 | 168 | ||
181 | #ifdef CONFIG_SMP | 169 | #ifdef CONFIG_SMP |
@@ -187,11 +175,6 @@ swsusp_arch_resume: | |||
187 | /* Restore prefix register */ | 175 | /* Restore prefix register */ |
188 | spx 0x318(%r13) | 176 | spx 0x318(%r13) |
189 | 177 | ||
190 | /* Switch on lowcore protection */ | ||
191 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
192 | oi __SF_EMPTY+4(%r15),0x10 | ||
193 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
194 | |||
195 | /* Activate DAT */ | 178 | /* Activate DAT */ |
196 | stosm __SF_EMPTY(%r15),0x04 | 179 | stosm __SF_EMPTY(%r15),0x04 |
197 | 180 | ||
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 84dd2db7104c..63ca37bd9a95 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h | |||
@@ -73,20 +73,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | |||
73 | quicklist_free_page(QUICK_PT, NULL, pte); | 73 | quicklist_free_page(QUICK_PT, NULL, pte); |
74 | } | 74 | } |
75 | 75 | ||
76 | #define __pte_free_tlb(tlb,pte) \ | 76 | #define __pte_free_tlb(tlb,pte,addr) \ |
77 | do { \ | 77 | do { \ |
78 | pgtable_page_dtor(pte); \ | 78 | pgtable_page_dtor(pte); \ |
79 | tlb_remove_page((tlb), (pte)); \ | 79 | tlb_remove_page((tlb), (pte)); \ |
80 | } while (0) | 80 | } while (0) |
81 | 81 | ||
82 | /* | ||
83 | * allocating and freeing a pmd is trivial: the 1-entry pmd is | ||
84 | * inside the pgd, so has no extra memory associated with it. | ||
85 | */ | ||
86 | |||
87 | #define pmd_free(mm, x) do { } while (0) | ||
88 | #define __pmd_free_tlb(tlb,x) do { } while (0) | ||
89 | |||
90 | static inline void check_pgt_cache(void) | 82 | static inline void check_pgt_cache(void) |
91 | { | 83 | { |
92 | quicklist_trim(QUICK_PGD, NULL, 25, 16); | 84 | quicklist_trim(QUICK_PGD, NULL, 25, 16); |
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 9c16f737074a..da8fe7ab8728 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h | |||
@@ -91,9 +91,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) | 93 | #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) |
94 | #define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) | 94 | #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) |
95 | #define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) | 95 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) |
96 | #define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp) | 96 | #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) |
97 | 97 | ||
98 | #define tlb_migrate_finish(mm) do { } while (0) | 98 | #define tlb_migrate_finish(mm) do { } while (0) |
99 | 99 | ||
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h index 681582d26969..ca2b34456c4b 100644 --- a/arch/sparc/include/asm/pgalloc_32.h +++ b/arch/sparc/include/asm/pgalloc_32.h | |||
@@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long) | |||
44 | BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) | 44 | BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) |
45 | #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) | 45 | #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) |
46 | 46 | ||
47 | #define pmd_free(mm, pmd) free_pmd_fast(pmd) | 47 | #define pmd_free(mm, pmd) free_pmd_fast(pmd) |
48 | #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) | 48 | #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) |
49 | 49 | ||
50 | BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *) | 50 | BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *) |
51 | #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE) | 51 | #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE) |
@@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *) | |||
62 | #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte) | 62 | #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte) |
63 | 63 | ||
64 | BTFIXUPDEF_CALL(void, pte_free, pgtable_t ) | 64 | BTFIXUPDEF_CALL(void, pte_free, pgtable_t ) |
65 | #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) | 65 | #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) |
66 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) | 66 | #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) |
67 | 67 | ||
68 | #endif /* _SPARC_PGALLOC_H */ | 68 | #endif /* _SPARC_PGALLOC_H */ |
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index ee38e731bfa6..dca406b9b6fc 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h | |||
@@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0) | 102 | #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0) |
103 | #define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage) | 103 | #define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage) |
104 | #define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp) | 104 | #define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp) |
105 | #define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp) | 105 | #define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr) |
106 | 106 | ||
107 | #define tlb_migrate_finish(mm) do { } while (0) | 107 | #define tlb_migrate_finish(mm) do { } while (0) |
108 | #define tlb_start_vma(tlb, vma) do { } while (0) | 108 | #define tlb_start_vma(tlb, vma) do { } while (0) |
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index 718984359f8c..32c8ce4e1515 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h | |||
@@ -40,7 +40,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | |||
40 | __free_page(pte); | 40 | __free_page(pte); |
41 | } | 41 | } |
42 | 42 | ||
43 | #define __pte_free_tlb(tlb,pte) \ | 43 | #define __pte_free_tlb(tlb,pte, address) \ |
44 | do { \ | 44 | do { \ |
45 | pgtable_page_dtor(pte); \ | 45 | pgtable_page_dtor(pte); \ |
46 | tlb_remove_page((tlb),(pte)); \ | 46 | tlb_remove_page((tlb),(pte)); \ |
@@ -53,7 +53,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
53 | free_page((unsigned long)pmd); | 53 | free_page((unsigned long)pmd); |
54 | } | 54 | } |
55 | 55 | ||
56 | #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) | 56 | #define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x)) |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #define check_pgt_cache() do { } while (0) | 59 | #define check_pgt_cache() do { } while (0) |
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 5240fa1c5e08..660caedac9eb 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h | |||
@@ -116,11 +116,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
116 | __tlb_remove_tlb_entry(tlb, ptep, address); \ | 116 | __tlb_remove_tlb_entry(tlb, ptep, address); \ |
117 | } while (0) | 117 | } while (0) |
118 | 118 | ||
119 | #define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep) | 119 | #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) |
120 | 120 | ||
121 | #define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp) | 121 | #define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr) |
122 | 122 | ||
123 | #define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp) | 123 | #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) |
124 | 124 | ||
125 | #define tlb_migrate_finish(mm) do {} while (0) | 125 | #define tlb_migrate_finish(mm) do {} while (0) |
126 | 126 | ||
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index dd14c54ac718..0e8c2a0fd922 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h | |||
@@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte) | |||
46 | __free_page(pte); | 46 | __free_page(pte); |
47 | } | 47 | } |
48 | 48 | ||
49 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte); | 49 | extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte); |
50 | |||
51 | static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | ||
52 | unsigned long address) | ||
53 | { | ||
54 | ___pte_free_tlb(tlb, pte); | ||
55 | } | ||
50 | 56 | ||
51 | static inline void pmd_populate_kernel(struct mm_struct *mm, | 57 | static inline void pmd_populate_kernel(struct mm_struct *mm, |
52 | pmd_t *pmd, pte_t *pte) | 58 | pmd_t *pmd, pte_t *pte) |
@@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
78 | free_page((unsigned long)pmd); | 84 | free_page((unsigned long)pmd); |
79 | } | 85 | } |
80 | 86 | ||
81 | extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); | 87 | extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); |
88 | |||
89 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | ||
90 | unsigned long address) | ||
91 | { | ||
92 | ___pmd_free_tlb(tlb, pmd); | ||
93 | } | ||
82 | 94 | ||
83 | #ifdef CONFIG_X86_PAE | 95 | #ifdef CONFIG_X86_PAE |
84 | extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); | 96 | extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); |
@@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) | |||
108 | free_page((unsigned long)pud); | 120 | free_page((unsigned long)pud); |
109 | } | 121 | } |
110 | 122 | ||
111 | extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); | 123 | extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); |
124 | |||
125 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | ||
126 | unsigned long address) | ||
127 | { | ||
128 | ___pud_free_tlb(tlb, pud); | ||
129 | } | ||
130 | |||
112 | #endif /* PAGETABLE_LEVELS > 3 */ | 131 | #endif /* PAGETABLE_LEVELS > 3 */ |
113 | #endif /* PAGETABLE_LEVELS > 2 */ | 132 | #endif /* PAGETABLE_LEVELS > 2 */ |
114 | 133 | ||
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 20e6a795e160..d2c6c930b491 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -212,9 +212,9 @@ extern int __get_user_bad(void); | |||
212 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") | 212 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") |
213 | #else | 213 | #else |
214 | #define __put_user_asm_u64(x, ptr, retval, errret) \ | 214 | #define __put_user_asm_u64(x, ptr, retval, errret) \ |
215 | __put_user_asm(x, ptr, retval, "q", "", "Zr", errret) | 215 | __put_user_asm(x, ptr, retval, "q", "", "er", errret) |
216 | #define __put_user_asm_ex_u64(x, addr) \ | 216 | #define __put_user_asm_ex_u64(x, addr) \ |
217 | __put_user_asm_ex(x, addr, "q", "", "Zr") | 217 | __put_user_asm_ex(x, addr, "q", "", "er") |
218 | #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) | 218 | #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) |
219 | #endif | 219 | #endif |
220 | 220 | ||
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 8cc687326eb8..db24b215fc50 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) | |||
88 | ret, "l", "k", "ir", 4); | 88 | ret, "l", "k", "ir", 4); |
89 | return ret; | 89 | return ret; |
90 | case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, | 90 | case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, |
91 | ret, "q", "", "ir", 8); | 91 | ret, "q", "", "er", 8); |
92 | return ret; | 92 | return ret; |
93 | case 10: | 93 | case 10: |
94 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, | 94 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
95 | ret, "q", "", "ir", 10); | 95 | ret, "q", "", "er", 10); |
96 | if (unlikely(ret)) | 96 | if (unlikely(ret)) |
97 | return ret; | 97 | return ret; |
98 | asm("":::"memory"); | 98 | asm("":::"memory"); |
@@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) | |||
101 | return ret; | 101 | return ret; |
102 | case 16: | 102 | case 16: |
103 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, | 103 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
104 | ret, "q", "", "ir", 16); | 104 | ret, "q", "", "er", 16); |
105 | if (unlikely(ret)) | 105 | if (unlikely(ret)) |
106 | return ret; | 106 | return ret; |
107 | asm("":::"memory"); | 107 | asm("":::"memory"); |
108 | __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, | 108 | __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, |
109 | ret, "q", "", "ir", 8); | 109 | ret, "q", "", "er", 8); |
110 | return ret; | 110 | return ret; |
111 | default: | 111 | default: |
112 | return copy_user_generic((__force void *)dst, src, size); | 112 | return copy_user_generic((__force void *)dst, src, size); |
@@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
157 | ret, "q", "", "=r", 8); | 157 | ret, "q", "", "=r", 8); |
158 | if (likely(!ret)) | 158 | if (likely(!ret)) |
159 | __put_user_asm(tmp, (u64 __user *)dst, | 159 | __put_user_asm(tmp, (u64 __user *)dst, |
160 | ret, "q", "", "ir", 8); | 160 | ret, "q", "", "er", 8); |
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | default: | 163 | default: |
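The "Zr" to "er" switch in the two uaccess headers above concerns the 8-byte __put_user_asm() cases: a 64-bit mov to memory can only encode a 32-bit sign-extended immediate, which is what gcc's "e" constraint describes, while "Z" (a 32-bit zero-extended constant) also admits immediates that a sign-extending movq would silently corrupt. A minimal illustration outside the uaccess machinery, assuming gcc-style inline asm on x86-64:

/* sketch: "er" = 32-bit sign-extended immediate or register, exactly
 * what a 64-bit "mov $imm,mem" / "mov %reg,mem" can encode */
static inline void store64(unsigned long long *dst, unsigned long long val)
{
        asm("movq %1, %0" : "=m" (*dst) : "er" (val));
}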
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 28e5f5956042..e2485b03f1cf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
356 | #endif | 356 | #endif |
357 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) | 357 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) |
358 | /* check CPU config space for extended APIC ID */ | 358 | /* check CPU config space for extended APIC ID */ |
359 | if (c->x86 >= 0xf) { | 359 | if (cpu_has_apic && c->x86 >= 0xf) { |
360 | unsigned int val; | 360 | unsigned int val; |
361 | val = read_pci_config(0, 24, 0, 0x68); | 361 | val = read_pci_config(0, 24, 0, 0x68); |
362 | if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18))) | 362 | if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18))) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 484c1e5f658e..1cfb623ce11c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1692,17 +1692,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, | |||
1692 | const char *buf, size_t siz) | 1692 | const char *buf, size_t siz) |
1693 | { | 1693 | { |
1694 | char *p; | 1694 | char *p; |
1695 | int len; | ||
1696 | 1695 | ||
1697 | strncpy(mce_helper, buf, sizeof(mce_helper)); | 1696 | strncpy(mce_helper, buf, sizeof(mce_helper)); |
1698 | mce_helper[sizeof(mce_helper)-1] = 0; | 1697 | mce_helper[sizeof(mce_helper)-1] = 0; |
1699 | len = strlen(mce_helper); | ||
1700 | p = strchr(mce_helper, '\n'); | 1698 | p = strchr(mce_helper, '\n'); |
1701 | 1699 | ||
1702 | if (*p) | 1700 | if (p) |
1703 | *p = 0; | 1701 | *p = 0; |
1704 | 1702 | ||
1705 | return len; | 1703 | return strlen(mce_helper) + !!p; |
1706 | } | 1704 | } |
1707 | 1705 | ||
1708 | static ssize_t set_ignore_ce(struct sys_device *s, | 1706 | static ssize_t set_ignore_ce(struct sys_device *s, |
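The set_trigger() fix addresses two problems at once: the old code dereferenced the result of strchr() without a NULL check, so a write with no trailing newline would oops, and it returned a length captured before the newline was stripped. The corrected pattern for this kind of sysfs store handler, reduced to a self-contained sketch with hypothetical names:

/* sketch: copy, NUL-terminate, strip an optional trailing newline,
 * and report how many characters were consumed */
static ssize_t helper_store(const char *buf)
{
        static char helper[128];
        char *p;

        strncpy(helper, buf, sizeof(helper));
        helper[sizeof(helper) - 1] = '\0';

        p = strchr(helper, '\n');
        if (p)                          /* only dereference if a newline exists */
                *p = '\0';

        /* count the stripped newline as consumed, matching the fix above */
        return strlen(helper) + !!p;
}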
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 696f0e475c2d..92b7703d3d58 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -187,7 +187,7 @@ static void __init apic_intr_init(void) | |||
187 | #ifdef CONFIG_X86_THERMAL_VECTOR | 187 | #ifdef CONFIG_X86_THERMAL_VECTOR |
188 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 188 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
189 | #endif | 189 | #endif |
190 | #ifdef CONFIG_X86_THRESHOLD | 190 | #ifdef CONFIG_X86_MCE_THRESHOLD |
191 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | 191 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); |
192 | #endif | 192 | #endif |
193 | #if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC) | 193 | #if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC) |
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 846510b78a09..2a62d843f015 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c | |||
@@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) | |||
347 | 347 | ||
348 | static struct irqaction mfgptirq = { | 348 | static struct irqaction mfgptirq = { |
349 | .handler = mfgpt_tick, | 349 | .handler = mfgpt_tick, |
350 | .flags = IRQF_DISABLED | IRQF_NOBALANCING, | 350 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, |
351 | .name = "mfgpt-timer" | 351 | .name = "mfgpt-timer" |
352 | }; | 352 | }; |
353 | 353 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index d2d1ce8170f0..508e982dd072 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -249,6 +249,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
249 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), | 249 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), |
250 | }, | 250 | }, |
251 | }, | 251 | }, |
252 | { /* Handle problems with rebooting on CompuLab SBC-FITPC2 */ | ||
253 | .callback = set_bios_reboot, | ||
254 | .ident = "CompuLab SBC-FITPC2", | ||
255 | .matches = { | ||
256 | DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"), | ||
257 | DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), | ||
258 | }, | ||
259 | }, | ||
252 | { } | 260 | { } |
253 | }; | 261 | }; |
254 | 262 | ||
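The new entry makes the CompuLab SBC-FITPC2 reboot through the BIOS. DMI quirk tables like this are applied by dmi_check_system(), which walks the array and invokes the callback of every entry whose DMI_MATCH() fields are all present in the firmware's DMI strings; the call site is typically an initcall along these lines (sketch, hypothetical function name):

/* sketch: how a DMI quirk table is normally applied at init time */
static int __init reboot_quirks_init(void)
{
        /* calls set_bios_reboot() once for each matching entry */
        dmi_check_system(reboot_dmi_table);
        return 0;
}
core_initcall(reboot_quirks_init);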
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index de2cab132844..63f32d220ef2 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -672,6 +672,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { | |||
672 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"), | 672 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"), |
673 | }, | 673 | }, |
674 | }, | 674 | }, |
675 | { | ||
676 | /* | ||
677 | * AMI BIOS with low memory corruption was found on Intel DG45ID board. | ||
678 | * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will | ||
679 | * match only DMI_BOARD_NAME and see if there are more bad products | ||
680 | * with this vendor. | ||
681 | */ | ||
682 | .callback = dmi_low_memory_corruption, | ||
683 | .ident = "AMI BIOS", | ||
684 | .matches = { | ||
685 | DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), | ||
686 | }, | ||
687 | }, | ||
675 | #endif | 688 | #endif |
676 | {} | 689 | {} |
677 | }; | 690 | }; |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 367e87882041..59f31d2dd435 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -112,11 +112,6 @@ SECTIONS | |||
112 | _sdata = .; | 112 | _sdata = .; |
113 | DATA_DATA | 113 | DATA_DATA |
114 | CONSTRUCTORS | 114 | CONSTRUCTORS |
115 | |||
116 | #ifdef CONFIG_X86_64 | ||
117 | /* End of data section */ | ||
118 | _edata = .; | ||
119 | #endif | ||
120 | } :data | 115 | } :data |
121 | 116 | ||
122 | #ifdef CONFIG_X86_32 | 117 | #ifdef CONFIG_X86_32 |
@@ -156,10 +151,8 @@ SECTIONS | |||
156 | .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { | 151 | .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { |
157 | *(.data.read_mostly) | 152 | *(.data.read_mostly) |
158 | 153 | ||
159 | #ifdef CONFIG_X86_32 | ||
160 | /* End of data section */ | 154 | /* End of data section */ |
161 | _edata = .; | 155 | _edata = .; |
162 | #endif | ||
163 | } | 156 | } |
164 | 157 | ||
165 | #ifdef CONFIG_X86_64 | 158 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 58f621e81919..2112ed55e7ea 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -103,6 +103,7 @@ EXPORT_SYMBOL(kmap); | |||
103 | EXPORT_SYMBOL(kunmap); | 103 | EXPORT_SYMBOL(kunmap); |
104 | EXPORT_SYMBOL(kmap_atomic); | 104 | EXPORT_SYMBOL(kmap_atomic); |
105 | EXPORT_SYMBOL(kunmap_atomic); | 105 | EXPORT_SYMBOL(kunmap_atomic); |
106 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
106 | 107 | ||
107 | void __init set_highmem_pages_init(void) | 108 | void __init set_highmem_pages_init(void) |
108 | { | 109 | { |
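Exporting kmap_atomic_prot() lets modules create a short-lived atomic mapping of a highmem page with a non-default protection instead of being limited to plain kmap_atomic(). A usage sketch, assuming the km_type-based API of this kernel generation and PAGE_KERNEL_RO as the desired protection:

/* sketch: briefly map a highmem page read-only from module code */
static void peek_at_page(struct page *page)
{
        void *va = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL_RO);

        /* ... inspect the page contents here ... */

        kunmap_atomic(va, KM_USER0);
}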
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 8e43bdd45456..af8f9650058c 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
25 | return pte; | 25 | return pte; |
26 | } | 26 | } |
27 | 27 | ||
28 | void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) | 28 | void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) |
29 | { | 29 | { |
30 | pgtable_page_dtor(pte); | 30 | pgtable_page_dtor(pte); |
31 | paravirt_release_pte(page_to_pfn(pte)); | 31 | paravirt_release_pte(page_to_pfn(pte)); |
@@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) | |||
33 | } | 33 | } |
34 | 34 | ||
35 | #if PAGETABLE_LEVELS > 2 | 35 | #if PAGETABLE_LEVELS > 2 |
36 | void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) | 36 | void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) |
37 | { | 37 | { |
38 | paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); | 38 | paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); |
39 | tlb_remove_page(tlb, virt_to_page(pmd)); | 39 | tlb_remove_page(tlb, virt_to_page(pmd)); |
40 | } | 40 | } |
41 | 41 | ||
42 | #if PAGETABLE_LEVELS > 3 | 42 | #if PAGETABLE_LEVELS > 3 |
43 | void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) | 43 | void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) |
44 | { | 44 | { |
45 | paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); | 45 | paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); |
46 | tlb_remove_page(tlb, virt_to_page(pud)); | 46 | tlb_remove_page(tlb, virt_to_page(pud)); |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 2dfcbf9df2ae..dbb5381f7b3b 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -79,8 +79,10 @@ static __init void bad_srat(void) | |||
79 | acpi_numa = -1; | 79 | acpi_numa = -1; |
80 | for (i = 0; i < MAX_LOCAL_APIC; i++) | 80 | for (i = 0; i < MAX_LOCAL_APIC; i++) |
81 | apicid_to_node[i] = NUMA_NO_NODE; | 81 | apicid_to_node[i] = NUMA_NO_NODE; |
82 | for (i = 0; i < MAX_NUMNODES; i++) | 82 | for (i = 0; i < MAX_NUMNODES; i++) { |
83 | nodes_add[i].start = nodes[i].end = 0; | 83 | nodes[i].start = nodes[i].end = 0; |
84 | nodes_add[i].start = nodes_add[i].end = 0; | ||
85 | } | ||
84 | remove_all_active_ranges(); | 86 | remove_all_active_ranges(); |
85 | } | 87 | } |
86 | 88 | ||
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h index 31c220faca02..0d766f9c1083 100644 --- a/arch/xtensa/include/asm/tlb.h +++ b/arch/xtensa/include/asm/tlb.h | |||
@@ -42,6 +42,6 @@ | |||
42 | 42 | ||
43 | #include <asm-generic/tlb.h> | 43 | #include <asm-generic/tlb.h> |
44 | 44 | ||
45 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) | 45 | #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) |
46 | 46 | ||
47 | #endif /* _XTENSA_TLB_H */ | 47 | #endif /* _XTENSA_TLB_H */ |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 336eb1ed73cc..958c1fa41900 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -515,10 +515,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
515 | { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ | 515 | { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ |
516 | { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ | 516 | { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ |
517 | { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ | 517 | { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ |
518 | { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */ | ||
519 | { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ | ||
518 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ | 520 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ |
519 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ | 521 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ |
522 | { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ | ||
520 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ | 523 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
521 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ | 524 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ |
525 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ | ||
522 | 526 | ||
523 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 527 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
524 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 528 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d0a14cf2bd74..56b8a3ff1286 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -596,9 +596,12 @@ static const struct ich_laptop ich_laptop[] = { | |||
596 | { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ | 596 | { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ |
597 | { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ | 597 | { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ |
598 | { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ | 598 | { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ |
599 | { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ | ||
599 | { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ | 600 | { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ |
600 | { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ | 601 | { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ |
602 | { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */ | ||
601 | { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ | 603 | { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ |
604 | { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ | ||
602 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ | 605 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ |
603 | { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ | 606 | { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ |
604 | { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ | 607 | { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2c6aedaef718..8ac98ff16d7d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -1515,6 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
1515 | 1515 | ||
1516 | return rc; | 1516 | return rc; |
1517 | } | 1517 | } |
1518 | dev->n_native_sectors = native_sectors; | ||
1518 | 1519 | ||
1519 | /* nothing to do? */ | 1520 | /* nothing to do? */ |
1520 | if (native_sectors <= sectors || !ata_ignore_hpa) { | 1521 | if (native_sectors <= sectors || !ata_ignore_hpa) { |
@@ -4099,6 +4100,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, | |||
4099 | unsigned int readid_flags) | 4100 | unsigned int readid_flags) |
4100 | { | 4101 | { |
4101 | u64 n_sectors = dev->n_sectors; | 4102 | u64 n_sectors = dev->n_sectors; |
4103 | u64 n_native_sectors = dev->n_native_sectors; | ||
4102 | int rc; | 4104 | int rc; |
4103 | 4105 | ||
4104 | if (!ata_dev_enabled(dev)) | 4106 | if (!ata_dev_enabled(dev)) |
@@ -4128,16 +4130,30 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, | |||
4128 | /* verify n_sectors hasn't changed */ | 4130 | /* verify n_sectors hasn't changed */ |
4129 | if (dev->class == ATA_DEV_ATA && n_sectors && | 4131 | if (dev->class == ATA_DEV_ATA && n_sectors && |
4130 | dev->n_sectors != n_sectors) { | 4132 | dev->n_sectors != n_sectors) { |
4131 | ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " | 4133 | ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch " |
4132 | "%llu != %llu\n", | 4134 | "%llu != %llu\n", |
4133 | (unsigned long long)n_sectors, | 4135 | (unsigned long long)n_sectors, |
4134 | (unsigned long long)dev->n_sectors); | 4136 | (unsigned long long)dev->n_sectors); |
4135 | 4137 | /* | |
4136 | /* restore original n_sectors */ | 4138 | * Something could have caused HPA to be unlocked |
4137 | dev->n_sectors = n_sectors; | 4139 | * involuntarily. If n_native_sectors hasn't changed |
4138 | 4140 | * and the new size matches it, keep the device. | |
4139 | rc = -ENODEV; | 4141 | */ |
4140 | goto fail; | 4142 | if (dev->n_native_sectors == n_native_sectors && |
4143 | dev->n_sectors > n_sectors && | ||
4144 | dev->n_sectors == n_native_sectors) { | ||
4145 | ata_dev_printk(dev, KERN_WARNING, | ||
4146 | "new n_sectors matches native, probably " | ||
4147 | "late HPA unlock, continuing\n"); | ||
4148 | /* keep using the old n_sectors */ | ||
4149 | dev->n_sectors = n_sectors; | ||
4150 | } else { | ||
4151 | /* restore original n_[native]_sectors and fail */ | ||
4152 | dev->n_native_sectors = n_native_sectors; | ||
4153 | dev->n_sectors = n_sectors; | ||
4154 | rc = -ENODEV; | ||
4155 | goto fail; | ||
4156 | } | ||
4141 | } | 4157 | } |
4142 | 4158 | ||
4143 | return 0; | 4159 | return 0; |
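The revalidation logic above adds one question when the reported size changes: if the native size is unchanged and the device grew to exactly that native size, the change is taken to be a late HPA unlock and the old size is kept; any other mismatch is still treated as a different device. Restated as a predicate, with names that mirror the fields used above (sketch only):

/* sketch: distinguish a late HPA unlock from a genuinely different device */
static bool is_late_hpa_unlock(u64 old_sectors, u64 new_sectors,
                               u64 old_native, u64 new_native)
{
        return old_native == new_native &&      /* native size unchanged */
               new_sectors > old_sectors &&     /* device grew ...       */
               new_sectors == new_native;       /* ... to the native size */
}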
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1a07c061f644..79711b64054b 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -2327,7 +2327,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2327 | struct ata_port *ap = link->ap; | 2327 | struct ata_port *ap = link->ap; |
2328 | struct ata_link *slave = ap->slave_link; | 2328 | struct ata_link *slave = ap->slave_link; |
2329 | struct ata_eh_context *ehc = &link->eh_context; | 2329 | struct ata_eh_context *ehc = &link->eh_context; |
2330 | struct ata_eh_context *sehc = &slave->eh_context; | 2330 | struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; |
2331 | unsigned int *classes = ehc->classes; | 2331 | unsigned int *classes = ehc->classes; |
2332 | unsigned int lflags = link->flags; | 2332 | unsigned int lflags = link->flags; |
2333 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); | 2333 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); |
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 8561a9f195c1..5702affcb325 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c | |||
@@ -26,9 +26,7 @@ | |||
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/ata_platform.h> | 27 | #include <linux/ata_platform.h> |
28 | 28 | ||
29 | #include <mach/at91sam9260_matrix.h> | ||
30 | #include <mach/at91sam9_smc.h> | 29 | #include <mach/at91sam9_smc.h> |
31 | #include <mach/at91sam9260.h> | ||
32 | #include <mach/board.h> | 30 | #include <mach/board.h> |
33 | #include <mach/gpio.h> | 31 | #include <mach/gpio.h> |
34 | 32 | ||
@@ -44,65 +42,62 @@ struct at91_ide_info { | |||
44 | unsigned long mode; | 42 | unsigned long mode; |
45 | unsigned int cs; | 43 | unsigned int cs; |
46 | 44 | ||
45 | struct clk *mck; | ||
46 | |||
47 | void __iomem *ide_addr; | 47 | void __iomem *ide_addr; |
48 | void __iomem *alt_addr; | 48 | void __iomem *alt_addr; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | const struct ata_timing initial_timing = | 51 | static const struct ata_timing initial_timing = |
52 | {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; | 52 | {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; |
53 | 53 | ||
54 | static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz) | 54 | static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) |
55 | { | 55 | { |
56 | unsigned long mul; | 56 | unsigned long mul; |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = | 59 | * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = |
60 | * x * (f / 1_000_000_000) = | 60 | * x * (f / 1_000_000_000) = |
61 | * x * ((f * 65536) / 1_000_000_000) / 65536 = | 61 | * x * ((f * 65536) / 1_000_000_000) / 65536 = |
62 | * x * (((f / 10_000) * 65536) / 100_000) / 65536 = | 62 | * x * (((f / 10_000) * 65536) / 100_000) / 65536 = |
63 | */ | 63 | */ |
64 | 64 | ||
65 | mul = (mck_hz / 10000) << 16; | 65 | mul = (mck_hz / 10000) << 16; |
66 | mul /= 100000; | 66 | mul /= 100000; |
67 | 67 | ||
68 | return (ns * mul + 65536) >> 16; /* rounding */ | 68 | return (ns * mul + 65536) >> 16; /* rounding */ |
69 | } | 69 | } |
70 | 70 | ||
71 | static void set_smc_mode(struct at91_ide_info *info) | 71 | static void set_smc_mode(struct at91_ide_info *info) |
72 | { | 72 | { |
73 | at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); | 73 | at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); |
74 | return; | 74 | return; |
75 | } | 75 | } |
76 | 76 | ||
77 | static void set_smc_timing(struct device *dev, | 77 | static void set_smc_timing(struct device *dev, |
78 | struct at91_ide_info *info, const struct ata_timing *ata) | 78 | struct at91_ide_info *info, const struct ata_timing *ata) |
79 | { | 79 | { |
80 | int read_cycle, write_cycle, active, recover; | 80 | unsigned long read_cycle, write_cycle, active, recover; |
81 | int nrd_setup, nrd_pulse, nrd_recover; | 81 | unsigned long nrd_setup, nrd_pulse, nrd_recover; |
82 | int nwe_setup, nwe_pulse; | 82 | unsigned long nwe_setup, nwe_pulse; |
83 | 83 | ||
84 | int ncs_write_setup, ncs_write_pulse; | 84 | unsigned long ncs_write_setup, ncs_write_pulse; |
85 | int ncs_read_setup, ncs_read_pulse; | 85 | unsigned long ncs_read_setup, ncs_read_pulse; |
86 | 86 | ||
87 | unsigned int mck_hz; | 87 | unsigned long mck_hz; |
88 | struct clk *mck; | ||
89 | 88 | ||
90 | read_cycle = ata->cyc8b; | 89 | read_cycle = ata->cyc8b; |
91 | nrd_setup = ata->setup; | 90 | nrd_setup = ata->setup; |
92 | nrd_pulse = ata->act8b; | 91 | nrd_pulse = ata->act8b; |
93 | nrd_recover = ata->rec8b; | 92 | nrd_recover = ata->rec8b; |
94 | 93 | ||
95 | mck = clk_get(NULL, "mck"); | 94 | mck_hz = clk_get_rate(info->mck); |
96 | BUG_ON(IS_ERR(mck)); | ||
97 | mck_hz = clk_get_rate(mck); | ||
98 | 95 | ||
99 | read_cycle = calc_mck_cycles(read_cycle, mck_hz); | 96 | read_cycle = calc_mck_cycles(read_cycle, mck_hz); |
100 | nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); | 97 | nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); |
101 | nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz); | 98 | nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz); |
102 | nrd_recover = calc_mck_cycles(nrd_recover, mck_hz); | 99 | nrd_recover = calc_mck_cycles(nrd_recover, mck_hz); |
103 | 100 | ||
104 | clk_put(mck); | ||
105 | |||
106 | active = nrd_setup + nrd_pulse; | 101 | active = nrd_setup + nrd_pulse; |
107 | recover = read_cycle - active; | 102 | recover = read_cycle - active; |
108 | 103 | ||
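calc_mck_cycles() converts an ATA timing given in nanoseconds into master-clock cycles using 16.16 fixed point, and the move from int to unsigned long also keeps the values consistent with the %lu format now used in the dev_dbg() calls below. A worked instance of the conversion, assuming mck_hz = 100000000 (100 MHz):

/* worked example of the fixed-point conversion, assuming a 100 MHz MCK */
unsigned long mul = (100000000UL / 10000) << 16;   /* 10000 << 16 = 655360000 */

mul /= 100000;                  /* ~6553, i.e. 0.1 cycles-per-ns in 16.16 */

/* a 290 ns pulse then becomes (290 * 6553 + 65536) >> 16 = 29 cycles */
unsigned long cycles = (290UL * 6553 + 65536) >> 16;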
@@ -121,13 +116,13 @@ static void set_smc_timing(struct device *dev, | |||
121 | ncs_write_setup = ncs_read_setup; | 116 | ncs_write_setup = ncs_read_setup; |
122 | ncs_write_pulse = ncs_read_pulse; | 117 | ncs_write_pulse = ncs_read_pulse; |
123 | 118 | ||
124 | dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n", | 119 | dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n", |
125 | nrd_setup, nrd_pulse, read_cycle); | 120 | nrd_setup, nrd_pulse, read_cycle); |
126 | dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n", | 121 | dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n", |
127 | nwe_setup, nwe_pulse, write_cycle); | 122 | nwe_setup, nwe_pulse, write_cycle); |
128 | dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n", | 123 | dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n", |
129 | ncs_read_setup, ncs_read_pulse); | 124 | ncs_read_setup, ncs_read_pulse); |
130 | dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n", | 125 | dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n", |
131 | ncs_write_setup, ncs_write_pulse); | 126 | ncs_write_setup, ncs_write_pulse); |
132 | 127 | ||
133 | at91_sys_write(AT91_SMC_SETUP(info->cs), | 128 | at91_sys_write(AT91_SMC_SETUP(info->cs), |
@@ -217,6 +212,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
217 | struct resource *mem_res; | 212 | struct resource *mem_res; |
218 | struct ata_host *host; | 213 | struct ata_host *host; |
219 | struct ata_port *ap; | 214 | struct ata_port *ap; |
215 | |||
220 | int irq_flags = 0; | 216 | int irq_flags = 0; |
221 | int irq = 0; | 217 | int irq = 0; |
222 | int ret; | 218 | int ret; |
@@ -261,6 +257,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
261 | return -ENOMEM; | 257 | return -ENOMEM; |
262 | } | 258 | } |
263 | 259 | ||
260 | info->mck = clk_get(NULL, "mck"); | ||
261 | |||
262 | if (IS_ERR(info->mck)) { | ||
263 | dev_err(dev, "failed to get access to mck clock\n"); | ||
264 | return -ENODEV; | ||
265 | } | ||
266 | |||
264 | info->cs = board->chipselect; | 267 | info->cs = board->chipselect; |
265 | info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | | 268 | info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | |
266 | AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | | 269 | AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | |
@@ -304,6 +307,7 @@ err_alt_ioremap: | |||
304 | devm_iounmap(dev, info->ide_addr); | 307 | devm_iounmap(dev, info->ide_addr); |
305 | 308 | ||
306 | err_ide_ioremap: | 309 | err_ide_ioremap: |
310 | clk_put(info->mck); | ||
307 | kfree(info); | 311 | kfree(info); |
308 | 312 | ||
309 | return ret; | 313 | return ret; |
@@ -326,6 +330,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev) | |||
326 | 330 | ||
327 | devm_iounmap(dev, info->ide_addr); | 331 | devm_iounmap(dev, info->ide_addr); |
328 | devm_iounmap(dev, info->alt_addr); | 332 | devm_iounmap(dev, info->alt_addr); |
333 | clk_put(info->mck); | ||
329 | 334 | ||
330 | kfree(info); | 335 | kfree(info); |
331 | return 0; | 336 | return 0; |
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 8d9343accf3c..abdd19fe990a 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c | |||
@@ -653,7 +653,8 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) | |||
653 | 653 | ||
654 | ap = host->ports[i]; | 654 | ap = host->ports[i]; |
655 | ocd = ap->dev->platform_data; | 655 | ocd = ap->dev->platform_data; |
656 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) | 656 | |
657 | if (ap->flags & ATA_FLAG_DISABLED) | ||
657 | continue; | 658 | continue; |
658 | 659 | ||
659 | ocd = ap->dev->platform_data; | 660 | ocd = ap->dev->platform_data; |
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index f4d009ed50ac..dc99e26f8e5b 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -411,6 +411,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { | |||
411 | PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), | 411 | PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), |
412 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), | 412 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), |
413 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), | 413 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), |
414 | PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), | ||
414 | PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), | 415 | PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), |
415 | PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), | 416 | PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), |
416 | PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), | 417 | PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 23714aefb825..c19417e02208 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -2514,7 +2514,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) | |||
2514 | char *when = "idle"; | 2514 | char *when = "idle"; |
2515 | 2515 | ||
2516 | ata_ehi_clear_desc(ehi); | 2516 | ata_ehi_clear_desc(ehi); |
2517 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { | 2517 | if (ap->flags & ATA_FLAG_DISABLED) { |
2518 | when = "disabled"; | 2518 | when = "disabled"; |
2519 | } else if (edma_was_enabled) { | 2519 | } else if (edma_was_enabled) { |
2520 | when = "EDMA enabled"; | 2520 | when = "EDMA enabled"; |
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 030ec079b184..35bd5cc7f285 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -532,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance) | |||
532 | struct ata_port *ap = host->ports[i]; | 532 | struct ata_port *ap = host->ports[i]; |
533 | u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); | 533 | u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); |
534 | 534 | ||
535 | if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) | 535 | if (unlikely(ap->flags & ATA_FLAG_DISABLED)) |
536 | continue; | 536 | continue; |
537 | 537 | ||
538 | /* turn off SATA_IRQ if not supported */ | 538 | /* turn off SATA_IRQ if not supported */ |
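The pata_octeon_cf, sata_mv, and sata_sil hunks all drop the NULL test on host->ports[i] and keep only the ATA_FLAG_DISABLED check, since the ports array is populated before the interrupt handler can run. A sketch of the resulting per-port interrupt loop, with a hypothetical per-port handler standing in for the driver-specific work:

	#include <linux/interrupt.h>
	#include <linux/libata.h>

	/* Placeholder for the driver-specific per-port service routine. */
	static unsigned int example_handle_port(struct ata_port *ap)
	{
		return 0;
	}

	static irqreturn_t example_interrupt(int irq, void *dev_instance)
	{
		struct ata_host *host = dev_instance;
		unsigned int handled = 0;
		int i;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			/* ports[] entries are always valid; skip only disabled ports */
			if (ap->flags & ATA_FLAG_DISABLED)
				continue;

			handled |= example_handle_port(ap);
		}

		return IRQ_RETVAL(handled);
	}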
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index f285f441fab9..7376367bcb80 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -180,7 +180,6 @@ static ssize_t firmware_loading_store(struct device *dev, | |||
180 | goto err; | 180 | goto err; |
181 | } | 181 | } |
182 | /* Pages will be freed by vfree() */ | 182 | /* Pages will be freed by vfree() */ |
183 | fw_priv->pages = NULL; | ||
184 | fw_priv->page_array_size = 0; | 183 | fw_priv->page_array_size = 0; |
185 | fw_priv->nr_pages = 0; | 184 | fw_priv->nr_pages = 0; |
186 | complete(&fw_priv->completion); | 185 | complete(&fw_priv->completion); |
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 79a9ae5238ac..0d903909af7e 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -275,9 +275,9 @@ int sysdev_register(struct sys_device *sysdev) | |||
275 | drv->add(sysdev); | 275 | drv->add(sysdev); |
276 | } | 276 | } |
277 | mutex_unlock(&sysdev_drivers_lock); | 277 | mutex_unlock(&sysdev_drivers_lock); |
278 | kobject_uevent(&sysdev->kobj, KOBJ_ADD); | ||
278 | } | 279 | } |
279 | 280 | ||
280 | kobject_uevent(&sysdev->kobj, KOBJ_ADD); | ||
281 | return error; | 281 | return error; |
282 | } | 282 | } |
283 | 283 | ||
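The drivers/base/sys.c hunk moves the KOBJ_ADD uevent inside the success branch, so userspace is only notified after kobject registration and every driver's ->add() callback have run; a failed sysdev_register() no longer announces the device. A sketch of the resulting ordering, assuming the enclosing error check that the hunk itself does not show:

	#include <linux/sysdev.h>
	#include <linux/kobject.h>

	/* Assumed shape of the surrounding function; only the ordering matters.
	 * The real code sets up the kobject's kset/ktype before this point. */
	static int example_sysdev_register(struct sys_device *sysdev)
	{
		int error = kobject_add(&sysdev->kobj, NULL, "example");

		if (!error) {
			/* ... walk the class drivers and call each ->add(sysdev) ... */
			kobject_uevent(&sysdev->kobj, KOBJ_ADD);	/* success path only */
		}
		return error;
	}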
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index ff47907ff1bf..973be2f44195 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
@@ -1583,6 +1583,7 @@ static int n_tty_open(struct tty_struct *tty) | |||
1583 | 1583 | ||
1584 | static inline int input_available_p(struct tty_struct *tty, int amt) | 1584 | static inline int input_available_p(struct tty_struct *tty, int amt) |
1585 | { | 1585 | { |
1586 | tty_flush_to_ldisc(tty); | ||
1586 | if (tty->icanon) { | 1587 | if (tty->icanon) { |
1587 | if (tty->canon_data) | 1588 | if (tty->canon_data) |
1588 | return 1; | 1589 | return 1; |
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 810ee25d66a4..3108991c5c8b 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c | |||
@@ -462,6 +462,19 @@ static void flush_to_ldisc(struct work_struct *work) | |||
462 | } | 462 | } |
463 | 463 | ||
464 | /** | 464 | /** |
465 | * tty_flush_to_ldisc | ||
466 | * @tty: tty to push | ||
467 | * | ||
468 | * Push the terminal flip buffers to the line discipline. | ||
469 | * | ||
470 | * Must not be called from IRQ context. | ||
471 | */ | ||
472 | void tty_flush_to_ldisc(struct tty_struct *tty) | ||
473 | { | ||
474 | flush_to_ldisc(&tty->buf.work.work); | ||
475 | } | ||
476 | |||
477 | /** | ||
465 | * tty_flip_buffer_push - terminal | 478 | * tty_flip_buffer_push - terminal |
466 | * @tty: tty to push | 479 | * @tty: tty to push |
467 | * | 480 | * |
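The n_tty.c and tty_buffer.c hunks work together: the new tty_flush_to_ldisc() runs the flip-buffer work synchronously, and input_available_p() calls it first so data still sitting in the flip buffers reaches the line discipline before the read path decides whether anything is available. A sketch of that call order; the non-canonical branch is paraphrased, not copied from the driver:

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static int example_input_available(struct tty_struct *tty, int amt)
	{
		/* Push any queued flip-buffer data into the ldisc first; the new
		 * helper must not be called from IRQ context. */
		tty_flush_to_ldisc(tty);

		if (tty->icanon)
			return tty->canon_data != 0;

		/* non-canonical mode: enough raw bytes already queued? (simplified) */
		return tty->read_cnt >= (amt ? amt : 1);
	}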
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/drivers/char/vr41xx_giu.c +++ /dev/null | |||
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 858fe6037223..24964c1d0af9 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -970,7 +970,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | |||
970 | } | 970 | } |
971 | 971 | ||
972 | for (cs = 0; cs < pvt->num_dcsm; cs++) { | 972 | for (cs = 0; cs < pvt->num_dcsm; cs++) { |
973 | reg = K8_DCSB0 + (cs * 4); | 973 | reg = K8_DCSM0 + (cs * 4); |
974 | err = pci_read_config_dword(pvt->dram_f2_ctl, reg, | 974 | err = pci_read_config_dword(pvt->dram_f2_ctl, reg, |
975 | &pvt->dcsm0[cs]); | 975 | &pvt->dcsm0[cs]); |
976 | if (unlikely(err)) | 976 | if (unlikely(err)) |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 5fae1e074b4b..013d38059943 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \ | |||
13 | radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ | 13 | radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ |
14 | radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ | 14 | radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ |
15 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ | 15 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ |
16 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o | 16 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \ |
17 | radeon_test.o | ||
17 | 18 | ||
18 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 19 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
19 | 20 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c0080cc9bf8d..74d034f77c6b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -31,6 +31,132 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
33 | 33 | ||
34 | static void atombios_overscan_setup(struct drm_crtc *crtc, | ||
35 | struct drm_display_mode *mode, | ||
36 | struct drm_display_mode *adjusted_mode) | ||
37 | { | ||
38 | struct drm_device *dev = crtc->dev; | ||
39 | struct radeon_device *rdev = dev->dev_private; | ||
40 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
41 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
42 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
43 | int a1, a2; | ||
44 | |||
45 | memset(&args, 0, sizeof(args)); | ||
46 | |||
47 | args.usOverscanRight = 0; | ||
48 | args.usOverscanLeft = 0; | ||
49 | args.usOverscanBottom = 0; | ||
50 | args.usOverscanTop = 0; | ||
51 | args.ucCRTC = radeon_crtc->crtc_id; | ||
52 | |||
53 | switch (radeon_crtc->rmx_type) { | ||
54 | case RMX_CENTER: | ||
55 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
56 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
57 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
58 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
59 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
60 | break; | ||
61 | case RMX_ASPECT: | ||
62 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
63 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
64 | |||
65 | if (a1 > a2) { | ||
66 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
67 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
68 | } else if (a2 > a1) { | ||
69 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
70 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
71 | } | ||
72 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
73 | break; | ||
74 | case RMX_FULL: | ||
75 | default: | ||
76 | args.usOverscanRight = 0; | ||
77 | args.usOverscanLeft = 0; | ||
78 | args.usOverscanBottom = 0; | ||
79 | args.usOverscanTop = 0; | ||
80 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
81 | break; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | static void atombios_scaler_setup(struct drm_crtc *crtc) | ||
86 | { | ||
87 | struct drm_device *dev = crtc->dev; | ||
88 | struct radeon_device *rdev = dev->dev_private; | ||
89 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
90 | ENABLE_SCALER_PS_ALLOCATION args; | ||
91 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
92 | /* fixme - fill in enc_priv for atom dac */ | ||
93 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
94 | |||
95 | if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) | ||
96 | return; | ||
97 | |||
98 | memset(&args, 0, sizeof(args)); | ||
99 | |||
100 | args.ucScaler = radeon_crtc->crtc_id; | ||
101 | |||
102 | if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
103 | switch (tv_std) { | ||
104 | case TV_STD_NTSC: | ||
105 | default: | ||
106 | args.ucTVStandard = ATOM_TV_NTSC; | ||
107 | break; | ||
108 | case TV_STD_PAL: | ||
109 | args.ucTVStandard = ATOM_TV_PAL; | ||
110 | break; | ||
111 | case TV_STD_PAL_M: | ||
112 | args.ucTVStandard = ATOM_TV_PALM; | ||
113 | break; | ||
114 | case TV_STD_PAL_60: | ||
115 | args.ucTVStandard = ATOM_TV_PAL60; | ||
116 | break; | ||
117 | case TV_STD_NTSC_J: | ||
118 | args.ucTVStandard = ATOM_TV_NTSCJ; | ||
119 | break; | ||
120 | case TV_STD_SCART_PAL: | ||
121 | args.ucTVStandard = ATOM_TV_PAL; /* ??? */ | ||
122 | break; | ||
123 | case TV_STD_SECAM: | ||
124 | args.ucTVStandard = ATOM_TV_SECAM; | ||
125 | break; | ||
126 | case TV_STD_PAL_CN: | ||
127 | args.ucTVStandard = ATOM_TV_PALCN; | ||
128 | break; | ||
129 | } | ||
130 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
131 | } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) { | ||
132 | args.ucTVStandard = ATOM_TV_CV; | ||
133 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
134 | } else { | ||
135 | switch (radeon_crtc->rmx_type) { | ||
136 | case RMX_FULL: | ||
137 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
138 | break; | ||
139 | case RMX_CENTER: | ||
140 | args.ucEnable = ATOM_SCALER_CENTER; | ||
141 | break; | ||
142 | case RMX_ASPECT: | ||
143 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
144 | break; | ||
145 | default: | ||
146 | if (ASIC_IS_AVIVO(rdev)) | ||
147 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
148 | else | ||
149 | args.ucEnable = ATOM_SCALER_CENTER; | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
154 | if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) | ||
155 | && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { | ||
156 | atom_rv515_force_tv_scaler(rdev); | ||
157 | } | ||
158 | } | ||
159 | |||
34 | static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) | 160 | static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) |
35 | { | 161 | { |
36 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 162 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
@@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
203 | if (ASIC_IS_AVIVO(rdev)) { | 329 | if (ASIC_IS_AVIVO(rdev)) { |
204 | uint32_t ss_cntl; | 330 | uint32_t ss_cntl; |
205 | 331 | ||
332 | if ((rdev->family == CHIP_RS600) || | ||
333 | (rdev->family == CHIP_RS690) || | ||
334 | (rdev->family == CHIP_RS740)) | ||
335 | pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | ||
336 | RADEON_PLL_PREFER_CLOSEST_LOWER); | ||
337 | |||
206 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 338 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
207 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 339 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
208 | else | 340 | else |
@@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
321 | struct drm_gem_object *obj; | 453 | struct drm_gem_object *obj; |
322 | struct drm_radeon_gem_object *obj_priv; | 454 | struct drm_radeon_gem_object *obj_priv; |
323 | uint64_t fb_location; | 455 | uint64_t fb_location; |
324 | uint32_t fb_format, fb_pitch_pixels; | 456 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
325 | 457 | ||
326 | if (!crtc->fb) | 458 | if (!crtc->fb) |
327 | return -EINVAL; | 459 | return -EINVAL; |
@@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
358 | return -EINVAL; | 490 | return -EINVAL; |
359 | } | 491 | } |
360 | 492 | ||
361 | /* TODO tiling */ | 493 | radeon_object_get_tiling_flags(obj->driver_private, |
494 | &tiling_flags, NULL); | ||
495 | if (tiling_flags & RADEON_TILING_MACRO) | ||
496 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
497 | |||
498 | if (tiling_flags & RADEON_TILING_MICRO) | ||
499 | fb_format |= AVIVO_D1GRPH_TILED; | ||
500 | |||
362 | if (radeon_crtc->crtc_id == 0) | 501 | if (radeon_crtc->crtc_id == 0) |
363 | WREG32(AVIVO_D1VGA_CONTROL, 0); | 502 | WREG32(AVIVO_D1VGA_CONTROL, 0); |
364 | else | 503 | else |
@@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
509 | radeon_crtc_set_base(crtc, x, y, old_fb); | 648 | radeon_crtc_set_base(crtc, x, y, old_fb); |
510 | radeon_legacy_atom_set_surface(crtc); | 649 | radeon_legacy_atom_set_surface(crtc); |
511 | } | 650 | } |
651 | atombios_overscan_setup(crtc, mode, adjusted_mode); | ||
652 | atombios_scaler_setup(crtc); | ||
653 | radeon_bandwidth_update(rdev); | ||
512 | return 0; | 654 | return 0; |
513 | } | 655 | } |
514 | 656 | ||
@@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
516 | struct drm_display_mode *mode, | 658 | struct drm_display_mode *mode, |
517 | struct drm_display_mode *adjusted_mode) | 659 | struct drm_display_mode *adjusted_mode) |
518 | { | 660 | { |
661 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
662 | return false; | ||
519 | return true; | 663 | return true; |
520 | } | 664 | } |
521 | 665 | ||
@@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev, | |||
548 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; | 692 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; |
549 | drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); | 693 | drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); |
550 | } | 694 | } |
551 | |||
552 | void radeon_init_disp_bw_avivo(struct drm_device *dev, | ||
553 | struct drm_display_mode *mode1, | ||
554 | uint32_t pixel_bytes1, | ||
555 | struct drm_display_mode *mode2, | ||
556 | uint32_t pixel_bytes2) | ||
557 | { | ||
558 | struct radeon_device *rdev = dev->dev_private; | ||
559 | fixed20_12 min_mem_eff; | ||
560 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; | ||
561 | fixed20_12 sclk_ff, mclk_ff; | ||
562 | uint32_t dc_lb_memory_split, temp; | ||
563 | |||
564 | min_mem_eff.full = rfixed_const_8(0); | ||
565 | if (rdev->disp_priority == 2) { | ||
566 | uint32_t mc_init_misc_lat_timer = 0; | ||
567 | if (rdev->family == CHIP_RV515) | ||
568 | mc_init_misc_lat_timer = | ||
569 | RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER); | ||
570 | else if (rdev->family == CHIP_RS690) | ||
571 | mc_init_misc_lat_timer = | ||
572 | RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER); | ||
573 | |||
574 | mc_init_misc_lat_timer &= | ||
575 | ~(R300_MC_DISP1R_INIT_LAT_MASK << | ||
576 | R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
577 | mc_init_misc_lat_timer &= | ||
578 | ~(R300_MC_DISP0R_INIT_LAT_MASK << | ||
579 | R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
580 | |||
581 | if (mode2) | ||
582 | mc_init_misc_lat_timer |= | ||
583 | (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
584 | if (mode1) | ||
585 | mc_init_misc_lat_timer |= | ||
586 | (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
587 | |||
588 | if (rdev->family == CHIP_RV515) | ||
589 | WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER, | ||
590 | mc_init_misc_lat_timer); | ||
591 | else if (rdev->family == CHIP_RS690) | ||
592 | WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER, | ||
593 | mc_init_misc_lat_timer); | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * determine is there is enough bw for current mode | ||
598 | */ | ||
599 | temp_ff.full = rfixed_const(100); | ||
600 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
601 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
602 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
603 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
604 | |||
605 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
606 | temp_ff.full = rfixed_const(temp); | ||
607 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
608 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
609 | |||
610 | pix_clk.full = 0; | ||
611 | pix_clk2.full = 0; | ||
612 | peak_disp_bw.full = 0; | ||
613 | if (mode1) { | ||
614 | temp_ff.full = rfixed_const(1000); | ||
615 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
616 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
617 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
618 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
619 | } | ||
620 | if (mode2) { | ||
621 | temp_ff.full = rfixed_const(1000); | ||
622 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
623 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
624 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
625 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
626 | } | ||
627 | |||
628 | if (peak_disp_bw.full >= mem_bw.full) { | ||
629 | DRM_ERROR | ||
630 | ("You may not have enough display bandwidth for current mode\n" | ||
631 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
632 | printk("peak disp bw %d, mem_bw %d\n", | ||
633 | rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw)); | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * Line Buffer Setup | ||
638 | * There is a single line buffer shared by both display controllers. | ||
639 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display | ||
640 | * controllers. The paritioning can either be done manually or via one of four | ||
641 | * preset allocations specified in bits 1:0: | ||
642 | * 0 - line buffer is divided in half and shared between each display controller | ||
643 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | ||
644 | * 2 - D1 gets the whole buffer | ||
645 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | ||
646 | * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode. | ||
647 | * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits | ||
648 | * 14:4; D2 allocation follows D1. | ||
649 | */ | ||
650 | |||
651 | /* is auto or manual better ? */ | ||
652 | dc_lb_memory_split = | ||
653 | RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK; | ||
654 | dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
655 | #if 1 | ||
656 | /* auto */ | ||
657 | if (mode1 && mode2) { | ||
658 | if (mode1->hdisplay > mode2->hdisplay) { | ||
659 | if (mode1->hdisplay > 2560) | ||
660 | dc_lb_memory_split |= | ||
661 | AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | ||
662 | else | ||
663 | dc_lb_memory_split |= | ||
664 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
665 | } else if (mode2->hdisplay > mode1->hdisplay) { | ||
666 | if (mode2->hdisplay > 2560) | ||
667 | dc_lb_memory_split |= | ||
668 | AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
669 | else | ||
670 | dc_lb_memory_split |= | ||
671 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
672 | } else | ||
673 | dc_lb_memory_split |= | ||
674 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
675 | } else if (mode1) { | ||
676 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY; | ||
677 | } else if (mode2) { | ||
678 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
679 | } | ||
680 | #else | ||
681 | /* manual */ | ||
682 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
683 | dc_lb_memory_split &= | ||
684 | ~(AVIVO_DC_LB_DISP1_END_ADR_MASK << | ||
685 | AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
686 | if (mode1) { | ||
687 | dc_lb_memory_split |= | ||
688 | ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK) | ||
689 | << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
690 | } else if (mode2) { | ||
691 | dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
692 | } | ||
693 | #endif | ||
694 | WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split); | ||
695 | } | ||
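The RMX_ASPECT case in the new atombios_overscan_setup() compares the cross products a1 = mode_v * adjusted_h and a2 = adjusted_v * mode_h to decide which axis needs borders when the source aspect ratio is preserved. A standalone worked example of that arithmetic with illustrative numbers, not tied to the driver's structures (the a2 > a1 case corresponds to letterbox borders):

	#include <stdio.h>

	int main(void)
	{
		int mode_h = 1280, mode_v = 1024;	/* requested mode (5:4) */
		int adj_h = 1920, adj_v = 1080;		/* panel's native mode (16:9) */
		long a1 = (long)mode_v * adj_h;		/* 1966080 */
		long a2 = (long)adj_v * mode_h;		/* 1382400 */

		if (a1 > a2)	/* pillarbox: equal borders left/right */
			printf("h border: %ld\n", (adj_h - a2 / mode_v) / 2);	/* 285 */
		else if (a2 > a1)	/* letterbox: equal borders top/bottom */
			printf("v border: %ld\n", (adj_v - a1 / mode_h) / 2);
		return 0;
	}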
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c550932a108f..05a44896dffb 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
110 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 110 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
111 | return -EINVAL; | 111 | return -EINVAL; |
112 | } | 112 | } |
113 | rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); | 113 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
@@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev) | |||
173 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); | 173 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); |
174 | } | 174 | } |
175 | /* Write VRAM size in case we are limiting it */ | 175 | /* Write VRAM size in case we are limiting it */ |
176 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 176 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
177 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 177 | /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, |
178 | * if the aperture is 64MB but we have 32MB VRAM | ||
179 | * we report only 32MB VRAM but we have to set MC_FB_LOCATION | ||
180 | * to 64MB, otherwise the gpu accidentally dies */ | ||
181 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
178 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | 182 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
179 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | 183 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
180 | WREG32(RADEON_MC_FB_LOCATION, tmp); | 184 | WREG32(RADEON_MC_FB_LOCATION, tmp); |
@@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev) | |||
215 | r100_pci_gart_disable(rdev); | 219 | r100_pci_gart_disable(rdev); |
216 | 220 | ||
217 | /* Setup GPU memory space */ | 221 | /* Setup GPU memory space */ |
218 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
219 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 222 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
220 | if (rdev->flags & RADEON_IS_AGP) { | 223 | if (rdev->flags & RADEON_IS_AGP) { |
221 | r = radeon_agp_init(rdev); | 224 | r = radeon_agp_init(rdev); |
@@ -753,6 +756,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
753 | } | 756 | } |
754 | 757 | ||
755 | /** | 758 | /** |
759 | * r100_cs_packet_parse_vline() - parse userspace VLINE packet | ||
760 | * @parser: parser structure holding parsing context. | ||
761 | * | ||
762 | * Userspace sends a special sequence for VLINE waits. | ||
763 | * PACKET0 - VLINE_START_END + value | ||
764 | * PACKET0 - WAIT_UNTIL + value | ||
765 | * RELOC (P3) - crtc_id in reloc. | ||
766 | * | ||
767 | * This function parses this and relocates the VLINE START END | ||
768 | * and WAIT UNTIL packets to the correct crtc. | ||
769 | * It also detects a switched off crtc and nulls out the | ||
770 | * wait in that case. | ||
771 | */ | ||
772 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | ||
773 | { | ||
774 | struct radeon_cs_chunk *ib_chunk; | ||
775 | struct drm_mode_object *obj; | ||
776 | struct drm_crtc *crtc; | ||
777 | struct radeon_crtc *radeon_crtc; | ||
778 | struct radeon_cs_packet p3reloc, waitreloc; | ||
779 | int crtc_id; | ||
780 | int r; | ||
781 | uint32_t header, h_idx, reg; | ||
782 | |||
783 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
784 | |||
785 | /* parse the wait until */ | ||
786 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); | ||
787 | if (r) | ||
788 | return r; | ||
789 | |||
790 | /* check it's a wait until and only 1 count */ | ||
791 | if (waitreloc.reg != RADEON_WAIT_UNTIL || | ||
792 | waitreloc.count != 0) { | ||
793 | DRM_ERROR("vline wait had illegal wait until segment\n"); | ||
794 | r = -EINVAL; | ||
795 | return r; | ||
796 | } | ||
797 | |||
798 | if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { | ||
799 | DRM_ERROR("vline wait had illegal wait until\n"); | ||
800 | r = -EINVAL; | ||
801 | return r; | ||
802 | } | ||
803 | |||
804 | /* jump over the NOP */ | ||
805 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); | ||
806 | if (r) | ||
807 | return r; | ||
808 | |||
809 | h_idx = p->idx - 2; | ||
810 | p->idx += waitreloc.count; | ||
811 | p->idx += p3reloc.count; | ||
812 | |||
813 | header = ib_chunk->kdata[h_idx]; | ||
814 | crtc_id = ib_chunk->kdata[h_idx + 5]; | ||
815 | reg = ib_chunk->kdata[h_idx] >> 2; | ||
816 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
817 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | ||
818 | if (!obj) { | ||
819 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | ||
820 | r = -EINVAL; | ||
821 | goto out; | ||
822 | } | ||
823 | crtc = obj_to_crtc(obj); | ||
824 | radeon_crtc = to_radeon_crtc(crtc); | ||
825 | crtc_id = radeon_crtc->crtc_id; | ||
826 | |||
827 | if (!crtc->enabled) { | ||
828 | /* if the CRTC isn't enabled - we need to nop out the wait until */ | ||
829 | ib_chunk->kdata[h_idx + 2] = PACKET2(0); | ||
830 | ib_chunk->kdata[h_idx + 3] = PACKET2(0); | ||
831 | } else if (crtc_id == 1) { | ||
832 | switch (reg) { | ||
833 | case AVIVO_D1MODE_VLINE_START_END: | ||
834 | header &= R300_CP_PACKET0_REG_MASK; | ||
835 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; | ||
836 | break; | ||
837 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
838 | header &= R300_CP_PACKET0_REG_MASK; | ||
839 | header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; | ||
840 | break; | ||
841 | default: | ||
842 | DRM_ERROR("unknown crtc reloc\n"); | ||
843 | r = -EINVAL; | ||
844 | goto out; | ||
845 | } | ||
846 | ib_chunk->kdata[h_idx] = header; | ||
847 | ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | ||
848 | } | ||
849 | out: | ||
850 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
851 | return r; | ||
852 | } | ||
853 | |||
854 | /** | ||
756 | * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 | 855 | * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 |
757 | * @parser: parser structure holding parsing context. | 856 | * @parser: parser structure holding parsing context. |
758 | * @data: pointer to relocation data | 857 | * @data: pointer to relocation data |
@@ -814,6 +913,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
814 | unsigned idx; | 913 | unsigned idx; |
815 | bool onereg; | 914 | bool onereg; |
816 | int r; | 915 | int r; |
916 | u32 tile_flags = 0; | ||
817 | 917 | ||
818 | ib = p->ib->ptr; | 918 | ib = p->ib->ptr; |
819 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 919 | ib_chunk = &p->chunks[p->chunk_ib_idx]; |
@@ -825,6 +925,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
825 | } | 925 | } |
826 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { | 926 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { |
827 | switch (reg) { | 927 | switch (reg) { |
928 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
929 | r = r100_cs_packet_parse_vline(p); | ||
930 | if (r) { | ||
931 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
932 | idx, reg); | ||
933 | r100_cs_dump_packet(p, pkt); | ||
934 | return r; | ||
935 | } | ||
936 | break; | ||
828 | /* FIXME: only allow PACKET3 blit? easier to check for out of | 937 | /* FIXME: only allow PACKET3 blit? easier to check for out of |
829 | * range access */ | 938 | * range access */ |
830 | case RADEON_DST_PITCH_OFFSET: | 939 | case RADEON_DST_PITCH_OFFSET: |
@@ -838,7 +947,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
838 | } | 947 | } |
839 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 948 | tmp = ib_chunk->kdata[idx] & 0x003fffff; |
840 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 949 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
841 | ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; | 950 | |
951 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
952 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
953 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
954 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
955 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
956 | r100_cs_dump_packet(p, pkt); | ||
957 | return -EINVAL; | ||
958 | } | ||
959 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
960 | } | ||
961 | |||
962 | tmp |= tile_flags; | ||
963 | ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | ||
842 | break; | 964 | break; |
843 | case RADEON_RB3D_DEPTHOFFSET: | 965 | case RADEON_RB3D_DEPTHOFFSET: |
844 | case RADEON_RB3D_COLOROFFSET: | 966 | case RADEON_RB3D_COLOROFFSET: |
@@ -869,6 +991,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
869 | case R300_TX_OFFSET_0+52: | 991 | case R300_TX_OFFSET_0+52: |
870 | case R300_TX_OFFSET_0+56: | 992 | case R300_TX_OFFSET_0+56: |
871 | case R300_TX_OFFSET_0+60: | 993 | case R300_TX_OFFSET_0+60: |
994 | /* rn50 has no 3D engine so fail on any 3d setup */ | ||
995 | if (ASIC_IS_RN50(p->rdev)) { | ||
996 | DRM_ERROR("attempt to use RN50 3D engine failed\n"); | ||
997 | return -EINVAL; | ||
998 | } | ||
872 | r = r100_cs_packet_next_reloc(p, &reloc); | 999 | r = r100_cs_packet_next_reloc(p, &reloc); |
873 | if (r) { | 1000 | if (r) { |
874 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | 1001 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
@@ -878,6 +1005,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
878 | } | 1005 | } |
879 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1006 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
880 | break; | 1007 | break; |
1008 | case R300_RB3D_COLORPITCH0: | ||
1009 | case RADEON_RB3D_COLORPITCH: | ||
1010 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1011 | if (r) { | ||
1012 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1013 | idx, reg); | ||
1014 | r100_cs_dump_packet(p, pkt); | ||
1015 | return r; | ||
1016 | } | ||
1017 | |||
1018 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1019 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | ||
1020 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1021 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | ||
1022 | |||
1023 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1024 | tmp |= tile_flags; | ||
1025 | ib[idx] = tmp; | ||
1026 | break; | ||
881 | default: | 1027 | default: |
882 | /* FIXME: we don't want to allow anyothers packet */ | 1028 | /* FIXME: we don't want to allow anyothers packet */ |
883 | break; | 1029 | break; |
@@ -1256,29 +1402,100 @@ static void r100_vram_get_type(struct radeon_device *rdev) | |||
1256 | } | 1402 | } |
1257 | } | 1403 | } |
1258 | 1404 | ||
1259 | void r100_vram_info(struct radeon_device *rdev) | 1405 | static u32 r100_get_accessible_vram(struct radeon_device *rdev) |
1260 | { | 1406 | { |
1261 | r100_vram_get_type(rdev); | 1407 | u32 aper_size; |
1408 | u8 byte; | ||
1409 | |||
1410 | aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | ||
1411 | |||
1412 | /* Set HDP_APER_CNTL only on cards that are known not to be broken, | ||
1413 | * that is, have the 2nd generation multifunction PCI interface | ||
1414 | */ | ||
1415 | if (rdev->family == CHIP_RV280 || | ||
1416 | rdev->family >= CHIP_RV350) { | ||
1417 | WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, | ||
1418 | ~RADEON_HDP_APER_CNTL); | ||
1419 | DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); | ||
1420 | return aper_size * 2; | ||
1421 | } | ||
1422 | |||
1423 | /* Older cards have all sorts of funny issues to deal with. First | ||
1424 | * check if it's a multifunction card by reading the PCI config | ||
1425 | * header type... Limit those to one aperture size | ||
1426 | */ | ||
1427 | pci_read_config_byte(rdev->pdev, 0xe, &byte); | ||
1428 | if (byte & 0x80) { | ||
1429 | DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); | ||
1430 | DRM_INFO("Limiting VRAM to one aperture\n"); | ||
1431 | return aper_size; | ||
1432 | } | ||
1433 | |||
1434 | /* Single function older card. We read HDP_APER_CNTL to see how the BIOS | ||
1435 | * have set it up. We don't write this as it's broken on some ASICs but | ||
1436 | * we expect the BIOS to have done the right thing (might be too optimistic...) | ||
1437 | */ | ||
1438 | if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) | ||
1439 | return aper_size * 2; | ||
1440 | return aper_size; | ||
1441 | } | ||
1442 | |||
1443 | void r100_vram_init_sizes(struct radeon_device *rdev) | ||
1444 | { | ||
1445 | u64 config_aper_size; | ||
1446 | u32 accessible; | ||
1447 | |||
1448 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | ||
1262 | 1449 | ||
1263 | if (rdev->flags & RADEON_IS_IGP) { | 1450 | if (rdev->flags & RADEON_IS_IGP) { |
1264 | uint32_t tom; | 1451 | uint32_t tom; |
1265 | /* read NB_TOM to get the amount of ram stolen for the GPU */ | 1452 | /* read NB_TOM to get the amount of ram stolen for the GPU */ |
1266 | tom = RREG32(RADEON_NB_TOM); | 1453 | tom = RREG32(RADEON_NB_TOM); |
1267 | rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); | 1454 | rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
1268 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 1455 | /* for IGPs we need to keep VRAM where it was put by the BIOS */ |
1456 | rdev->mc.vram_location = (tom & 0xffff) << 16; | ||
1457 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
1458 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
1269 | } else { | 1459 | } else { |
1270 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 1460 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
1271 | /* Some production boards of m6 will report 0 | 1461 | /* Some production boards of m6 will report 0 |
1272 | * if it's 8 MB | 1462 | * if it's 8 MB |
1273 | */ | 1463 | */ |
1274 | if (rdev->mc.vram_size == 0) { | 1464 | if (rdev->mc.real_vram_size == 0) { |
1275 | rdev->mc.vram_size = 8192 * 1024; | 1465 | rdev->mc.real_vram_size = 8192 * 1024; |
1276 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 1466 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
1277 | } | 1467 | } |
1468 | /* let driver place VRAM */ | ||
1469 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
1470 | /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - | ||
1471 | * Novell bug 204882, along with lots of Ubuntu ones */ | ||
1472 | if (config_aper_size > rdev->mc.real_vram_size) | ||
1473 | rdev->mc.mc_vram_size = config_aper_size; | ||
1474 | else | ||
1475 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
1278 | } | 1476 | } |
1279 | 1477 | ||
1478 | /* work out accessible VRAM */ | ||
1479 | accessible = r100_get_accessible_vram(rdev); | ||
1480 | |||
1280 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 1481 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
1281 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 1482 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
1483 | |||
1484 | if (accessible > rdev->mc.aper_size) | ||
1485 | accessible = rdev->mc.aper_size; | ||
1486 | |||
1487 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
1488 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
1489 | |||
1490 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
1491 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
1492 | } | ||
1493 | |||
1494 | void r100_vram_info(struct radeon_device *rdev) | ||
1495 | { | ||
1496 | r100_vram_get_type(rdev); | ||
1497 | |||
1498 | r100_vram_init_sizes(rdev); | ||
1282 | } | 1499 | } |
1283 | 1500 | ||
1284 | 1501 | ||
@@ -1533,3 +1750,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev) | |||
1533 | return 0; | 1750 | return 0; |
1534 | #endif | 1751 | #endif |
1535 | } | 1752 | } |
1753 | |||
1754 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | ||
1755 | uint32_t tiling_flags, uint32_t pitch, | ||
1756 | uint32_t offset, uint32_t obj_size) | ||
1757 | { | ||
1758 | int surf_index = reg * 16; | ||
1759 | int flags = 0; | ||
1760 | |||
1761 | /* r100/r200 divide by 16 */ | ||
1762 | if (rdev->family < CHIP_R300) | ||
1763 | flags = pitch / 16; | ||
1764 | else | ||
1765 | flags = pitch / 8; | ||
1766 | |||
1767 | if (rdev->family <= CHIP_RS200) { | ||
1768 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | ||
1769 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | ||
1770 | flags |= RADEON_SURF_TILE_COLOR_BOTH; | ||
1771 | if (tiling_flags & RADEON_TILING_MACRO) | ||
1772 | flags |= RADEON_SURF_TILE_COLOR_MACRO; | ||
1773 | } else if (rdev->family <= CHIP_RV280) { | ||
1774 | if (tiling_flags & (RADEON_TILING_MACRO)) | ||
1775 | flags |= R200_SURF_TILE_COLOR_MACRO; | ||
1776 | if (tiling_flags & RADEON_TILING_MICRO) | ||
1777 | flags |= R200_SURF_TILE_COLOR_MICRO; | ||
1778 | } else { | ||
1779 | if (tiling_flags & RADEON_TILING_MACRO) | ||
1780 | flags |= R300_SURF_TILE_MACRO; | ||
1781 | if (tiling_flags & RADEON_TILING_MICRO) | ||
1782 | flags |= R300_SURF_TILE_MICRO; | ||
1783 | } | ||
1784 | |||
1785 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); | ||
1786 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); | ||
1787 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); | ||
1788 | WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); | ||
1789 | return 0; | ||
1790 | } | ||
1791 | |||
1792 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg) | ||
1793 | { | ||
1794 | int surf_index = reg * 16; | ||
1795 | WREG32(RADEON_SURFACE0_INFO + surf_index, 0); | ||
1796 | } | ||
1797 | |||
1798 | void r100_bandwidth_update(struct radeon_device *rdev) | ||
1799 | { | ||
1800 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; | ||
1801 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; | ||
1802 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | ||
1803 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | ||
1804 | fixed20_12 memtcas_ff[8] = { | ||
1805 | fixed_init(1), | ||
1806 | fixed_init(2), | ||
1807 | fixed_init(3), | ||
1808 | fixed_init(0), | ||
1809 | fixed_init_half(1), | ||
1810 | fixed_init_half(2), | ||
1811 | fixed_init(0), | ||
1812 | }; | ||
1813 | fixed20_12 memtcas_rs480_ff[8] = { | ||
1814 | fixed_init(0), | ||
1815 | fixed_init(1), | ||
1816 | fixed_init(2), | ||
1817 | fixed_init(3), | ||
1818 | fixed_init(0), | ||
1819 | fixed_init_half(1), | ||
1820 | fixed_init_half(2), | ||
1821 | fixed_init_half(3), | ||
1822 | }; | ||
1823 | fixed20_12 memtcas2_ff[8] = { | ||
1824 | fixed_init(0), | ||
1825 | fixed_init(1), | ||
1826 | fixed_init(2), | ||
1827 | fixed_init(3), | ||
1828 | fixed_init(4), | ||
1829 | fixed_init(5), | ||
1830 | fixed_init(6), | ||
1831 | fixed_init(7), | ||
1832 | }; | ||
1833 | fixed20_12 memtrbs[8] = { | ||
1834 | fixed_init(1), | ||
1835 | fixed_init_half(1), | ||
1836 | fixed_init(2), | ||
1837 | fixed_init_half(2), | ||
1838 | fixed_init(3), | ||
1839 | fixed_init_half(3), | ||
1840 | fixed_init(4), | ||
1841 | fixed_init_half(4) | ||
1842 | }; | ||
1843 | fixed20_12 memtrbs_r4xx[8] = { | ||
1844 | fixed_init(4), | ||
1845 | fixed_init(5), | ||
1846 | fixed_init(6), | ||
1847 | fixed_init(7), | ||
1848 | fixed_init(8), | ||
1849 | fixed_init(9), | ||
1850 | fixed_init(10), | ||
1851 | fixed_init(11) | ||
1852 | }; | ||
1853 | fixed20_12 min_mem_eff; | ||
1854 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | ||
1855 | fixed20_12 cur_latency_mclk, cur_latency_sclk; | ||
1856 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, | ||
1857 | disp_drain_rate2, read_return_rate; | ||
1858 | fixed20_12 time_disp1_drop_priority; | ||
1859 | int c; | ||
1860 | int cur_size = 16; /* in octawords */ | ||
1861 | int critical_point = 0, critical_point2; | ||
1862 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ | ||
1863 | int stop_req, max_stop_req; | ||
1864 | struct drm_display_mode *mode1 = NULL; | ||
1865 | struct drm_display_mode *mode2 = NULL; | ||
1866 | uint32_t pixel_bytes1 = 0; | ||
1867 | uint32_t pixel_bytes2 = 0; | ||
1868 | |||
1869 | if (rdev->mode_info.crtcs[0]->base.enabled) { | ||
1870 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | ||
1871 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | ||
1872 | } | ||
1873 | if (rdev->mode_info.crtcs[1]->base.enabled) { | ||
1874 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; | ||
1875 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | ||
1876 | } | ||
1877 | |||
1878 | min_mem_eff.full = rfixed_const_8(0); | ||
1879 | /* get modes */ | ||
1880 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | ||
1881 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | ||
1882 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
1883 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
1884 | /* check crtc enables */ | ||
1885 | if (mode2) | ||
1886 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
1887 | if (mode1) | ||
1888 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
1889 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); | ||
1890 | } | ||
1891 | |||
1892 | /* | ||
1893 | * determine if there is enough bw for the current mode | ||
1894 | */ | ||
1895 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
1896 | temp_ff.full = rfixed_const(100); | ||
1897 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
1898 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
1899 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
1900 | |||
1901 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
1902 | temp_ff.full = rfixed_const(temp); | ||
1903 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
1904 | |||
1905 | pix_clk.full = 0; | ||
1906 | pix_clk2.full = 0; | ||
1907 | peak_disp_bw.full = 0; | ||
1908 | if (mode1) { | ||
1909 | temp_ff.full = rfixed_const(1000); | ||
1910 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
1911 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
1912 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
1913 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
1914 | } | ||
1915 | if (mode2) { | ||
1916 | temp_ff.full = rfixed_const(1000); | ||
1917 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
1918 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
1919 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
1920 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
1921 | } | ||
1922 | |||
1923 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
1924 | if (peak_disp_bw.full >= mem_bw.full) { | ||
1925 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" | ||
1926 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
1927 | } | ||
1928 | |||
1929 | /* Get values from the EXT_MEM_CNTL register...converting its contents. */ | ||
1930 | temp = RREG32(RADEON_MEM_TIMING_CNTL); | ||
1931 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ | ||
1932 | mem_trcd = ((temp >> 2) & 0x3) + 1; | ||
1933 | mem_trp = ((temp & 0x3)) + 1; | ||
1934 | mem_tras = ((temp & 0x70) >> 4) + 1; | ||
1935 | } else if (rdev->family == CHIP_R300 || | ||
1936 | rdev->family == CHIP_R350) { /* r300, r350 */ | ||
1937 | mem_trcd = (temp & 0x7) + 1; | ||
1938 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
1939 | mem_tras = ((temp >> 11) & 0xf) + 4; | ||
1940 | } else if (rdev->family == CHIP_RV350 || | ||
1941 | rdev->family <= CHIP_RV380) { | ||
1942 | /* rv3x0 */ | ||
1943 | mem_trcd = (temp & 0x7) + 3; | ||
1944 | mem_trp = ((temp >> 8) & 0x7) + 3; | ||
1945 | mem_tras = ((temp >> 11) & 0xf) + 6; | ||
1946 | } else if (rdev->family == CHIP_R420 || | ||
1947 | rdev->family == CHIP_R423 || | ||
1948 | rdev->family == CHIP_RV410) { | ||
1949 | /* r4xx */ | ||
1950 | mem_trcd = (temp & 0xf) + 3; | ||
1951 | if (mem_trcd > 15) | ||
1952 | mem_trcd = 15; | ||
1953 | mem_trp = ((temp >> 8) & 0xf) + 3; | ||
1954 | if (mem_trp > 15) | ||
1955 | mem_trp = 15; | ||
1956 | mem_tras = ((temp >> 12) & 0x1f) + 6; | ||
1957 | if (mem_tras > 31) | ||
1958 | mem_tras = 31; | ||
1959 | } else { /* RV200, R200 */ | ||
1960 | mem_trcd = (temp & 0x7) + 1; | ||
1961 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
1962 | mem_tras = ((temp >> 12) & 0xf) + 4; | ||
1963 | } | ||
1964 | /* convert to FF */ | ||
1965 | trcd_ff.full = rfixed_const(mem_trcd); | ||
1966 | trp_ff.full = rfixed_const(mem_trp); | ||
1967 | tras_ff.full = rfixed_const(mem_tras); | ||
1968 | |||
1969 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ | ||
1970 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | ||
1971 | data = (temp & (7 << 20)) >> 20; | ||
1972 | if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { | ||
1973 | if (rdev->family == CHIP_RS480) /* don't think rs400 */ | ||
1974 | tcas_ff = memtcas_rs480_ff[data]; | ||
1975 | else | ||
1976 | tcas_ff = memtcas_ff[data]; | ||
1977 | } else | ||
1978 | tcas_ff = memtcas2_ff[data]; | ||
1979 | |||
1980 | if (rdev->family == CHIP_RS400 || | ||
1981 | rdev->family == CHIP_RS480) { | ||
1982 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | ||
1983 | data = (temp >> 23) & 0x7; | ||
1984 | if (data < 5) | ||
1985 | tcas_ff.full += rfixed_const(data); | ||
1986 | } | ||
1987 | |||
1988 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | ||
1989 | /* on the R300, Tcas is included in Trbs. | ||
1990 | */ | ||
1991 | temp = RREG32(RADEON_MEM_CNTL); | ||
1992 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); | ||
1993 | if (data == 1) { | ||
1994 | if (R300_MEM_USE_CD_CH_ONLY & temp) { | ||
1995 | temp = RREG32(R300_MC_IND_INDEX); | ||
1996 | temp &= ~R300_MC_IND_ADDR_MASK; | ||
1997 | temp |= R300_MC_READ_CNTL_CD_mcind; | ||
1998 | WREG32(R300_MC_IND_INDEX, temp); | ||
1999 | temp = RREG32(R300_MC_IND_DATA); | ||
2000 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); | ||
2001 | } else { | ||
2002 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
2003 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
2004 | } | ||
2005 | } else { | ||
2006 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
2007 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
2008 | } | ||
2009 | if (rdev->family == CHIP_RV410 || | ||
2010 | rdev->family == CHIP_R420 || | ||
2011 | rdev->family == CHIP_R423) | ||
2012 | trbs_ff = memtrbs_r4xx[data]; | ||
2013 | else | ||
2014 | trbs_ff = memtrbs[data]; | ||
2015 | tcas_ff.full += trbs_ff.full; | ||
2016 | } | ||
2017 | |||
2018 | sclk_eff_ff.full = sclk_ff.full; | ||
2019 | |||
2020 | if (rdev->flags & RADEON_IS_AGP) { | ||
2021 | fixed20_12 agpmode_ff; | ||
2022 | agpmode_ff.full = rfixed_const(radeon_agpmode); | ||
2023 | temp_ff.full = rfixed_const_666(16); | ||
2024 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | ||
2025 | } | ||
2026 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | ||
2027 | |||
2028 | if (ASIC_IS_R300(rdev)) { | ||
2029 | sclk_delay_ff.full = rfixed_const(250); | ||
2030 | } else { | ||
2031 | if ((rdev->family == CHIP_RV100) || | ||
2032 | rdev->flags & RADEON_IS_IGP) { | ||
2033 | if (rdev->mc.vram_is_ddr) | ||
2034 | sclk_delay_ff.full = rfixed_const(41); | ||
2035 | else | ||
2036 | sclk_delay_ff.full = rfixed_const(33); | ||
2037 | } else { | ||
2038 | if (rdev->mc.vram_width == 128) | ||
2039 | sclk_delay_ff.full = rfixed_const(57); | ||
2040 | else | ||
2041 | sclk_delay_ff.full = rfixed_const(41); | ||
2042 | } | ||
2043 | } | ||
2044 | |||
2045 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | ||
2046 | |||
2047 | if (rdev->mc.vram_is_ddr) { | ||
2048 | if (rdev->mc.vram_width == 32) { | ||
2049 | k1.full = rfixed_const(40); | ||
2050 | c = 3; | ||
2051 | } else { | ||
2052 | k1.full = rfixed_const(20); | ||
2053 | c = 1; | ||
2054 | } | ||
2055 | } else { | ||
2056 | k1.full = rfixed_const(40); | ||
2057 | c = 3; | ||
2058 | } | ||
2059 | |||
2060 | temp_ff.full = rfixed_const(2); | ||
2061 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | ||
2062 | temp_ff.full = rfixed_const(c); | ||
2063 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | ||
2064 | temp_ff.full = rfixed_const(4); | ||
2065 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | ||
2066 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | ||
2067 | mc_latency_mclk.full += k1.full; | ||
2068 | |||
2069 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | ||
2070 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | ||
2071 | |||
2072 | /* | ||
2073 | HW cursor time assuming worst case of full size colour cursor. | ||
2074 | */ | ||
2075 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | ||
2076 | temp_ff.full += trcd_ff.full; | ||
2077 | if (temp_ff.full < tras_ff.full) | ||
2078 | temp_ff.full = tras_ff.full; | ||
2079 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | ||
2080 | |||
2081 | temp_ff.full = rfixed_const(cur_size); | ||
2082 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | ||
2083 | /* | ||
2084 | Find the total latency for the display data. | ||
2085 | */ | ||
2086 | disp_latency_overhead.full = rfixed_const(80); | ||
2087 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | ||
2088 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | ||
2089 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | ||
2090 | |||
2091 | if (mc_latency_mclk.full > mc_latency_sclk.full) | ||
2092 | disp_latency.full = mc_latency_mclk.full; | ||
2093 | else | ||
2094 | disp_latency.full = mc_latency_sclk.full; | ||
2095 | |||
2096 | /* setup Max GRPH_STOP_REQ default value */ | ||
2097 | if (ASIC_IS_RV100(rdev)) | ||
2098 | max_stop_req = 0x5c; | ||
2099 | else | ||
2100 | max_stop_req = 0x7c; | ||
2101 | |||
2102 | if (mode1) { | ||
2103 | /* CRTC1 | ||
2104 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. | ||
2105 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] | ||
2106 | */ | ||
2107 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; | ||
2108 | |||
2109 | if (stop_req > max_stop_req) | ||
2110 | stop_req = max_stop_req; | ||
2111 | |||
2112 | /* | ||
2113 | Find the drain rate of the display buffer. | ||
2114 | */ | ||
2115 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | ||
2116 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | ||
2117 | |||
2118 | /* | ||
2119 | Find the critical point of the display buffer. | ||
2120 | */ | ||
2121 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | ||
2122 | crit_point_ff.full += rfixed_const_half(0); | ||
2123 | |||
2124 | critical_point = rfixed_trunc(crit_point_ff); | ||
2125 | |||
2126 | if (rdev->disp_priority == 2) { | ||
2127 | critical_point = 0; | ||
2128 | } | ||
2129 | |||
2130 | /* | ||
2131 | The critical point should never be above max_stop_req-4. Setting | ||
2132 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. | ||
2133 | */ | ||
2134 | if (max_stop_req - critical_point < 4) | ||
2135 | critical_point = 0; | ||
2136 | |||
2137 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { | ||
2138 | /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ | ||
2139 | critical_point = 0x10; | ||
2140 | } | ||
2141 | |||
2142 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); | ||
2143 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
2144 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
2145 | temp &= ~(RADEON_GRPH_START_REQ_MASK); | ||
2146 | if ((rdev->family == CHIP_R350) && | ||
2147 | (stop_req > 0x15)) { | ||
2148 | stop_req -= 0x10; | ||
2149 | } | ||
2150 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
2151 | temp |= RADEON_GRPH_BUFFER_SIZE; | ||
2152 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
2153 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
2154 | RADEON_GRPH_STOP_CNTL); | ||
2155 | /* | ||
2156 | Write the result into the register. | ||
2157 | */ | ||
2158 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
2159 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
2160 | |||
2161 | #if 0 | ||
2162 | if ((rdev->family == CHIP_RS400) || | ||
2163 | (rdev->family == CHIP_RS480)) { | ||
2164 | /* attempt to program RS400 disp regs correctly ??? */ | ||
2165 | temp = RREG32(RS400_DISP1_REG_CNTL); | ||
2166 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | | ||
2167 | RS400_DISP1_STOP_REQ_LEVEL_MASK); | ||
2168 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | | ||
2169 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
2170 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
2171 | temp = RREG32(RS400_DMIF_MEM_CNTL1); | ||
2172 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | | ||
2173 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); | ||
2174 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | | ||
2175 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | | ||
2176 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); | ||
2177 | } | ||
2178 | #endif | ||
2179 | |||
2180 | DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", | ||
2181 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ | ||
2182 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); | ||
2183 | } | ||
2184 | |||
2185 | if (mode2) { | ||
2186 | u32 grph2_cntl; | ||
2187 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; | ||
2188 | |||
2189 | if (stop_req > max_stop_req) | ||
2190 | stop_req = max_stop_req; | ||
2191 | |||
2192 | /* | ||
2193 | Find the drain rate of the display buffer. | ||
2194 | */ | ||
2195 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | ||
2196 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | ||
2197 | |||
2198 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | ||
2199 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
2200 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
2201 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); | ||
2202 | if ((rdev->family == CHIP_R350) && | ||
2203 | (stop_req > 0x15)) { | ||
2204 | stop_req -= 0x10; | ||
2205 | } | ||
2206 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
2207 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; | ||
2208 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
2209 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
2210 | RADEON_GRPH_STOP_CNTL); | ||
2211 | |||
2212 | if ((rdev->family == CHIP_RS100) || | ||
2213 | (rdev->family == CHIP_RS200)) | ||
2214 | critical_point2 = 0; | ||
2215 | else { | ||
2216 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | ||
2217 | temp_ff.full = rfixed_const(temp); | ||
2218 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | ||
2219 | if (sclk_ff.full < temp_ff.full) | ||
2220 | temp_ff.full = sclk_ff.full; | ||
2221 | |||
2222 | read_return_rate.full = temp_ff.full; | ||
2223 | |||
2224 | if (mode1) { | ||
2225 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | ||
2226 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | ||
2227 | } else { | ||
2228 | time_disp1_drop_priority.full = 0; | ||
2229 | } | ||
2230 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | ||
2231 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | ||
2232 | crit_point_ff.full += rfixed_const_half(0); | ||
2233 | |||
2234 | critical_point2 = rfixed_trunc(crit_point_ff); | ||
2235 | |||
2236 | if (rdev->disp_priority == 2) { | ||
2237 | critical_point2 = 0; | ||
2238 | } | ||
2239 | |||
2240 | if (max_stop_req - critical_point2 < 4) | ||
2241 | critical_point2 = 0; | ||
2242 | |||
2243 | } | ||
2244 | |||
2245 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { | ||
2246 | /* some R300 cards have a problem with this set to 0 */ | ||
2247 | critical_point2 = 0x10; | ||
2248 | } | ||
2249 | |||
2250 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
2251 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
2252 | |||
2253 | if ((rdev->family == CHIP_RS400) || | ||
2254 | (rdev->family == CHIP_RS480)) { | ||
2255 | #if 0 | ||
2256 | /* attempt to program RS400 disp2 regs correctly ??? */ | ||
2257 | temp = RREG32(RS400_DISP2_REQ_CNTL1); | ||
2258 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | | ||
2259 | RS400_DISP2_STOP_REQ_LEVEL_MASK); | ||
2260 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | | ||
2261 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
2262 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
2263 | temp = RREG32(RS400_DISP2_REQ_CNTL2); | ||
2264 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | | ||
2265 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); | ||
2266 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | | ||
2267 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | | ||
2268 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); | ||
2269 | #endif | ||
2270 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); | ||
2271 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); | ||
2272 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); | ||
2273 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); | ||
2274 | } | ||
2275 | |||
2276 | DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", | ||
2277 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | ||
2278 | } | ||
2279 | } | ||
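
The block above turns the display drain rate and latency into an integer critical point with the rfixed_* fixed-point helpers. A minimal standalone sketch of that arithmetic, assuming the 20.12 layout the fixed20_12 name suggests; the fx_* helpers and the example clock/latency values are illustrative stand-ins, not the driver's own code.

    /* 20.12 fixed-point sketch of the critical-point math above. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t fix20_12;                      /* 20 integer bits, 12 fractional */

    static fix20_12 fx_const(uint32_t v)            { return v << 12; }
    static fix20_12 fx_half(void)                   { return 1u << 11; }   /* 0.5 */
    static fix20_12 fx_mul(fix20_12 a, fix20_12 b)  { return (fix20_12)(((uint64_t)a * b) >> 12); }
    static fix20_12 fx_div(fix20_12 a, fix20_12 b)  { return (fix20_12)(((uint64_t)a << 12) / b); }
    static uint32_t fx_trunc(fix20_12 a)            { return a >> 12; }

    int main(void)
    {
        uint32_t pixel_bytes = 4;                   /* 32bpp scanout (example) */
        fix20_12 pix_clk = fx_const(135);           /* pixel clock (example) */
        fix20_12 disp_latency = fx_const(4);        /* worst-case latency (example) */

        /* drain rate = pixel clock / (16 / bytes per pixel), as in the hunk */
        fix20_12 disp_drain_rate = fx_div(pix_clk, fx_const(16 / pixel_bytes));

        /* critical point = drain rate * latency, rounded to the nearest integer */
        fix20_12 crit = fx_mul(disp_drain_rate, disp_latency);
        crit += fx_half();
        printf("critical_point = %u\n", fx_trunc(crit));
        return 0;
    }
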
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index e2ed5bc08170..9c8d41534a5d 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_drm.h" | ||
34 | #include "radeon_share.h" | ||
33 | 35 | ||
34 | /* r300,r350,rv350,rv370,rv380 depends on : */ | 36 | /* r300,r350,rv350,rv370,rv380 depends on : */ |
35 | void r100_hdp_reset(struct radeon_device *rdev); | 37 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev); | |||
44 | int r100_cs_packet_parse(struct radeon_cs_parser *p, | 46 | int r100_cs_packet_parse(struct radeon_cs_parser *p, |
45 | struct radeon_cs_packet *pkt, | 47 | struct radeon_cs_packet *pkt, |
46 | unsigned idx); | 48 | unsigned idx); |
49 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); | ||
47 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | 50 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, |
48 | struct radeon_cs_reloc **cs_reloc); | 51 | struct radeon_cs_reloc **cs_reloc); |
49 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, | 52 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
150 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 153 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
151 | return -EINVAL; | 154 | return -EINVAL; |
152 | } | 155 | } |
153 | addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; | 156 | addr = (lower_32_bits(addr) >> 8) | |
154 | writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); | 157 | ((upper_32_bits(addr) & 0xff) << 24) | |
158 | 0xc; | ||
159 | /* on x86 we want this to be CPU endian; on powerpc | ||
160 | * without HW swappers it'll get swapped on the way | ||
161 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | ||
162 | writel(addr, ((void __iomem *)ptr) + (i * 4)); | ||
155 | return 0; | 163 | return 0; |
156 | } | 164 | } |
157 | 165 | ||
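
The rewritten entry packing above folds a 40-bit bus address into a single 32-bit PCIE GART entry. A standalone sketch of the same packing, with the field layout read directly off the new code (address bits 39..8 plus the 0xc flag bits kept from the hunk):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rv370_gart_entry(uint64_t addr)
    {
        uint32_t lower = (uint32_t)addr;            /* lower_32_bits(addr) */
        uint32_t upper = (uint32_t)(addr >> 32);    /* upper_32_bits(addr) */

        return (lower >> 8) |                       /* address bits 31..8  */
               ((upper & 0xff) << 24) |             /* address bits 39..32 */
               0xc;                                 /* flag bits, as above */
    }

    int main(void)
    {
        /* 40-bit address 0x0123456700 -> entry 0x0123456f */
        printf("entry = 0x%08x\n", rv370_gart_entry(0x0123456700ULL));
        return 0;
    }
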
@@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev) | |||
579 | } else { | 587 | } else { |
580 | rdev->mc.vram_width = 64; | 588 | rdev->mc.vram_width = 64; |
581 | } | 589 | } |
582 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
583 | 590 | ||
584 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 591 | r100_vram_init_sizes(rdev); |
585 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
586 | } | 592 | } |
587 | 593 | ||
588 | 594 | ||
@@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track) | |||
970 | 976 | ||
971 | static const unsigned r300_reg_safe_bm[159] = { | 977 | static const unsigned r300_reg_safe_bm[159] = { |
972 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 978 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
973 | 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, | 979 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
974 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 980 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
975 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 981 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
976 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 982 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
@@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1019 | struct radeon_cs_reloc *reloc; | 1025 | struct radeon_cs_reloc *reloc; |
1020 | struct r300_cs_track *track; | 1026 | struct r300_cs_track *track; |
1021 | volatile uint32_t *ib; | 1027 | volatile uint32_t *ib; |
1022 | uint32_t tmp; | 1028 | uint32_t tmp, tile_flags = 0; |
1023 | unsigned i; | 1029 | unsigned i; |
1024 | int r; | 1030 | int r; |
1025 | 1031 | ||
@@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1027 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 1033 | ib_chunk = &p->chunks[p->chunk_ib_idx]; |
1028 | track = (struct r300_cs_track*)p->track; | 1034 | track = (struct r300_cs_track*)p->track; |
1029 | switch(reg) { | 1035 | switch(reg) { |
1036 | case AVIVO_D1MODE_VLINE_START_END: | ||
1037 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
1038 | r = r100_cs_packet_parse_vline(p); | ||
1039 | if (r) { | ||
1040 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1041 | idx, reg); | ||
1042 | r100_cs_dump_packet(p, pkt); | ||
1043 | return r; | ||
1044 | } | ||
1045 | break; | ||
1030 | case RADEON_DST_PITCH_OFFSET: | 1046 | case RADEON_DST_PITCH_OFFSET: |
1031 | case RADEON_SRC_PITCH_OFFSET: | 1047 | case RADEON_SRC_PITCH_OFFSET: |
1032 | r = r100_cs_packet_next_reloc(p, &reloc); | 1048 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1038 | } | 1054 | } |
1039 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 1055 | tmp = ib_chunk->kdata[idx] & 0x003fffff; |
1040 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 1056 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
1041 | ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; | 1057 | |
1058 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1059 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
1060 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
1061 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
1062 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
1063 | r100_cs_dump_packet(p, pkt); | ||
1064 | return -EINVAL; | ||
1065 | } | ||
1066 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
1067 | } | ||
1068 | tmp |= tile_flags; | ||
1069 | ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | ||
1042 | break; | 1070 | break; |
1043 | case R300_RB3D_COLOROFFSET0: | 1071 | case R300_RB3D_COLOROFFSET0: |
1044 | case R300_RB3D_COLOROFFSET1: | 1072 | case R300_RB3D_COLOROFFSET1: |
@@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1127 | /* RB3D_COLORPITCH1 */ | 1155 | /* RB3D_COLORPITCH1 */ |
1128 | /* RB3D_COLORPITCH2 */ | 1156 | /* RB3D_COLORPITCH2 */ |
1129 | /* RB3D_COLORPITCH3 */ | 1157 | /* RB3D_COLORPITCH3 */ |
1158 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1159 | if (r) { | ||
1160 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1161 | idx, reg); | ||
1162 | r100_cs_dump_packet(p, pkt); | ||
1163 | return r; | ||
1164 | } | ||
1165 | |||
1166 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1167 | tile_flags |= R300_COLOR_TILE_ENABLE; | ||
1168 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1169 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | ||
1170 | |||
1171 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1172 | tmp |= tile_flags; | ||
1173 | ib[idx] = tmp; | ||
1174 | |||
1130 | i = (reg - 0x4E38) >> 2; | 1175 | i = (reg - 0x4E38) >> 2; |
1131 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; | 1176 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; |
1132 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { | 1177 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { |
@@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1182 | break; | 1227 | break; |
1183 | case 0x4F24: | 1228 | case 0x4F24: |
1184 | /* ZB_DEPTHPITCH */ | 1229 | /* ZB_DEPTHPITCH */ |
1230 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1231 | if (r) { | ||
1232 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1233 | idx, reg); | ||
1234 | r100_cs_dump_packet(p, pkt); | ||
1235 | return r; | ||
1236 | } | ||
1237 | |||
1238 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1239 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; | ||
1240 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1241 | tile_flags |= R300_DEPTHMICROTILE_TILED; | ||
1242 | |||
1243 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1244 | tmp |= tile_flags; | ||
1245 | ib[idx] = tmp; | ||
1246 | |||
1185 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; | 1247 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; |
1186 | break; | 1248 | break; |
1187 | case 0x4104: | 1249 | case 0x4104: |
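
The new COLORPITCH/DEPTHPITCH handling above drops the tile-control bits supplied by userspace and substitutes flags derived from the buffer object's tiling state. A standalone sketch of that merge; the bit positions come from the & ~(0x7 << 16) mask in the hunk, and the two defines are placeholders for the R300 tile/microtile enables:

    #include <stdbool.h>
    #include <stdint.h>

    #define TILE_MACRO_ENABLE  (1u << 16)           /* placeholder bit values */
    #define TILE_MICRO_ENABLE  (1u << 17)

    uint32_t merge_tile_flags(uint32_t cs_value, bool macro, bool micro)
    {
        uint32_t tile_flags = 0;

        if (macro)
            tile_flags |= TILE_MACRO_ENABLE;
        if (micro)
            tile_flags |= TILE_MICRO_ENABLE;

        /* clear the tile-control field (bits 18:16), then insert our flags */
        return (cs_value & ~(0x7u << 16)) | tile_flags;
    }
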
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 70f48609515e..4b7afef35a65 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -27,7 +27,9 @@ | |||
27 | #ifndef _R300_REG_H_ | 27 | #ifndef _R300_REG_H_ |
28 | #define _R300_REG_H_ | 28 | #define _R300_REG_H_ |
29 | 29 | ||
30 | 30 | #define R300_SURF_TILE_MACRO (1<<16) | |
31 | #define R300_SURF_TILE_MICRO (2<<16) | ||
32 | #define R300_SURF_TILE_BOTH (3<<16) | ||
31 | 33 | ||
32 | 34 | ||
33 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 | 35 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 9070a1c2ce23..036691b38cb7 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -445,6 +445,7 @@ | |||
445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 | 445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 |
446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) | 446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) |
447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C | 447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C |
448 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 | ||
448 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 | 449 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 |
449 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 | 450 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 |
450 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 | 451 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 |
@@ -496,6 +497,7 @@ | |||
496 | #define AVIVO_D2CUR_SIZE 0x6c10 | 497 | #define AVIVO_D2CUR_SIZE 0x6c10 |
497 | #define AVIVO_D2CUR_POSITION 0x6c14 | 498 | #define AVIVO_D2CUR_POSITION 0x6c14 |
498 | 499 | ||
500 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 | ||
499 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 | 501 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 |
500 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 | 502 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 |
501 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 | 503 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 570a244bd88b..09fb0b6ec7dd 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_share.h" | ||
31 | 32 | ||
32 | /* r520,rv530,rv560,rv570,r580 depends on : */ | 33 | /* r520,rv530,rv560,rv570,r580 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | 34 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev) | |||
94 | "programming pipes. Bad things might happen.\n"); | 95 | "programming pipes. Bad things might happen.\n"); |
95 | } | 96 | } |
96 | /* Write VRAM size in case we are limiting it */ | 97 | /* Write VRAM size in case we are limiting it */ |
97 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 98 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
98 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 99 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
99 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); | 100 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); |
100 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); | 101 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); |
101 | WREG32_MC(R520_MC_FB_LOCATION, tmp); | 102 | WREG32_MC(R520_MC_FB_LOCATION, tmp); |
@@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev) | |||
226 | 227 | ||
227 | void r520_vram_info(struct radeon_device *rdev) | 228 | void r520_vram_info(struct radeon_device *rdev) |
228 | { | 229 | { |
230 | fixed20_12 a; | ||
231 | |||
229 | r520_vram_get_type(rdev); | 232 | r520_vram_get_type(rdev); |
230 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
231 | 233 | ||
232 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 234 | r100_vram_init_sizes(rdev); |
233 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 235 | /* FIXME: we should enforce default clock in case GPU is not in |
236 | * default setup | ||
237 | */ | ||
238 | a.full = rfixed_const(100); | ||
239 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
240 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
241 | } | ||
242 | |||
243 | void r520_bandwidth_update(struct radeon_device *rdev) | ||
244 | { | ||
245 | rv515_bandwidth_avivo_update(rdev); | ||
234 | } | 246 | } |
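
The r520_vram_info() change above also seeds rdev->pm.sclk by dividing the BIOS default engine clock by 100; the divisor suggests the BIOS value is in 10 kHz units, leaving MHz stored as 20.12 fixed point. A hedged sketch of that conversion with stand-in helpers for rfixed_const()/rfixed_div():

    #include <stdint.h>

    typedef uint32_t fix20_12;

    fix20_12 fx_const(uint32_t v)            { return v << 12; }
    fix20_12 fx_div(fix20_12 a, fix20_12 b)  { return (fix20_12)(((uint64_t)a << 12) / b); }

    fix20_12 sclk_fixed_mhz(uint32_t default_sclk)
    {
        /* e.g. 60000 (assumed 10 kHz units) -> 600.0 MHz in fixed point */
        return fx_div(fx_const(default_sclk), fx_const(100));
    }
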
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c45559fc97fd..538cd907df69 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
67 | "programming pipes. Bad things might happen.\n"); | 67 | "programming pipes. Bad things might happen.\n"); |
68 | } | 68 | } |
69 | 69 | ||
70 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 70 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
71 | tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); | 71 | tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); |
72 | tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); | 72 | tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); |
73 | WREG32(R600_MC_VM_FB_LOCATION, tmp); | 73 | WREG32(R600_MC_VM_FB_LOCATION, tmp); |
@@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev) | |||
140 | void r600_vram_info(struct radeon_device *rdev) | 140 | void r600_vram_info(struct radeon_device *rdev) |
141 | { | 141 | { |
142 | r600_vram_get_type(rdev); | 142 | r600_vram_get_type(rdev); |
143 | rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); | 143 | rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE); |
144 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
144 | 145 | ||
145 | /* Could aper size report 0 ? */ | 146 | /* Could aper size report 0 ? */ |
146 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 147 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d61f2fc61df5..b1d945b8ed6c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -64,6 +64,7 @@ extern int radeon_agpmode; | |||
64 | extern int radeon_vram_limit; | 64 | extern int radeon_vram_limit; |
65 | extern int radeon_gart_size; | 65 | extern int radeon_gart_size; |
66 | extern int radeon_benchmarking; | 66 | extern int radeon_benchmarking; |
67 | extern int radeon_testing; | ||
67 | extern int radeon_connector_table; | 68 | extern int radeon_connector_table; |
68 | 69 | ||
69 | /* | 70 | /* |
@@ -113,6 +114,7 @@ enum radeon_family { | |||
113 | CHIP_RV770, | 114 | CHIP_RV770, |
114 | CHIP_RV730, | 115 | CHIP_RV730, |
115 | CHIP_RV710, | 116 | CHIP_RV710, |
117 | CHIP_RS880, | ||
116 | CHIP_LAST, | 118 | CHIP_LAST, |
117 | }; | 119 | }; |
118 | 120 | ||
@@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev); | |||
201 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); | 203 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); |
202 | void radeon_fence_unref(struct radeon_fence **fence); | 204 | void radeon_fence_unref(struct radeon_fence **fence); |
203 | 205 | ||
206 | /* | ||
207 | * Tiling registers | ||
208 | */ | ||
209 | struct radeon_surface_reg { | ||
210 | struct radeon_object *robj; | ||
211 | }; | ||
212 | |||
213 | #define RADEON_GEM_MAX_SURFACES 8 | ||
204 | 214 | ||
205 | /* | 215 | /* |
206 | * Radeon buffer. | 216 | * Radeon buffer. |
@@ -213,6 +223,7 @@ struct radeon_object_list { | |||
213 | uint64_t gpu_offset; | 223 | uint64_t gpu_offset; |
214 | unsigned rdomain; | 224 | unsigned rdomain; |
215 | unsigned wdomain; | 225 | unsigned wdomain; |
226 | uint32_t tiling_flags; | ||
216 | }; | 227 | }; |
217 | 228 | ||
218 | int radeon_object_init(struct radeon_device *rdev); | 229 | int radeon_object_init(struct radeon_device *rdev); |
@@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head); | |||
242 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | 253 | int radeon_object_fbdev_mmap(struct radeon_object *robj, |
243 | struct vm_area_struct *vma); | 254 | struct vm_area_struct *vma); |
244 | unsigned long radeon_object_size(struct radeon_object *robj); | 255 | unsigned long radeon_object_size(struct radeon_object *robj); |
245 | 256 | void radeon_object_clear_surface_reg(struct radeon_object *robj); | |
246 | 257 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | |
258 | bool force_drop); | ||
259 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
260 | uint32_t tiling_flags, uint32_t pitch); | ||
261 | void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); | ||
262 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
263 | struct ttm_mem_reg *mem); | ||
264 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
247 | /* | 265 | /* |
248 | * GEM objects. | 266 | * GEM objects. |
249 | */ | 267 | */ |
@@ -315,8 +333,11 @@ struct radeon_mc { | |||
315 | unsigned gtt_location; | 333 | unsigned gtt_location; |
316 | unsigned gtt_size; | 334 | unsigned gtt_size; |
317 | unsigned vram_location; | 335 | unsigned vram_location; |
318 | unsigned vram_size; | 336 | /* for some chips with <= 32MB we need to lie |
337 | * about vram size near mc fb location */ | ||
338 | unsigned mc_vram_size; | ||
319 | unsigned vram_width; | 339 | unsigned vram_width; |
340 | unsigned real_vram_size; | ||
320 | int vram_mtrr; | 341 | int vram_mtrr; |
321 | bool vram_is_ddr; | 342 | bool vram_is_ddr; |
322 | }; | 343 | }; |
@@ -474,6 +495,39 @@ struct radeon_wb { | |||
474 | uint64_t gpu_addr; | 495 | uint64_t gpu_addr; |
475 | }; | 496 | }; |
476 | 497 | ||
498 | /** | ||
499 | * struct radeon_pm - power management data | ||
500 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) | ||
501 | * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) | ||
502 | * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) | ||
503 | * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) | ||
504 | * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) | ||
505 | * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) | ||
506 | * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) | ||
507 | * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) | ||
508 | * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) | ||
509 | * @sclk: GPU clock Mhz (core bandwidth depends on this clock) | ||
510 | * @needed_bandwidth: current bandwidth needs | ||
511 | * | ||
512 | * It keeps track of various data needed to make power management decisions. | ||
513 | * Bandwidth need is used to determine the minimum clock of the GPU and memory. | ||
514 | * Equation between gpu/memory clock and available bandwidth is hw dependent | ||
515 | * (type of memory, bus size, efficiency, ...) | ||
516 | */ | ||
517 | struct radeon_pm { | ||
518 | fixed20_12 max_bandwidth; | ||
519 | fixed20_12 igp_sideport_mclk; | ||
520 | fixed20_12 igp_system_mclk; | ||
521 | fixed20_12 igp_ht_link_clk; | ||
522 | fixed20_12 igp_ht_link_width; | ||
523 | fixed20_12 k8_bandwidth; | ||
524 | fixed20_12 sideport_bandwidth; | ||
525 | fixed20_12 ht_bandwidth; | ||
526 | fixed20_12 core_bandwidth; | ||
527 | fixed20_12 sclk; | ||
528 | fixed20_12 needed_bandwidth; | ||
529 | }; | ||
530 | |||
477 | 531 | ||
478 | /* | 532 | /* |
479 | * Benchmarking | 533 | * Benchmarking |
@@ -482,6 +536,12 @@ void radeon_benchmark(struct radeon_device *rdev); | |||
482 | 536 | ||
483 | 537 | ||
484 | /* | 538 | /* |
539 | * Testing | ||
540 | */ | ||
541 | void radeon_test_moves(struct radeon_device *rdev); | ||
542 | |||
543 | |||
544 | /* | ||
485 | * Debugfs | 545 | * Debugfs |
486 | */ | 546 | */ |
487 | int radeon_debugfs_add_files(struct radeon_device *rdev, | 547 | int radeon_debugfs_add_files(struct radeon_device *rdev, |
@@ -535,6 +595,11 @@ struct radeon_asic { | |||
535 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); | 595 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); |
536 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | 596 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
537 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | 597 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
598 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, | ||
599 | uint32_t tiling_flags, uint32_t pitch, | ||
600 | uint32_t offset, uint32_t obj_size); | ||
601 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | ||
602 | void (*bandwidth_update)(struct radeon_device *rdev); | ||
538 | }; | 603 | }; |
539 | 604 | ||
540 | union radeon_asic_config { | 605 | union radeon_asic_config { |
@@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
566 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | 631 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
567 | struct drm_file *filp); | 632 | struct drm_file *filp); |
568 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 633 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
634 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||
635 | struct drm_file *filp); | ||
636 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||
637 | struct drm_file *filp); | ||
569 | 638 | ||
570 | 639 | ||
571 | /* | 640 | /* |
@@ -594,8 +663,8 @@ struct radeon_device { | |||
594 | struct radeon_object *fbdev_robj; | 663 | struct radeon_object *fbdev_robj; |
595 | struct radeon_framebuffer *fbdev_rfb; | 664 | struct radeon_framebuffer *fbdev_rfb; |
596 | /* Register mmio */ | 665 | /* Register mmio */ |
597 | unsigned long rmmio_base; | 666 | resource_size_t rmmio_base; |
598 | unsigned long rmmio_size; | 667 | resource_size_t rmmio_size; |
599 | void *rmmio; | 668 | void *rmmio; |
600 | radeon_rreg_t mm_rreg; | 669 | radeon_rreg_t mm_rreg; |
601 | radeon_wreg_t mm_wreg; | 670 | radeon_wreg_t mm_wreg; |
@@ -619,11 +688,14 @@ struct radeon_device { | |||
619 | struct radeon_irq irq; | 688 | struct radeon_irq irq; |
620 | struct radeon_asic *asic; | 689 | struct radeon_asic *asic; |
621 | struct radeon_gem gem; | 690 | struct radeon_gem gem; |
691 | struct radeon_pm pm; | ||
622 | struct mutex cs_mutex; | 692 | struct mutex cs_mutex; |
623 | struct radeon_wb wb; | 693 | struct radeon_wb wb; |
624 | bool gpu_lockup; | 694 | bool gpu_lockup; |
625 | bool shutdown; | 695 | bool shutdown; |
626 | bool suspend; | 696 | bool suspend; |
697 | bool need_dma32; | ||
698 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | ||
627 | }; | 699 | }; |
628 | 700 | ||
629 | int radeon_device_init(struct radeon_device *rdev, | 701 | int radeon_device_init(struct radeon_device *rdev, |
@@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
670 | /* | 742 | /* |
671 | * ASICs helpers. | 743 | * ASICs helpers. |
672 | */ | 744 | */ |
745 | #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ | ||
746 | (rdev->pdev->device == 0x5969)) | ||
673 | #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ | 747 | #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ |
674 | (rdev->family == CHIP_RV200) || \ | 748 | (rdev->family == CHIP_RV200) || \ |
675 | (rdev->family == CHIP_RS100) || \ | 749 | (rdev->family == CHIP_RS100) || \ |
@@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
796 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 870 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
797 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 871 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
798 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 872 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
873 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | ||
874 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | ||
875 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | ||
799 | 876 | ||
800 | #endif | 877 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e2e567395df8..9a75876e0c3b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
71 | uint64_t dst_offset, | 71 | uint64_t dst_offset, |
72 | unsigned num_pages, | 72 | unsigned num_pages, |
73 | struct radeon_fence *fence); | 73 | struct radeon_fence *fence); |
74 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | ||
75 | uint32_t tiling_flags, uint32_t pitch, | ||
76 | uint32_t offset, uint32_t obj_size); | ||
77 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | ||
78 | void r100_bandwidth_update(struct radeon_device *rdev); | ||
74 | 79 | ||
75 | static struct radeon_asic r100_asic = { | 80 | static struct radeon_asic r100_asic = { |
76 | .init = &r100_init, | 81 | .init = &r100_init, |
@@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = { | |||
100 | .set_memory_clock = NULL, | 105 | .set_memory_clock = NULL, |
101 | .set_pcie_lanes = NULL, | 106 | .set_pcie_lanes = NULL, |
102 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 107 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
108 | .set_surface_reg = r100_set_surface_reg, | ||
109 | .clear_surface_reg = r100_clear_surface_reg, | ||
110 | .bandwidth_update = &r100_bandwidth_update, | ||
103 | }; | 111 | }; |
104 | 112 | ||
105 | 113 | ||
@@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev, | |||
128 | uint64_t dst_offset, | 136 | uint64_t dst_offset, |
129 | unsigned num_pages, | 137 | unsigned num_pages, |
130 | struct radeon_fence *fence); | 138 | struct radeon_fence *fence); |
139 | |||
131 | static struct radeon_asic r300_asic = { | 140 | static struct radeon_asic r300_asic = { |
132 | .init = &r300_init, | 141 | .init = &r300_init, |
133 | .errata = &r300_errata, | 142 | .errata = &r300_errata, |
@@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = { | |||
156 | .set_memory_clock = NULL, | 165 | .set_memory_clock = NULL, |
157 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 166 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 167 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
168 | .set_surface_reg = r100_set_surface_reg, | ||
169 | .clear_surface_reg = r100_clear_surface_reg, | ||
170 | .bandwidth_update = &r100_bandwidth_update, | ||
159 | }; | 171 | }; |
160 | 172 | ||
161 | /* | 173 | /* |
@@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = { | |||
193 | .set_memory_clock = &radeon_atom_set_memory_clock, | 205 | .set_memory_clock = &radeon_atom_set_memory_clock, |
194 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 206 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
195 | .set_clock_gating = &radeon_atom_set_clock_gating, | 207 | .set_clock_gating = &radeon_atom_set_clock_gating, |
208 | .set_surface_reg = r100_set_surface_reg, | ||
209 | .clear_surface_reg = r100_clear_surface_reg, | ||
210 | .bandwidth_update = &r100_bandwidth_update, | ||
196 | }; | 211 | }; |
197 | 212 | ||
198 | 213 | ||
@@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = { | |||
237 | .set_memory_clock = NULL, | 252 | .set_memory_clock = NULL, |
238 | .set_pcie_lanes = NULL, | 253 | .set_pcie_lanes = NULL, |
239 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 254 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
255 | .set_surface_reg = r100_set_surface_reg, | ||
256 | .clear_surface_reg = r100_clear_surface_reg, | ||
257 | .bandwidth_update = &r100_bandwidth_update, | ||
240 | }; | 258 | }; |
241 | 259 | ||
242 | 260 | ||
@@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev); | |||
254 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 272 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
255 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 273 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
256 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 274 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
275 | void rs600_bandwidth_update(struct radeon_device *rdev); | ||
257 | static struct radeon_asic rs600_asic = { | 276 | static struct radeon_asic rs600_asic = { |
258 | .init = &r300_init, | 277 | .init = &r300_init, |
259 | .errata = &rs600_errata, | 278 | .errata = &rs600_errata, |
@@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = { | |||
282 | .set_memory_clock = &radeon_atom_set_memory_clock, | 301 | .set_memory_clock = &radeon_atom_set_memory_clock, |
283 | .set_pcie_lanes = NULL, | 302 | .set_pcie_lanes = NULL, |
284 | .set_clock_gating = &radeon_atom_set_clock_gating, | 303 | .set_clock_gating = &radeon_atom_set_clock_gating, |
304 | .bandwidth_update = &rs600_bandwidth_update, | ||
285 | }; | 305 | }; |
286 | 306 | ||
287 | 307 | ||
@@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev); | |||
294 | void rs690_mc_fini(struct radeon_device *rdev); | 314 | void rs690_mc_fini(struct radeon_device *rdev); |
295 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 315 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
296 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 316 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
317 | void rs690_bandwidth_update(struct radeon_device *rdev); | ||
297 | static struct radeon_asic rs690_asic = { | 318 | static struct radeon_asic rs690_asic = { |
298 | .init = &r300_init, | 319 | .init = &r300_init, |
299 | .errata = &rs690_errata, | 320 | .errata = &rs690_errata, |
@@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = { | |||
322 | .set_memory_clock = &radeon_atom_set_memory_clock, | 343 | .set_memory_clock = &radeon_atom_set_memory_clock, |
323 | .set_pcie_lanes = NULL, | 344 | .set_pcie_lanes = NULL, |
324 | .set_clock_gating = &radeon_atom_set_clock_gating, | 345 | .set_clock_gating = &radeon_atom_set_clock_gating, |
346 | .set_surface_reg = r100_set_surface_reg, | ||
347 | .clear_surface_reg = r100_clear_surface_reg, | ||
348 | .bandwidth_update = &rs690_bandwidth_update, | ||
325 | }; | 349 | }; |
326 | 350 | ||
327 | 351 | ||
@@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
339 | void rv515_ring_start(struct radeon_device *rdev); | 363 | void rv515_ring_start(struct radeon_device *rdev); |
340 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 364 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
341 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 365 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
366 | void rv515_bandwidth_update(struct radeon_device *rdev); | ||
342 | static struct radeon_asic rv515_asic = { | 367 | static struct radeon_asic rv515_asic = { |
343 | .init = &rv515_init, | 368 | .init = &rv515_init, |
344 | .errata = &rv515_errata, | 369 | .errata = &rv515_errata, |
@@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = { | |||
367 | .set_memory_clock = &radeon_atom_set_memory_clock, | 392 | .set_memory_clock = &radeon_atom_set_memory_clock, |
368 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 393 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
369 | .set_clock_gating = &radeon_atom_set_clock_gating, | 394 | .set_clock_gating = &radeon_atom_set_clock_gating, |
395 | .set_surface_reg = r100_set_surface_reg, | ||
396 | .clear_surface_reg = r100_clear_surface_reg, | ||
397 | .bandwidth_update = &rv515_bandwidth_update, | ||
370 | }; | 398 | }; |
371 | 399 | ||
372 | 400 | ||
@@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev); | |||
377 | void r520_vram_info(struct radeon_device *rdev); | 405 | void r520_vram_info(struct radeon_device *rdev); |
378 | int r520_mc_init(struct radeon_device *rdev); | 406 | int r520_mc_init(struct radeon_device *rdev); |
379 | void r520_mc_fini(struct radeon_device *rdev); | 407 | void r520_mc_fini(struct radeon_device *rdev); |
408 | void r520_bandwidth_update(struct radeon_device *rdev); | ||
380 | static struct radeon_asic r520_asic = { | 409 | static struct radeon_asic r520_asic = { |
381 | .init = &rv515_init, | 410 | .init = &rv515_init, |
382 | .errata = &r520_errata, | 411 | .errata = &r520_errata, |
@@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = { | |||
405 | .set_memory_clock = &radeon_atom_set_memory_clock, | 434 | .set_memory_clock = &radeon_atom_set_memory_clock, |
406 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 435 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
407 | .set_clock_gating = &radeon_atom_set_clock_gating, | 436 | .set_clock_gating = &radeon_atom_set_clock_gating, |
437 | .set_surface_reg = r100_set_surface_reg, | ||
438 | .clear_surface_reg = r100_clear_surface_reg, | ||
439 | .bandwidth_update = &r520_bandwidth_update, | ||
408 | }; | 440 | }; |
409 | 441 | ||
410 | /* | 442 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1f5a1a490984..fcfe5c02d744 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device | |||
103 | static bool radeon_atom_apply_quirks(struct drm_device *dev, | 103 | static bool radeon_atom_apply_quirks(struct drm_device *dev, |
104 | uint32_t supported_device, | 104 | uint32_t supported_device, |
105 | int *connector_type, | 105 | int *connector_type, |
106 | struct radeon_i2c_bus_rec *i2c_bus) | 106 | struct radeon_i2c_bus_rec *i2c_bus, |
107 | uint8_t *line_mux) | ||
107 | { | 108 | { |
108 | 109 | ||
109 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ | 110 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
@@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
127 | if ((dev->pdev->device == 0x5653) && | 128 | if ((dev->pdev->device == 0x5653) && |
128 | (dev->pdev->subsystem_vendor == 0x1462) && | 129 | (dev->pdev->subsystem_vendor == 0x1462) && |
129 | (dev->pdev->subsystem_device == 0x0291)) { | 130 | (dev->pdev->subsystem_device == 0x0291)) { |
130 | if (*connector_type == DRM_MODE_CONNECTOR_LVDS) | 131 | if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { |
131 | i2c_bus->valid = false; | 132 | i2c_bus->valid = false; |
133 | *line_mux = 53; | ||
134 | } | ||
132 | } | 135 | } |
133 | 136 | ||
134 | /* Funky macbooks */ | 137 | /* Funky macbooks */ |
@@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
526 | 529 | ||
527 | if (!radeon_atom_apply_quirks | 530 | if (!radeon_atom_apply_quirks |
528 | (dev, (1 << i), &bios_connectors[i].connector_type, | 531 | (dev, (1 << i), &bios_connectors[i].connector_type, |
529 | &bios_connectors[i].ddc_bus)) | 532 | &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) |
530 | continue; | 533 | continue; |
531 | 534 | ||
532 | bios_connectors[i].valid = true; | 535 | bios_connectors[i].valid = true; |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c44403a2ca76..2e938f7496fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
63 | if (r) { | 63 | if (r) { |
64 | goto out_cleanup; | 64 | goto out_cleanup; |
65 | } | 65 | } |
66 | r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence); | 66 | r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); |
67 | if (r) { | 67 | if (r) { |
68 | goto out_cleanup; | 68 | goto out_cleanup; |
69 | } | 69 | } |
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
88 | if (r) { | 88 | if (r) { |
89 | goto out_cleanup; | 89 | goto out_cleanup; |
90 | } | 90 | } |
91 | r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence); | 91 | r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); |
92 | if (r) { | 92 | if (r) { |
93 | goto out_cleanup; | 93 | goto out_cleanup; |
94 | } | 94 | } |
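
The benchmark fix above replaces size >> 14 (a divide by 16384) with size / 4096, i.e. the byte count is converted into the page count the copy helpers expect. A trivial sketch, with the page size assumed from the new divisor:

    #include <stdint.h>

    #define GPU_PAGE_SIZE 4096u                     /* assumed from "size / 4096" */

    uint32_t bytes_to_gpu_pages(uint32_t size_bytes)
    {
        return size_bytes / GPU_PAGE_SIZE;          /* e.g. 1 MiB -> 256 pages */
    }
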
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index b843f9bdfb14..a169067efc4e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
127 | sizeof(struct drm_radeon_cs_chunk))) { | 127 | sizeof(struct drm_radeon_cs_chunk))) { |
128 | return -EFAULT; | 128 | return -EFAULT; |
129 | } | 129 | } |
130 | p->chunks[i].length_dw = user_chunk.length_dw; | ||
131 | p->chunks[i].kdata = NULL; | ||
130 | p->chunks[i].chunk_id = user_chunk.chunk_id; | 132 | p->chunks[i].chunk_id = user_chunk.chunk_id; |
133 | |||
131 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { | 134 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { |
132 | p->chunk_relocs_idx = i; | 135 | p->chunk_relocs_idx = i; |
133 | } | 136 | } |
134 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { | 137 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
135 | p->chunk_ib_idx = i; | 138 | p->chunk_ib_idx = i; |
139 | /* zero length IB isn't useful */ | ||
140 | if (p->chunks[i].length_dw == 0) | ||
141 | return -EINVAL; | ||
136 | } | 142 | } |
143 | |||
137 | p->chunks[i].length_dw = user_chunk.length_dw; | 144 | p->chunks[i].length_dw = user_chunk.length_dw; |
138 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; | 145 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; |
139 | 146 | ||
140 | p->chunks[i].kdata = NULL; | ||
141 | size = p->chunks[i].length_dw * sizeof(uint32_t); | 147 | size = p->chunks[i].length_dw * sizeof(uint32_t); |
142 | p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); | 148 | p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); |
143 | if (p->chunks[i].kdata == NULL) { | 149 | if (p->chunks[i].kdata == NULL) { |
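
The reordering in radeon_cs_parser_init() above captures length_dw and rejects a zero-length IB chunk before that length is used to size the allocation. A small user-space sketch of the same guard, with calloc standing in for kzalloc:

    #include <stdint.h>
    #include <stdlib.h>

    uint32_t *copy_ib_chunk(uint32_t length_dw)
    {
        if (length_dw == 0)
            return NULL;                            /* zero length IB isn't useful */
        return calloc(length_dw, sizeof(uint32_t)); /* size known to be non-zero */
    }
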
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5232441f119b..b13c79e38bc0 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | |||
111 | 111 | ||
112 | if (ASIC_IS_AVIVO(rdev)) | 112 | if (ASIC_IS_AVIVO(rdev)) |
113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); |
114 | else | 114 | else { |
115 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; | ||
115 | /* offset is from DISP(2)_BASE_ADDRESS */ | 116 | /* offset is from DISP(2)_BASE_ADDRESS */ |
116 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr); | 117 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); |
118 | } | ||
117 | } | 119 | } |
118 | 120 | ||
119 | int radeon_crtc_cursor_set(struct drm_crtc *crtc, | 121 | int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
@@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
245 | (RADEON_CUR_LOCK | 247 | (RADEON_CUR_LOCK |
246 | | ((xorigin ? 0 : x) << 16) | 248 | | ((xorigin ? 0 : x) << 16) |
247 | | (yorigin ? 0 : y))); | 249 | | (yorigin ? 0 : y))); |
250 | /* offset is from DISP(2)_BASE_ADDRESS */ | ||
251 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | ||
252 | (yorigin * 256))); | ||
248 | } | 253 | } |
249 | radeon_lock_cursor(crtc, false); | 254 | radeon_lock_cursor(crtc, false); |
250 | 255 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f97563db4e59..a162ade74b7f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -48,6 +48,8 @@ static void radeon_surface_init(struct radeon_device *rdev) | |||
48 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), | 48 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), |
49 | 0); | 49 | 0); |
50 | } | 50 | } |
51 | /* enable surfaces */ | ||
52 | WREG32(RADEON_SURFACE_CNTL, 0); | ||
51 | } | 53 | } |
52 | } | 54 | } |
53 | 55 | ||
@@ -119,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
119 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { | 121 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { |
120 | /* vram location was already setup try to put gtt after | 122 | /* vram location was already setup try to put gtt after |
121 | * if it fits */ | 123 | * if it fits */ |
122 | tmp = rdev->mc.vram_location + rdev->mc.vram_size; | 124 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
123 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); | 125 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
124 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { | 126 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
125 | rdev->mc.gtt_location = tmp; | 127 | rdev->mc.gtt_location = tmp; |
@@ -134,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
134 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { | 136 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { |
135 | /* gtt location was already setup try to put vram before | 137 | /* gtt location was already setup try to put vram before |
136 | * if it fits */ | 138 | * if it fits */ |
137 | if (rdev->mc.vram_size < rdev->mc.gtt_location) { | 139 | if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { |
138 | rdev->mc.vram_location = 0; | 140 | rdev->mc.vram_location = 0; |
139 | } else { | 141 | } else { |
140 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; | 142 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; |
141 | tmp += (rdev->mc.vram_size - 1); | 143 | tmp += (rdev->mc.mc_vram_size - 1); |
142 | tmp &= ~(rdev->mc.vram_size - 1); | 144 | tmp &= ~(rdev->mc.mc_vram_size - 1); |
143 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { | 145 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { |
144 | rdev->mc.vram_location = tmp; | 146 | rdev->mc.vram_location = tmp; |
145 | } else { | 147 | } else { |
146 | printk(KERN_ERR "[drm] vram too big to fit " | 148 | printk(KERN_ERR "[drm] vram too big to fit " |
@@ -150,12 +152,14 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
150 | } | 152 | } |
151 | } else { | 153 | } else { |
152 | rdev->mc.vram_location = 0; | 154 | rdev->mc.vram_location = 0; |
153 | rdev->mc.gtt_location = rdev->mc.vram_size; | 155 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
154 | } | 156 | } |
155 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); | 157 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); |
156 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", | 158 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
157 | rdev->mc.vram_location, | 159 | rdev->mc.vram_location, |
158 | rdev->mc.vram_location + rdev->mc.vram_size - 1); | 160 | rdev->mc.vram_location + rdev->mc.mc_vram_size - 1); |
161 | if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size) | ||
162 | DRM_INFO("radeon: VRAM less than aperture workaround enabled\n"); | ||
159 | DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); | 163 | DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); |
160 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", | 164 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", |
161 | rdev->mc.gtt_location, | 165 | rdev->mc.gtt_location, |
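
The GTT placement above rounds the end of VRAM up to a gtt_size boundary with the usual power-of-two alignment idiom, (tmp + size - 1) & ~(size - 1). A standalone sketch:

    #include <assert.h>
    #include <stdint.h>

    uint32_t align_up_pow2(uint32_t addr, uint32_t size)
    {
        assert(size != 0 && (size & (size - 1)) == 0);   /* power of two only */
        return (addr + size - 1) & ~(size - 1);
    }
    /* e.g. align_up_pow2(vram_location + mc_vram_size, gtt_size) places the
     * GTT right after VRAM on a gtt_size boundary. */
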
@@ -450,6 +454,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
450 | uint32_t flags) | 454 | uint32_t flags) |
451 | { | 455 | { |
452 | int r, ret; | 456 | int r, ret; |
457 | int dma_bits; | ||
453 | 458 | ||
454 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); | 459 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); |
455 | rdev->shutdown = false; | 460 | rdev->shutdown = false; |
@@ -492,8 +497,20 @@ int radeon_device_init(struct radeon_device *rdev, | |||
492 | return r; | 497 | return r; |
493 | } | 498 | } |
494 | 499 | ||
495 | /* Report DMA addressing limitation */ | 500 | /* set DMA mask + need_dma32 flags. |
496 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); | 501 | * PCIE - can handle 40-bits. |
502 | * IGP - can handle 40-bits (in theory) | ||
503 | * AGP - generally dma32 is safest | ||
504 | * PCI - only dma32 | ||
505 | */ | ||
506 | rdev->need_dma32 = false; | ||
507 | if (rdev->flags & RADEON_IS_AGP) | ||
508 | rdev->need_dma32 = true; | ||
509 | if (rdev->flags & RADEON_IS_PCI) | ||
510 | rdev->need_dma32 = true; | ||
511 | |||
512 | dma_bits = rdev->need_dma32 ? 32 : 40; | ||
513 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); | ||
497 | if (r) { | 514 | if (r) { |
498 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); | 515 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); |
499 | } | 516 | } |
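
The DMA setup above keeps AGP and plain PCI boards at a 32-bit mask and lets PCIE/IGP parts use 40 bits. A standalone sketch of that policy; the flag bit values here are placeholders, only the 32-vs-40-bit decision mirrors the hunk:

    #include <stdbool.h>
    #include <stdio.h>

    #define IS_AGP (1u << 0)                        /* placeholder flag bits */
    #define IS_PCI (1u << 1)

    int radeon_dma_bits(unsigned int flags, bool *need_dma32)
    {
        /* AGP and plain PCI stay at 32 bits; PCIE/IGP can address 40 bits */
        *need_dma32 = (flags & (IS_AGP | IS_PCI)) != 0;
        return *need_dma32 ? 32 : 40;
    }

    int main(void)
    {
        bool dma32;
        printf("AGP board:  %d-bit DMA mask\n", radeon_dma_bits(IS_AGP, &dma32));
        printf("PCIE board: %d-bit DMA mask\n", radeon_dma_bits(0, &dma32));
        return 0;
    }
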
@@ -546,27 +563,22 @@ int radeon_device_init(struct radeon_device *rdev, | |||
546 | radeon_combios_asic_init(rdev->ddev); | 563 | radeon_combios_asic_init(rdev->ddev); |
547 | } | 564 | } |
548 | } | 565 | } |
566 | /* Initialize clocks */ | ||
567 | r = radeon_clocks_init(rdev); | ||
568 | if (r) { | ||
569 | return r; | ||
570 | } | ||
549 | /* Get vram informations */ | 571 | /* Get vram informations */ |
550 | radeon_vram_info(rdev); | 572 | radeon_vram_info(rdev); |
551 | /* Device is severly broken if aper size > vram size. | 573 | |
552 | * for RN50/M6/M7 - Novell bug 204882 ? | ||
553 | */ | ||
554 | if (rdev->mc.vram_size < rdev->mc.aper_size) { | ||
555 | rdev->mc.aper_size = rdev->mc.vram_size; | ||
556 | } | ||
557 | /* Add an MTRR for the VRAM */ | 574 | /* Add an MTRR for the VRAM */ |
558 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, | 575 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
559 | MTRR_TYPE_WRCOMB, 1); | 576 | MTRR_TYPE_WRCOMB, 1); |
560 | DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", | 577 | DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", |
561 | rdev->mc.vram_size >> 20, | 578 | rdev->mc.real_vram_size >> 20, |
562 | (unsigned)rdev->mc.aper_size >> 20); | 579 | (unsigned)rdev->mc.aper_size >> 20); |
563 | DRM_INFO("RAM width %dbits %cDR\n", | 580 | DRM_INFO("RAM width %dbits %cDR\n", |
564 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); | 581 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); |
565 | /* Initialize clocks */ | ||
566 | r = radeon_clocks_init(rdev); | ||
567 | if (r) { | ||
568 | return r; | ||
569 | } | ||
570 | /* Initialize memory controller (also test AGP) */ | 582 | /* Initialize memory controller (also test AGP) */ |
571 | r = radeon_mc_init(rdev); | 583 | r = radeon_mc_init(rdev); |
572 | if (r) { | 584 | if (r) { |
@@ -626,6 +638,9 @@ int radeon_device_init(struct radeon_device *rdev, | |||
626 | if (!ret) { | 638 | if (!ret) { |
627 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | 639 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
628 | } | 640 | } |
641 | if (radeon_testing) { | ||
642 | radeon_test_moves(rdev); | ||
643 | } | ||
629 | if (radeon_benchmarking) { | 644 | if (radeon_benchmarking) { |
630 | radeon_benchmark(rdev); | 645 | radeon_benchmark(rdev); |
631 | } | 646 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3efcf1a526be..a8fa1bb84cf7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
187 | 187 | ||
188 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); | 188 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); |
189 | radeon_crtc->crtc_id = index; | 189 | radeon_crtc->crtc_id = index; |
190 | rdev->mode_info.crtcs[index] = radeon_crtc; | ||
190 | 191 | ||
191 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; | 192 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
192 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); | 193 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); |
@@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
491 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; | 492 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
492 | current_freq = radeon_div(tmp, ref_div * post_div); | 493 | current_freq = radeon_div(tmp, ref_div * post_div); |
493 | 494 | ||
494 | error = abs(current_freq - freq); | 495 | if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
496 | error = freq - current_freq; | ||
497 | error = error < 0 ? 0xffffffff : error; | ||
498 | } else | ||
499 | error = abs(current_freq - freq); | ||
495 | vco_diff = abs(vco - best_vco); | 500 | vco_diff = abs(vco - best_vco); |
496 | 501 | ||
497 | if ((best_vco == 0 && error < best_error) || | 502 | if ((best_vco == 0 && error < best_error) || |
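
The RADEON_PLL_PREFER_CLOSEST_LOWER handling above scores any candidate that overshoots the requested frequency with the worst possible error, so only equal-or-lower frequencies can win. A standalone sketch of that error metric:

    #include <stdbool.h>
    #include <stdint.h>

    uint32_t pll_error(uint32_t freq, uint32_t current_freq,
                       bool prefer_closest_lower)
    {
        if (prefer_closest_lower) {
            /* an overshooting candidate gets the worst possible error */
            if (current_freq > freq)
                return 0xffffffffu;
            return freq - current_freq;
        }
        /* default behaviour: plain absolute distance */
        return current_freq > freq ? current_freq - freq
                                   : freq - current_freq;
    }
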
@@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
657 | } | 662 | } |
658 | } | 663 | } |
659 | 664 | ||
660 | void radeon_init_disp_bandwidth(struct drm_device *dev) | 665 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
666 | struct drm_display_mode *mode, | ||
667 | struct drm_display_mode *adjusted_mode) | ||
661 | { | 668 | { |
662 | struct radeon_device *rdev = dev->dev_private; | 669 | struct drm_device *dev = crtc->dev; |
663 | struct drm_display_mode *modes[2]; | 670 | struct drm_encoder *encoder; |
664 | int pixel_bytes[2]; | 671 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
665 | struct drm_crtc *crtc; | 672 | struct radeon_encoder *radeon_encoder; |
666 | 673 | bool first = true; | |
667 | pixel_bytes[0] = pixel_bytes[1] = 0; | ||
668 | modes[0] = modes[1] = NULL; | ||
669 | |||
670 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
671 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
672 | 674 | ||
673 | if (crtc->enabled && crtc->fb) { | 675 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
674 | modes[radeon_crtc->crtc_id] = &crtc->mode; | 676 | radeon_encoder = to_radeon_encoder(encoder); |
675 | pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; | 677 | if (encoder->crtc != crtc) |
678 | continue; | ||
679 | if (first) { | ||
680 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | ||
681 | radeon_crtc->devices = radeon_encoder->devices; | ||
682 | memcpy(&radeon_crtc->native_mode, | ||
683 | &radeon_encoder->native_mode, | ||
684 | sizeof(struct radeon_native_mode)); | ||
685 | first = false; | ||
686 | } else { | ||
687 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { | ||
688 | /* WARNING: Right now this can't happen but | ||
689 | * in the future we need to check that the scaling | ||
690 | * is consistent across the different encoders | ||
691 | * (i.e. all encoders can work with the same | ||
692 | * scaling). | ||
693 | */ | ||
694 | DRM_ERROR("Scaling not consistent across encoders.\n"); | ||
695 | return false; | ||
696 | } | ||
676 | } | 697 | } |
677 | } | 698 | } |
678 | 699 | if (radeon_crtc->rmx_type != RMX_OFF) { | |
679 | if (ASIC_IS_AVIVO(rdev)) { | 700 | fixed20_12 a, b; |
680 | radeon_init_disp_bw_avivo(dev, | 701 | a.full = rfixed_const(crtc->mode.vdisplay); |
681 | modes[0], | 702 | b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); |
682 | pixel_bytes[0], | 703 | radeon_crtc->vsc.full = rfixed_div(a, b); |
683 | modes[1], | 704 | a.full = rfixed_const(crtc->mode.hdisplay); |
684 | pixel_bytes[1]); | 705 | b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); |
706 | radeon_crtc->hsc.full = rfixed_div(a, b); | ||
685 | } else { | 707 | } else { |
686 | radeon_init_disp_bw_legacy(dev, | 708 | radeon_crtc->vsc.full = rfixed_const(1); |
687 | modes[0], | 709 | radeon_crtc->hsc.full = rfixed_const(1); |
688 | pixel_bytes[0], | ||
689 | modes[1], | ||
690 | pixel_bytes[1]); | ||
691 | } | 710 | } |
711 | return true; | ||
692 | } | 712 | } |
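
radeon_crtc_scaling_mode_fixup() above ends by storing the scale factors as 20.12 fixed-point ratios of the requested mode size to the panel's native size, defaulting to 1.0 when the RMX scaler is off. A standalone sketch of that ratio, again with stand-in fixed-point helpers:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t fix20_12;

    fix20_12 fx_const(uint32_t v)            { return v << 12; }
    fix20_12 fx_div(fix20_12 a, fix20_12 b)  { return (fix20_12)(((uint64_t)a << 12) / b); }

    fix20_12 rmx_scale(uint32_t mode_size, uint32_t native_size, bool rmx_off)
    {
        if (rmx_off || native_size == 0)
            return fx_const(1);                 /* scaler off: ratio of 1.0 */
        return fx_div(fx_const(mode_size), fx_const(native_size));
    }
    /* e.g. rmx_scale(1024, 1280, false) == 0xccc, i.e. 0.8 in 20.12 fixed point */
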
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 84ba69f48784..3cfcee17dc56 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -89,6 +89,7 @@ int radeon_agpmode = 0; | |||
89 | int radeon_vram_limit = 0; | 89 | int radeon_vram_limit = 0; |
90 | int radeon_gart_size = 512; /* default gart size */ | 90 | int radeon_gart_size = 512; /* default gart size */ |
91 | int radeon_benchmarking = 0; | 91 | int radeon_benchmarking = 0; |
92 | int radeon_testing = 0; | ||
92 | int radeon_connector_table = 0; | 93 | int radeon_connector_table = 0; |
93 | #endif | 94 | #endif |
94 | 95 | ||
@@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600); | |||
117 | MODULE_PARM_DESC(benchmark, "Run benchmark"); | 118 | MODULE_PARM_DESC(benchmark, "Run benchmark"); |
118 | module_param_named(benchmark, radeon_benchmarking, int, 0444); | 119 | module_param_named(benchmark, radeon_benchmarking, int, 0444); |
119 | 120 | ||
121 | MODULE_PARM_DESC(test, "Run tests"); | ||
122 | module_param_named(test, radeon_testing, int, 0444); | ||
123 | |||
120 | MODULE_PARM_DESC(connector_table, "Force connector table"); | 124 | MODULE_PARM_DESC(connector_table, "Force connector table"); |
121 | module_param_named(connector_table, radeon_connector_table, int, 0444); | 125 | module_param_named(connector_table, radeon_connector_table, int, 0444); |
122 | #endif | 126 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c8ef0d14ffab..0a92706eac19 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
154 | 154 | ||
155 | if (mode->hdisplay < native_mode->panel_xres || | 155 | if (mode->hdisplay < native_mode->panel_xres || |
156 | mode->vdisplay < native_mode->panel_yres) { | 156 | mode->vdisplay < native_mode->panel_yres) { |
157 | radeon_encoder->flags |= RADEON_USE_RMX; | ||
158 | if (ASIC_IS_AVIVO(rdev)) { | 157 | if (ASIC_IS_AVIVO(rdev)) { |
159 | adjusted_mode->hdisplay = native_mode->panel_xres; | 158 | adjusted_mode->hdisplay = native_mode->panel_xres; |
160 | adjusted_mode->vdisplay = native_mode->panel_yres; | 159 | adjusted_mode->vdisplay = native_mode->panel_yres; |
@@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
197 | } | 196 | } |
198 | } | 197 | } |
199 | 198 | ||
199 | |||
200 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | 200 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
201 | struct drm_display_mode *mode, | 201 | struct drm_display_mode *mode, |
202 | struct drm_display_mode *adjusted_mode) | 202 | struct drm_display_mode *adjusted_mode) |
203 | { | 203 | { |
204 | |||
205 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 204 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
206 | 205 | ||
207 | radeon_encoder->flags &= ~RADEON_USE_RMX; | ||
208 | |||
209 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 206 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
210 | 207 | ||
211 | if (radeon_encoder->rmx_type != RMX_OFF) | 208 | if (radeon_encoder->rmx_type != RMX_OFF) |
@@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
808 | 805 | ||
809 | } | 806 | } |
810 | 807 | ||
811 | static void atom_rv515_force_tv_scaler(struct radeon_device *rdev) | ||
812 | { | ||
813 | |||
814 | WREG32(0x659C, 0x0); | ||
815 | WREG32(0x6594, 0x705); | ||
816 | WREG32(0x65A4, 0x10001); | ||
817 | WREG32(0x65D8, 0x0); | ||
818 | WREG32(0x65B0, 0x0); | ||
819 | WREG32(0x65C0, 0x0); | ||
820 | WREG32(0x65D4, 0x0); | ||
821 | WREG32(0x6578, 0x0); | ||
822 | WREG32(0x657C, 0x841880A8); | ||
823 | WREG32(0x6578, 0x1); | ||
824 | WREG32(0x657C, 0x84208680); | ||
825 | WREG32(0x6578, 0x2); | ||
826 | WREG32(0x657C, 0xBFF880B0); | ||
827 | WREG32(0x6578, 0x100); | ||
828 | WREG32(0x657C, 0x83D88088); | ||
829 | WREG32(0x6578, 0x101); | ||
830 | WREG32(0x657C, 0x84608680); | ||
831 | WREG32(0x6578, 0x102); | ||
832 | WREG32(0x657C, 0xBFF080D0); | ||
833 | WREG32(0x6578, 0x200); | ||
834 | WREG32(0x657C, 0x83988068); | ||
835 | WREG32(0x6578, 0x201); | ||
836 | WREG32(0x657C, 0x84A08680); | ||
837 | WREG32(0x6578, 0x202); | ||
838 | WREG32(0x657C, 0xBFF080F8); | ||
839 | WREG32(0x6578, 0x300); | ||
840 | WREG32(0x657C, 0x83588058); | ||
841 | WREG32(0x6578, 0x301); | ||
842 | WREG32(0x657C, 0x84E08660); | ||
843 | WREG32(0x6578, 0x302); | ||
844 | WREG32(0x657C, 0xBFF88120); | ||
845 | WREG32(0x6578, 0x400); | ||
846 | WREG32(0x657C, 0x83188040); | ||
847 | WREG32(0x6578, 0x401); | ||
848 | WREG32(0x657C, 0x85008660); | ||
849 | WREG32(0x6578, 0x402); | ||
850 | WREG32(0x657C, 0xBFF88150); | ||
851 | WREG32(0x6578, 0x500); | ||
852 | WREG32(0x657C, 0x82D88030); | ||
853 | WREG32(0x6578, 0x501); | ||
854 | WREG32(0x657C, 0x85408640); | ||
855 | WREG32(0x6578, 0x502); | ||
856 | WREG32(0x657C, 0xBFF88180); | ||
857 | WREG32(0x6578, 0x600); | ||
858 | WREG32(0x657C, 0x82A08018); | ||
859 | WREG32(0x6578, 0x601); | ||
860 | WREG32(0x657C, 0x85808620); | ||
861 | WREG32(0x6578, 0x602); | ||
862 | WREG32(0x657C, 0xBFF081B8); | ||
863 | WREG32(0x6578, 0x700); | ||
864 | WREG32(0x657C, 0x82608010); | ||
865 | WREG32(0x6578, 0x701); | ||
866 | WREG32(0x657C, 0x85A08600); | ||
867 | WREG32(0x6578, 0x702); | ||
868 | WREG32(0x657C, 0x800081F0); | ||
869 | WREG32(0x6578, 0x800); | ||
870 | WREG32(0x657C, 0x8228BFF8); | ||
871 | WREG32(0x6578, 0x801); | ||
872 | WREG32(0x657C, 0x85E085E0); | ||
873 | WREG32(0x6578, 0x802); | ||
874 | WREG32(0x657C, 0xBFF88228); | ||
875 | WREG32(0x6578, 0x10000); | ||
876 | WREG32(0x657C, 0x82A8BF00); | ||
877 | WREG32(0x6578, 0x10001); | ||
878 | WREG32(0x657C, 0x82A08CC0); | ||
879 | WREG32(0x6578, 0x10002); | ||
880 | WREG32(0x657C, 0x8008BEF8); | ||
881 | WREG32(0x6578, 0x10100); | ||
882 | WREG32(0x657C, 0x81F0BF28); | ||
883 | WREG32(0x6578, 0x10101); | ||
884 | WREG32(0x657C, 0x83608CA0); | ||
885 | WREG32(0x6578, 0x10102); | ||
886 | WREG32(0x657C, 0x8018BED0); | ||
887 | WREG32(0x6578, 0x10200); | ||
888 | WREG32(0x657C, 0x8148BF38); | ||
889 | WREG32(0x6578, 0x10201); | ||
890 | WREG32(0x657C, 0x84408C80); | ||
891 | WREG32(0x6578, 0x10202); | ||
892 | WREG32(0x657C, 0x8008BEB8); | ||
893 | WREG32(0x6578, 0x10300); | ||
894 | WREG32(0x657C, 0x80B0BF78); | ||
895 | WREG32(0x6578, 0x10301); | ||
896 | WREG32(0x657C, 0x85008C20); | ||
897 | WREG32(0x6578, 0x10302); | ||
898 | WREG32(0x657C, 0x8020BEA0); | ||
899 | WREG32(0x6578, 0x10400); | ||
900 | WREG32(0x657C, 0x8028BF90); | ||
901 | WREG32(0x6578, 0x10401); | ||
902 | WREG32(0x657C, 0x85E08BC0); | ||
903 | WREG32(0x6578, 0x10402); | ||
904 | WREG32(0x657C, 0x8018BE90); | ||
905 | WREG32(0x6578, 0x10500); | ||
906 | WREG32(0x657C, 0xBFB8BFB0); | ||
907 | WREG32(0x6578, 0x10501); | ||
908 | WREG32(0x657C, 0x86C08B40); | ||
909 | WREG32(0x6578, 0x10502); | ||
910 | WREG32(0x657C, 0x8010BE90); | ||
911 | WREG32(0x6578, 0x10600); | ||
912 | WREG32(0x657C, 0xBF58BFC8); | ||
913 | WREG32(0x6578, 0x10601); | ||
914 | WREG32(0x657C, 0x87A08AA0); | ||
915 | WREG32(0x6578, 0x10602); | ||
916 | WREG32(0x657C, 0x8010BE98); | ||
917 | WREG32(0x6578, 0x10700); | ||
918 | WREG32(0x657C, 0xBF10BFF0); | ||
919 | WREG32(0x6578, 0x10701); | ||
920 | WREG32(0x657C, 0x886089E0); | ||
921 | WREG32(0x6578, 0x10702); | ||
922 | WREG32(0x657C, 0x8018BEB0); | ||
923 | WREG32(0x6578, 0x10800); | ||
924 | WREG32(0x657C, 0xBED8BFE8); | ||
925 | WREG32(0x6578, 0x10801); | ||
926 | WREG32(0x657C, 0x89408940); | ||
927 | WREG32(0x6578, 0x10802); | ||
928 | WREG32(0x657C, 0xBFE8BED8); | ||
929 | WREG32(0x6578, 0x20000); | ||
930 | WREG32(0x657C, 0x80008000); | ||
931 | WREG32(0x6578, 0x20001); | ||
932 | WREG32(0x657C, 0x90008000); | ||
933 | WREG32(0x6578, 0x20002); | ||
934 | WREG32(0x657C, 0x80008000); | ||
935 | WREG32(0x6578, 0x20003); | ||
936 | WREG32(0x657C, 0x80008000); | ||
937 | WREG32(0x6578, 0x20100); | ||
938 | WREG32(0x657C, 0x80108000); | ||
939 | WREG32(0x6578, 0x20101); | ||
940 | WREG32(0x657C, 0x8FE0BF70); | ||
941 | WREG32(0x6578, 0x20102); | ||
942 | WREG32(0x657C, 0xBFE880C0); | ||
943 | WREG32(0x6578, 0x20103); | ||
944 | WREG32(0x657C, 0x80008000); | ||
945 | WREG32(0x6578, 0x20200); | ||
946 | WREG32(0x657C, 0x8018BFF8); | ||
947 | WREG32(0x6578, 0x20201); | ||
948 | WREG32(0x657C, 0x8F80BF08); | ||
949 | WREG32(0x6578, 0x20202); | ||
950 | WREG32(0x657C, 0xBFD081A0); | ||
951 | WREG32(0x6578, 0x20203); | ||
952 | WREG32(0x657C, 0xBFF88000); | ||
953 | WREG32(0x6578, 0x20300); | ||
954 | WREG32(0x657C, 0x80188000); | ||
955 | WREG32(0x6578, 0x20301); | ||
956 | WREG32(0x657C, 0x8EE0BEC0); | ||
957 | WREG32(0x6578, 0x20302); | ||
958 | WREG32(0x657C, 0xBFB082A0); | ||
959 | WREG32(0x6578, 0x20303); | ||
960 | WREG32(0x657C, 0x80008000); | ||
961 | WREG32(0x6578, 0x20400); | ||
962 | WREG32(0x657C, 0x80188000); | ||
963 | WREG32(0x6578, 0x20401); | ||
964 | WREG32(0x657C, 0x8E00BEA0); | ||
965 | WREG32(0x6578, 0x20402); | ||
966 | WREG32(0x657C, 0xBF8883C0); | ||
967 | WREG32(0x6578, 0x20403); | ||
968 | WREG32(0x657C, 0x80008000); | ||
969 | WREG32(0x6578, 0x20500); | ||
970 | WREG32(0x657C, 0x80188000); | ||
971 | WREG32(0x6578, 0x20501); | ||
972 | WREG32(0x657C, 0x8D00BE90); | ||
973 | WREG32(0x6578, 0x20502); | ||
974 | WREG32(0x657C, 0xBF588500); | ||
975 | WREG32(0x6578, 0x20503); | ||
976 | WREG32(0x657C, 0x80008008); | ||
977 | WREG32(0x6578, 0x20600); | ||
978 | WREG32(0x657C, 0x80188000); | ||
979 | WREG32(0x6578, 0x20601); | ||
980 | WREG32(0x657C, 0x8BC0BE98); | ||
981 | WREG32(0x6578, 0x20602); | ||
982 | WREG32(0x657C, 0xBF308660); | ||
983 | WREG32(0x6578, 0x20603); | ||
984 | WREG32(0x657C, 0x80008008); | ||
985 | WREG32(0x6578, 0x20700); | ||
986 | WREG32(0x657C, 0x80108000); | ||
987 | WREG32(0x6578, 0x20701); | ||
988 | WREG32(0x657C, 0x8A80BEB0); | ||
989 | WREG32(0x6578, 0x20702); | ||
990 | WREG32(0x657C, 0xBF0087C0); | ||
991 | WREG32(0x6578, 0x20703); | ||
992 | WREG32(0x657C, 0x80008008); | ||
993 | WREG32(0x6578, 0x20800); | ||
994 | WREG32(0x657C, 0x80108000); | ||
995 | WREG32(0x6578, 0x20801); | ||
996 | WREG32(0x657C, 0x8920BED0); | ||
997 | WREG32(0x6578, 0x20802); | ||
998 | WREG32(0x657C, 0xBED08920); | ||
999 | WREG32(0x6578, 0x20803); | ||
1000 | WREG32(0x657C, 0x80008010); | ||
1001 | WREG32(0x6578, 0x30000); | ||
1002 | WREG32(0x657C, 0x90008000); | ||
1003 | WREG32(0x6578, 0x30001); | ||
1004 | WREG32(0x657C, 0x80008000); | ||
1005 | WREG32(0x6578, 0x30100); | ||
1006 | WREG32(0x657C, 0x8FE0BF90); | ||
1007 | WREG32(0x6578, 0x30101); | ||
1008 | WREG32(0x657C, 0xBFF880A0); | ||
1009 | WREG32(0x6578, 0x30200); | ||
1010 | WREG32(0x657C, 0x8F60BF40); | ||
1011 | WREG32(0x6578, 0x30201); | ||
1012 | WREG32(0x657C, 0xBFE88180); | ||
1013 | WREG32(0x6578, 0x30300); | ||
1014 | WREG32(0x657C, 0x8EC0BF00); | ||
1015 | WREG32(0x6578, 0x30301); | ||
1016 | WREG32(0x657C, 0xBFC88280); | ||
1017 | WREG32(0x6578, 0x30400); | ||
1018 | WREG32(0x657C, 0x8DE0BEE0); | ||
1019 | WREG32(0x6578, 0x30401); | ||
1020 | WREG32(0x657C, 0xBFA083A0); | ||
1021 | WREG32(0x6578, 0x30500); | ||
1022 | WREG32(0x657C, 0x8CE0BED0); | ||
1023 | WREG32(0x6578, 0x30501); | ||
1024 | WREG32(0x657C, 0xBF7884E0); | ||
1025 | WREG32(0x6578, 0x30600); | ||
1026 | WREG32(0x657C, 0x8BA0BED8); | ||
1027 | WREG32(0x6578, 0x30601); | ||
1028 | WREG32(0x657C, 0xBF508640); | ||
1029 | WREG32(0x6578, 0x30700); | ||
1030 | WREG32(0x657C, 0x8A60BEE8); | ||
1031 | WREG32(0x6578, 0x30701); | ||
1032 | WREG32(0x657C, 0xBF2087A0); | ||
1033 | WREG32(0x6578, 0x30800); | ||
1034 | WREG32(0x657C, 0x8900BF00); | ||
1035 | WREG32(0x6578, 0x30801); | ||
1036 | WREG32(0x657C, 0xBF008900); | ||
1037 | } | ||
1038 | |||
1039 | static void | 808 | static void |
1040 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | 809 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
1041 | { | 810 | { |
@@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | |||
1074 | } | 843 | } |
1075 | 844 | ||
1076 | static void | 845 | static void |
1077 | atombios_overscan_setup(struct drm_encoder *encoder, | ||
1078 | struct drm_display_mode *mode, | ||
1079 | struct drm_display_mode *adjusted_mode) | ||
1080 | { | ||
1081 | struct drm_device *dev = encoder->dev; | ||
1082 | struct radeon_device *rdev = dev->dev_private; | ||
1083 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1084 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1085 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
1086 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
1087 | |||
1088 | memset(&args, 0, sizeof(args)); | ||
1089 | |||
1090 | args.usOverscanRight = 0; | ||
1091 | args.usOverscanLeft = 0; | ||
1092 | args.usOverscanBottom = 0; | ||
1093 | args.usOverscanTop = 0; | ||
1094 | args.ucCRTC = radeon_crtc->crtc_id; | ||
1095 | |||
1096 | if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
1097 | if (radeon_encoder->rmx_type == RMX_FULL) { | ||
1098 | args.usOverscanRight = 0; | ||
1099 | args.usOverscanLeft = 0; | ||
1100 | args.usOverscanBottom = 0; | ||
1101 | args.usOverscanTop = 0; | ||
1102 | } else if (radeon_encoder->rmx_type == RMX_CENTER) { | ||
1103 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
1104 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
1105 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
1106 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
1107 | } else if (radeon_encoder->rmx_type == RMX_ASPECT) { | ||
1108 | int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
1109 | int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
1110 | |||
1111 | if (a1 > a2) { | ||
1112 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
1113 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
1114 | } else if (a2 > a1) { | ||
1115 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
1116 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
1117 | } | ||
1118 | } | ||
1119 | } | ||
1120 | |||
1121 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1122 | |||
1123 | } | ||
1124 | |||
1125 | static void | ||
1126 | atombios_scaler_setup(struct drm_encoder *encoder) | ||
1127 | { | ||
1128 | struct drm_device *dev = encoder->dev; | ||
1129 | struct radeon_device *rdev = dev->dev_private; | ||
1130 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1131 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1132 | ENABLE_SCALER_PS_ALLOCATION args; | ||
1133 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
1134 | /* fixme - fill in enc_priv for atom dac */ | ||
1135 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
1136 | |||
1137 | if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) | ||
1138 | return; | ||
1139 | |||
1140 | memset(&args, 0, sizeof(args)); | ||
1141 | |||
1142 | args.ucScaler = radeon_crtc->crtc_id; | ||
1143 | |||
1144 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
1145 | switch (tv_std) { | ||
1146 | case TV_STD_NTSC: | ||
1147 | default: | ||
1148 | args.ucTVStandard = ATOM_TV_NTSC; | ||
1149 | break; | ||
1150 | case TV_STD_PAL: | ||
1151 | args.ucTVStandard = ATOM_TV_PAL; | ||
1152 | break; | ||
1153 | case TV_STD_PAL_M: | ||
1154 | args.ucTVStandard = ATOM_TV_PALM; | ||
1155 | break; | ||
1156 | case TV_STD_PAL_60: | ||
1157 | args.ucTVStandard = ATOM_TV_PAL60; | ||
1158 | break; | ||
1159 | case TV_STD_NTSC_J: | ||
1160 | args.ucTVStandard = ATOM_TV_NTSCJ; | ||
1161 | break; | ||
1162 | case TV_STD_SCART_PAL: | ||
1163 | args.ucTVStandard = ATOM_TV_PAL; /* ??? */ | ||
1164 | break; | ||
1165 | case TV_STD_SECAM: | ||
1166 | args.ucTVStandard = ATOM_TV_SECAM; | ||
1167 | break; | ||
1168 | case TV_STD_PAL_CN: | ||
1169 | args.ucTVStandard = ATOM_TV_PALCN; | ||
1170 | break; | ||
1171 | } | ||
1172 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
1173 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { | ||
1174 | args.ucTVStandard = ATOM_TV_CV; | ||
1175 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
1176 | } else if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
1177 | if (radeon_encoder->rmx_type == RMX_FULL) | ||
1178 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
1179 | else if (radeon_encoder->rmx_type == RMX_CENTER) | ||
1180 | args.ucEnable = ATOM_SCALER_CENTER; | ||
1181 | else if (radeon_encoder->rmx_type == RMX_ASPECT) | ||
1182 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
1183 | } else { | ||
1184 | if (ASIC_IS_AVIVO(rdev)) | ||
1185 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
1186 | else | ||
1187 | args.ucEnable = ATOM_SCALER_CENTER; | ||
1188 | } | ||
1189 | |||
1190 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1191 | |||
1192 | if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) | ||
1193 | && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { | ||
1194 | atom_rv515_force_tv_scaler(rdev); | ||
1195 | } | ||
1196 | |||
1197 | } | ||
1198 | |||
1199 | static void | ||
1200 | radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | 846 | radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) |
1201 | { | 847 | { |
1202 | struct drm_device *dev = encoder->dev; | 848 | struct drm_device *dev = encoder->dev; |
@@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1448 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1094 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1449 | 1095 | ||
1450 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 1096 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
1451 | atombios_overscan_setup(encoder, mode, adjusted_mode); | ||
1452 | atombios_scaler_setup(encoder); | ||
1453 | atombios_set_encoder_crtc_source(encoder); | 1097 | atombios_set_encoder_crtc_source(encoder); |
1454 | 1098 | ||
1455 | if (ASIC_IS_AVIVO(rdev)) { | 1099 | if (ASIC_IS_AVIVO(rdev)) { |
@@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1667 | 1311 | ||
1668 | radeon_encoder->encoder_id = encoder_id; | 1312 | radeon_encoder->encoder_id = encoder_id; |
1669 | radeon_encoder->devices = supported_device; | 1313 | radeon_encoder->devices = supported_device; |
1314 | radeon_encoder->rmx_type = RMX_OFF; | ||
1670 | 1315 | ||
1671 | switch (radeon_encoder->encoder_id) { | 1316 | switch (radeon_encoder->encoder_id) { |
1672 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1317 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 9e8f191eb64a..3206c0ad7b6c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno, | |||
101 | break; | 101 | break; |
102 | case 24: | 102 | case 24: |
103 | case 32: | 103 | case 32: |
104 | fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | | 104 | fb->pseudo_palette[regno] = |
105 | (green & 0xff00) | | 105 | (((red >> 8) & 0xff) << info->var.red.offset) | |
106 | ((blue & 0xff00) >> 8); | 106 | (((green >> 8) & 0xff) << info->var.green.offset) | |
107 | (((blue >> 8) & 0xff) << info->var.blue.offset); | ||
107 | break; | 108 | break; |
108 | } | 109 | } |
109 | } | 110 | } |
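
The setcolreg change above stops hard-coding an 0xRRGGBB layout and instead packs each truecolor pseudo-palette entry at whatever per-channel bit offsets the fb_info reports, which is presumably what lets the big-endian 24/32 bpp layouts added further down work unchanged. A minimal sketch of that packing, with hard-coded offsets standing in for info->var:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack fbdev's 16-bit colour components into one 32-bit pixel using
     * per-channel bit offsets (here 16/8/0, i.e. little-endian XRGB8888). */
    static uint32_t pack_pixel(uint16_t red, uint16_t green, uint16_t blue,
                               int red_off, int green_off, int blue_off)
    {
            return (((red   >> 8) & 0xff) << red_off)   |
                   (((green >> 8) & 0xff) << green_off) |
                   (((blue  >> 8) & 0xff) << blue_off);
    }

    int main(void)
    {
            /* full red, half green, no blue -> 0x00ff8000 */
            printf("0x%08x\n", pack_pixel(0xffff, 0x8000, 0x0000, 16, 8, 0));
            return 0;
    }
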
@@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, | |||
154 | var->transp.length = 0; | 155 | var->transp.length = 0; |
155 | var->transp.offset = 0; | 156 | var->transp.offset = 0; |
156 | break; | 157 | break; |
158 | #ifdef __LITTLE_ENDIAN | ||
157 | case 15: | 159 | case 15: |
158 | var->red.offset = 10; | 160 | var->red.offset = 10; |
159 | var->green.offset = 5; | 161 | var->green.offset = 5; |
@@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, | |||
194 | var->transp.length = 8; | 196 | var->transp.length = 8; |
195 | var->transp.offset = 24; | 197 | var->transp.offset = 24; |
196 | break; | 198 | break; |
199 | #else | ||
200 | case 24: | ||
201 | var->red.offset = 8; | ||
202 | var->green.offset = 16; | ||
203 | var->blue.offset = 24; | ||
204 | var->red.length = 8; | ||
205 | var->green.length = 8; | ||
206 | var->blue.length = 8; | ||
207 | var->transp.length = 0; | ||
208 | var->transp.offset = 0; | ||
209 | break; | ||
210 | case 32: | ||
211 | var->red.offset = 8; | ||
212 | var->green.offset = 16; | ||
213 | var->blue.offset = 24; | ||
214 | var->red.length = 8; | ||
215 | var->green.length = 8; | ||
216 | var->blue.length = 8; | ||
217 | var->transp.length = 8; | ||
218 | var->transp.offset = 0; | ||
219 | break; | ||
220 | #endif | ||
197 | default: | 221 | default: |
198 | return -EINVAL; | 222 | return -EINVAL; |
199 | } | 223 | } |
@@ -447,10 +471,10 @@ static struct notifier_block paniced = { | |||
447 | .notifier_call = radeonfb_panic, | 471 | .notifier_call = radeonfb_panic, |
448 | }; | 472 | }; |
449 | 473 | ||
450 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) | 474 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
451 | { | 475 | { |
452 | int aligned = width; | 476 | int aligned = width; |
453 | int align_large = (ASIC_IS_AVIVO(rdev)); | 477 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
454 | int pitch_mask = 0; | 478 | int pitch_mask = 0; |
455 | 479 | ||
456 | switch (bpp / 8) { | 480 | switch (bpp / 8) { |
@@ -488,12 +512,13 @@ int radeonfb_create(struct radeon_device *rdev, | |||
488 | u64 fb_gpuaddr; | 512 | u64 fb_gpuaddr; |
489 | void *fbptr = NULL; | 513 | void *fbptr = NULL; |
490 | unsigned long tmp; | 514 | unsigned long tmp; |
515 | bool fb_tiled = false; /* useful for testing */ | ||
491 | 516 | ||
492 | mode_cmd.width = surface_width; | 517 | mode_cmd.width = surface_width; |
493 | mode_cmd.height = surface_height; | 518 | mode_cmd.height = surface_height; |
494 | mode_cmd.bpp = 32; | 519 | mode_cmd.bpp = 32; |
495 | /* need to align pitch with crtc limits */ | 520 | /* need to align pitch with crtc limits */ |
496 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); | 521 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
497 | mode_cmd.depth = 24; | 522 | mode_cmd.depth = 24; |
498 | 523 | ||
499 | size = mode_cmd.pitch * mode_cmd.height; | 524 | size = mode_cmd.pitch * mode_cmd.height; |
@@ -511,6 +536,8 @@ int radeonfb_create(struct radeon_device *rdev, | |||
511 | } | 536 | } |
512 | robj = gobj->driver_private; | 537 | robj = gobj->driver_private; |
513 | 538 | ||
539 | if (fb_tiled) | ||
540 | radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); | ||
514 | mutex_lock(&rdev->ddev->struct_mutex); | 541 | mutex_lock(&rdev->ddev->struct_mutex); |
515 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); | 542 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
516 | if (fb == NULL) { | 543 | if (fb == NULL) { |
@@ -539,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev, | |||
539 | } | 566 | } |
540 | rfbdev = info->par; | 567 | rfbdev = info->par; |
541 | 568 | ||
569 | if (fb_tiled) | ||
570 | radeon_object_check_tiling(robj, 0, 0); | ||
571 | |||
542 | ret = radeon_object_kmap(robj, &fbptr); | 572 | ret = radeon_object_kmap(robj, &fbptr); |
543 | if (ret) { | 573 | if (ret) { |
544 | goto out_unref; | 574 | goto out_unref; |
@@ -572,6 +602,11 @@ int radeonfb_create(struct radeon_device *rdev, | |||
572 | info->var.width = -1; | 602 | info->var.width = -1; |
573 | info->var.xres = fb_width; | 603 | info->var.xres = fb_width; |
574 | info->var.yres = fb_height; | 604 | info->var.yres = fb_height; |
605 | |||
606 | /* setup aperture base/size for vesafb takeover */ | ||
607 | info->aperture_base = rdev->ddev->mode_config.fb_base; | ||
608 | info->aperture_size = rdev->mc.real_vram_size; | ||
609 | |||
575 | info->fix.mmio_start = 0; | 610 | info->fix.mmio_start = 0; |
576 | info->fix.mmio_len = 0; | 611 | info->fix.mmio_len = 0; |
577 | info->pixmap.size = 64*1024; | 612 | info->pixmap.size = 64*1024; |
@@ -600,6 +635,7 @@ int radeonfb_create(struct radeon_device *rdev, | |||
600 | info->var.transp.offset = 0; | 635 | info->var.transp.offset = 0; |
601 | info->var.transp.length = 0; | 636 | info->var.transp.length = 0; |
602 | break; | 637 | break; |
638 | #ifdef __LITTLE_ENDIAN | ||
603 | case 15: | 639 | case 15: |
604 | info->var.red.offset = 10; | 640 | info->var.red.offset = 10; |
605 | info->var.green.offset = 5; | 641 | info->var.green.offset = 5; |
@@ -639,7 +675,29 @@ int radeonfb_create(struct radeon_device *rdev, | |||
639 | info->var.transp.offset = 24; | 675 | info->var.transp.offset = 24; |
640 | info->var.transp.length = 8; | 676 | info->var.transp.length = 8; |
641 | break; | 677 | break; |
678 | #else | ||
679 | case 24: | ||
680 | info->var.red.offset = 8; | ||
681 | info->var.green.offset = 16; | ||
682 | info->var.blue.offset = 24; | ||
683 | info->var.red.length = 8; | ||
684 | info->var.green.length = 8; | ||
685 | info->var.blue.length = 8; | ||
686 | info->var.transp.offset = 0; | ||
687 | info->var.transp.length = 0; | ||
688 | break; | ||
689 | case 32: | ||
690 | info->var.red.offset = 8; | ||
691 | info->var.green.offset = 16; | ||
692 | info->var.blue.offset = 24; | ||
693 | info->var.red.length = 8; | ||
694 | info->var.green.length = 8; | ||
695 | info->var.blue.length = 8; | ||
696 | info->var.transp.offset = 0; | ||
697 | info->var.transp.length = 8; | ||
698 | break; | ||
642 | default: | 699 | default: |
700 | #endif | ||
643 | break; | 701 | break; |
644 | } | 702 | } |
645 | 703 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 96afbf5ae2ad..b4e48dd2e859 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -195,7 +195,7 @@ retry: | |||
195 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 195 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
196 | radeon_fence_signaled(fence), timeout); | 196 | radeon_fence_signaled(fence), timeout); |
197 | if (unlikely(r == -ERESTARTSYS)) { | 197 | if (unlikely(r == -ERESTARTSYS)) { |
198 | return -ERESTART; | 198 | return -EBUSY; |
199 | } | 199 | } |
200 | } else { | 200 | } else { |
201 | r = wait_event_timeout(rdev->fence_drv.queue, | 201 | r = wait_event_timeout(rdev->fence_drv.queue, |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d343a15316ec..2977539880fb 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
177 | return -ENOMEM; | 177 | return -ENOMEM; |
178 | } | 178 | } |
179 | rdev->gart.pages[p] = pagelist[i]; | 179 | rdev->gart.pages[p] = pagelist[i]; |
180 | page_base = (uint32_t)rdev->gart.pages_addr[p]; | 180 | page_base = rdev->gart.pages_addr[p]; |
181 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { | 181 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
182 | radeon_gart_set_page(rdev, t, page_base); | 182 | radeon_gart_set_page(rdev, t, page_base); |
183 | page_base += 4096; | 183 | page_base += 4096; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index eb516034235d..cded5180c752 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
157 | struct radeon_device *rdev = dev->dev_private; | 157 | struct radeon_device *rdev = dev->dev_private; |
158 | struct drm_radeon_gem_info *args = data; | 158 | struct drm_radeon_gem_info *args = data; |
159 | 159 | ||
160 | args->vram_size = rdev->mc.vram_size; | 160 | args->vram_size = rdev->mc.real_vram_size; |
161 | /* FIXME: report something that makes sense */ | 161 | /* FIXME: report something that makes sense */ |
162 | args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); | 162 | args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); |
163 | args->gart_size = rdev->mc.gtt_size; | 163 | args->gart_size = rdev->mc.gtt_size; |
164 | return 0; | 164 | return 0; |
165 | } | 165 | } |
@@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
285 | mutex_unlock(&dev->struct_mutex); | 285 | mutex_unlock(&dev->struct_mutex); |
286 | return r; | 286 | return r; |
287 | } | 287 | } |
288 | |||
289 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||
290 | struct drm_file *filp) | ||
291 | { | ||
292 | struct drm_radeon_gem_set_tiling *args = data; | ||
293 | struct drm_gem_object *gobj; | ||
294 | struct radeon_object *robj; | ||
295 | int r = 0; | ||
296 | |||
297 | DRM_DEBUG("%d \n", args->handle); | ||
298 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
299 | if (gobj == NULL) | ||
300 | return -EINVAL; | ||
301 | robj = gobj->driver_private; | ||
302 | radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); | ||
303 | mutex_lock(&dev->struct_mutex); | ||
304 | drm_gem_object_unreference(gobj); | ||
305 | mutex_unlock(&dev->struct_mutex); | ||
306 | return r; | ||
307 | } | ||
308 | |||
309 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||
310 | struct drm_file *filp) | ||
311 | { | ||
312 | struct drm_radeon_gem_get_tiling *args = data; | ||
313 | struct drm_gem_object *gobj; | ||
314 | struct radeon_object *robj; | ||
315 | int r = 0; | ||
316 | |||
317 | DRM_DEBUG("\n"); | ||
318 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
319 | if (gobj == NULL) | ||
320 | return -EINVAL; | ||
321 | robj = gobj->driver_private; | ||
322 | radeon_object_get_tiling_flags(robj, &args->tiling_flags, | ||
323 | &args->pitch); | ||
324 | mutex_lock(&dev->struct_mutex); | ||
325 | drm_gem_object_unreference(gobj); | ||
326 | mutex_unlock(&dev->struct_mutex); | ||
327 | return r; | ||
328 | } | ||
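
These two GEM tiling ioctls are registered in radeon_kms.c below as DRM_RADEON_GEM_SET_TILING / DRM_RADEON_GEM_GET_TILING. As a rough sketch of how userspace might drive the set-tiling path through libdrm's generic command interface — the struct layout is inferred from the fields the kernel side dereferences (handle, tiling_flags, pitch), and the header names are assumed rather than taken from an actual libdrm patch:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>           /* drmCommandWriteRead() */
    #include <drm/radeon_drm.h>    /* struct drm_radeon_gem_set_tiling, RADEON_TILING_* */

    /* Ask the kernel to treat an existing GEM object as macro-tiled. */
    static int radeon_bo_set_tiling(int fd, uint32_t handle, uint32_t pitch)
    {
            struct drm_radeon_gem_set_tiling args;

            memset(&args, 0, sizeof(args));
            args.handle = handle;
            args.tiling_flags = RADEON_TILING_MACRO;
            args.pitch = pitch;

            return drmCommandWriteRead(fd, DRM_RADEON_GEM_SET_TILING,
                                       &args, sizeof(args));
    }
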
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4612a7c146d1..937a2f1cdb46 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -291,5 +291,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { | |||
291 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), | 291 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), |
292 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), | 292 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), |
293 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), | 293 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), |
294 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), | ||
295 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), | ||
294 | }; | 296 | }; |
295 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 297 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8086ecf7f03d..7d06dc98a42a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -29,6 +29,171 @@ | |||
29 | #include "radeon_fixed.h" | 29 | #include "radeon_fixed.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | 31 | ||
32 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | ||
33 | struct drm_display_mode *mode, | ||
34 | struct drm_display_mode *adjusted_mode) | ||
35 | { | ||
36 | struct drm_device *dev = crtc->dev; | ||
37 | struct radeon_device *rdev = dev->dev_private; | ||
38 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
39 | int xres = mode->hdisplay; | ||
40 | int yres = mode->vdisplay; | ||
41 | bool hscale = true, vscale = true; | ||
42 | int hsync_wid; | ||
43 | int vsync_wid; | ||
44 | int hsync_start; | ||
45 | int blank_width; | ||
46 | u32 scale, inc, crtc_more_cntl; | ||
47 | u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; | ||
48 | u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; | ||
49 | u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; | ||
50 | struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; | ||
51 | |||
52 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | ||
53 | (RADEON_VERT_STRETCH_RESERVED | | ||
54 | RADEON_VERT_AUTO_RATIO_INC); | ||
55 | fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & | ||
56 | (RADEON_HORZ_FP_LOOP_STRETCH | | ||
57 | RADEON_HORZ_AUTO_RATIO_INC); | ||
58 | |||
59 | crtc_more_cntl = 0; | ||
60 | if ((rdev->family == CHIP_RS100) || | ||
61 | (rdev->family == CHIP_RS200)) { | ||
62 | /* This is to workaround the asic bug for RMX, some versions | ||
63 | of BIOS dosen't have this register initialized correctly. */ | ||
64 | crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; | ||
65 | } | ||
66 | |||
67 | |||
68 | fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) | ||
69 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
70 | |||
71 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
72 | if (!hsync_wid) | ||
73 | hsync_wid = 1; | ||
74 | hsync_start = mode->crtc_hsync_start - 8; | ||
75 | |||
76 | fp_h_sync_strt_wid = ((hsync_start & 0x1fff) | ||
77 | | ((hsync_wid & 0x3f) << 16) | ||
78 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
79 | ? RADEON_CRTC_H_SYNC_POL | ||
80 | : 0)); | ||
81 | |||
82 | fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) | ||
83 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
84 | |||
85 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
86 | if (!vsync_wid) | ||
87 | vsync_wid = 1; | ||
88 | |||
89 | fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) | ||
90 | | ((vsync_wid & 0x1f) << 16) | ||
91 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
92 | ? RADEON_CRTC_V_SYNC_POL | ||
93 | : 0)); | ||
94 | |||
95 | fp_horz_vert_active = 0; | ||
96 | |||
97 | if (native_mode->panel_xres == 0 || | ||
98 | native_mode->panel_yres == 0) { | ||
99 | hscale = false; | ||
100 | vscale = false; | ||
101 | } else { | ||
102 | if (xres > native_mode->panel_xres) | ||
103 | xres = native_mode->panel_xres; | ||
104 | if (yres > native_mode->panel_yres) | ||
105 | yres = native_mode->panel_yres; | ||
106 | |||
107 | if (xres == native_mode->panel_xres) | ||
108 | hscale = false; | ||
109 | if (yres == native_mode->panel_yres) | ||
110 | vscale = false; | ||
111 | } | ||
112 | |||
113 | switch (radeon_crtc->rmx_type) { | ||
114 | case RMX_FULL: | ||
115 | case RMX_ASPECT: | ||
116 | if (!hscale) | ||
117 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
118 | else { | ||
119 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | ||
120 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | ||
121 | / native_mode->panel_xres + 1; | ||
122 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | ||
123 | RADEON_HORZ_STRETCH_BLEND | | ||
124 | RADEON_HORZ_STRETCH_ENABLE | | ||
125 | ((native_mode->panel_xres/8-1) << 16)); | ||
126 | } | ||
127 | |||
128 | if (!vscale) | ||
129 | fp_vert_stretch |= ((yres-1) << 12); | ||
130 | else { | ||
131 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | ||
132 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | ||
133 | / native_mode->panel_yres + 1; | ||
134 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | ||
135 | RADEON_VERT_STRETCH_ENABLE | | ||
136 | RADEON_VERT_STRETCH_BLEND | | ||
137 | ((native_mode->panel_yres-1) << 12)); | ||
138 | } | ||
139 | break; | ||
140 | case RMX_CENTER: | ||
141 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
142 | fp_vert_stretch |= ((yres-1) << 12); | ||
143 | |||
144 | crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | | ||
145 | RADEON_CRTC_AUTO_VERT_CENTER_EN); | ||
146 | |||
147 | blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; | ||
148 | if (blank_width > 110) | ||
149 | blank_width = 110; | ||
150 | |||
151 | fp_crtc_h_total_disp = (((blank_width) & 0x3ff) | ||
152 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
153 | |||
154 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
155 | if (!hsync_wid) | ||
156 | hsync_wid = 1; | ||
157 | |||
158 | fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) | ||
159 | | ((hsync_wid & 0x3f) << 16) | ||
160 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
161 | ? RADEON_CRTC_H_SYNC_POL | ||
162 | : 0)); | ||
163 | |||
164 | fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) | ||
165 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
166 | |||
167 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
168 | if (!vsync_wid) | ||
169 | vsync_wid = 1; | ||
170 | |||
171 | fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) | ||
172 | | ((vsync_wid & 0x1f) << 16) | ||
173 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
174 | ? RADEON_CRTC_V_SYNC_POL | ||
175 | : 0))); | ||
176 | |||
177 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | ||
178 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | ||
179 | break; | ||
180 | case RMX_OFF: | ||
181 | default: | ||
182 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
183 | fp_vert_stretch |= ((yres-1) << 12); | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); | ||
188 | WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); | ||
189 | WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); | ||
190 | WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); | ||
191 | WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); | ||
192 | WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); | ||
193 | WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); | ||
194 | WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); | ||
195 | } | ||
196 | |||
32 | void radeon_restore_common_regs(struct drm_device *dev) | 197 | void radeon_restore_common_regs(struct drm_device *dev) |
33 | { | 198 | { |
34 | /* don't need this yet */ | 199 | /* don't need this yet */ |
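
In the RMX_FULL/RMX_ASPECT path of radeon_legacy_rmx_mode_set() above, the horizontal stretch ratio is just the source-to-panel width ratio scaled by RADEON_HORZ_STRETCH_RATIO_MAX. A tiny worked example (4096 is assumed for the ratio maximum purely for illustration; the real constant lives in radeon_reg.h):

    #include <stdio.h>

    /* Illustrative only: the real maximum comes from radeon_reg.h. */
    #define HORZ_STRETCH_RATIO_MAX 4096

    static unsigned int stretch_ratio(int xres, int panel_xres, int auto_inc)
    {
            /* Same formula as the RMX_FULL/RMX_ASPECT case above. */
            return ((xres + auto_inc) * HORZ_STRETCH_RATIO_MAX) / panel_xres + 1;
    }

    int main(void)
    {
            /* 1024-wide mode stretched to a 1280-wide panel -> ~0.8 * 4096 */
            printf("%u\n", stretch_ratio(1024, 1280, 0));   /* prints 3277 */
            return 0;
    }
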
@@ -235,6 +400,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
235 | uint64_t base; | 400 | uint64_t base; |
236 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; | 401 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; |
237 | uint32_t crtc_pitch, pitch_pixels; | 402 | uint32_t crtc_pitch, pitch_pixels; |
403 | uint32_t tiling_flags; | ||
238 | 404 | ||
239 | DRM_DEBUG("\n"); | 405 | DRM_DEBUG("\n"); |
240 | 406 | ||
@@ -244,7 +410,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
244 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { | 410 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { |
245 | return -EINVAL; | 411 | return -EINVAL; |
246 | } | 412 | } |
247 | crtc_offset = (u32)base; | 413 | /* if scanout was in GTT this really wouldn't work */ |
414 | /* crtc offset is from display base addr not FB location */ | ||
415 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; | ||
416 | |||
417 | base -= radeon_crtc->legacy_display_base_addr; | ||
418 | |||
248 | crtc_offset_cntl = 0; | 419 | crtc_offset_cntl = 0; |
249 | 420 | ||
250 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); | 421 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); |
@@ -253,8 +424,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
253 | (crtc->fb->bits_per_pixel * 8)); | 424 | (crtc->fb->bits_per_pixel * 8)); |
254 | crtc_pitch |= crtc_pitch << 16; | 425 | crtc_pitch |= crtc_pitch << 16; |
255 | 426 | ||
256 | /* TODO tiling */ | 427 | radeon_object_get_tiling_flags(obj->driver_private, |
257 | if (0) { | 428 | &tiling_flags, NULL); |
429 | if (tiling_flags & RADEON_TILING_MICRO) | ||
430 | DRM_ERROR("trying to scan out a microtiled buffer\n"); | ||
431 | |||
432 | if (tiling_flags & RADEON_TILING_MACRO) { | ||
258 | if (ASIC_IS_R300(rdev)) | 433 | if (ASIC_IS_R300(rdev)) |
259 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | | 434 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | |
260 | R300_CRTC_MICRO_TILE_BUFFER_DIS | | 435 | R300_CRTC_MICRO_TILE_BUFFER_DIS | |
@@ -270,15 +445,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
270 | crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; | 445 | crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; |
271 | } | 446 | } |
272 | 447 | ||
273 | 448 | if (tiling_flags & RADEON_TILING_MACRO) { | |
274 | /* TODO more tiling */ | ||
275 | if (0) { | ||
276 | if (ASIC_IS_R300(rdev)) { | 449 | if (ASIC_IS_R300(rdev)) { |
277 | crtc_tile_x0_y0 = x | (y << 16); | 450 | crtc_tile_x0_y0 = x | (y << 16); |
278 | base &= ~0x7ff; | 451 | base &= ~0x7ff; |
279 | } else { | 452 | } else { |
280 | int byteshift = crtc->fb->bits_per_pixel >> 4; | 453 | int byteshift = crtc->fb->bits_per_pixel >> 4; |
281 | int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; | 454 | int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; |
282 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); | 455 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); |
283 | crtc_offset_cntl |= (y % 16); | 456 | crtc_offset_cntl |= (y % 16); |
284 | } | 457 | } |
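
As a sanity check on the pre-R300 macro-tile offset math above (which now uses pitch_pixels rather than fb->width), here is the same arithmetic pulled out into a standalone helper; the example coordinates and pitch are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the legacy macro-tile scanout offset calculation above. */
    static uint64_t macro_tile_offset(int x, int y, int pitch_pixels, int bpp)
    {
            int byteshift = bpp >> 4;                /* 1 for 16bpp, 2 for 32bpp */
            uint64_t tile_addr =
                    (uint64_t)((((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11);

            return tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
    }

    int main(void)
    {
            /* e.g. panning to (100, 50) on a 1280-pixel-wide 32bpp framebuffer */
            printf("offset=0x%llx\n",
                   (unsigned long long)macro_tile_offset(100, 50, 1280, 32));
            return 0;
    }
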
@@ -303,11 +476,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
303 | 476 | ||
304 | base &= ~7; | 477 | base &= ~7; |
305 | 478 | ||
306 | /* update sarea TODO */ | ||
307 | |||
308 | crtc_offset = (u32)base; | 479 | crtc_offset = (u32)base; |
309 | 480 | ||
310 | WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location); | 481 | WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); |
311 | 482 | ||
312 | if (ASIC_IS_R300(rdev)) { | 483 | if (ASIC_IS_R300(rdev)) { |
313 | if (radeon_crtc->crtc_id) | 484 | if (radeon_crtc->crtc_id) |
@@ -751,6 +922,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, | |||
751 | struct drm_display_mode *mode, | 922 | struct drm_display_mode *mode, |
752 | struct drm_display_mode *adjusted_mode) | 923 | struct drm_display_mode *adjusted_mode) |
753 | { | 924 | { |
925 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
926 | return false; | ||
754 | return true; | 927 | return true; |
755 | } | 928 | } |
756 | 929 | ||
@@ -759,16 +932,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
759 | struct drm_display_mode *adjusted_mode, | 932 | struct drm_display_mode *adjusted_mode, |
760 | int x, int y, struct drm_framebuffer *old_fb) | 933 | int x, int y, struct drm_framebuffer *old_fb) |
761 | { | 934 | { |
762 | 935 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | |
763 | DRM_DEBUG("\n"); | 936 | struct drm_device *dev = crtc->dev; |
937 | struct radeon_device *rdev = dev->dev_private; | ||
764 | 938 | ||
765 | /* TODO TV */ | 939 | /* TODO TV */ |
766 | |||
767 | radeon_crtc_set_base(crtc, x, y, old_fb); | 940 | radeon_crtc_set_base(crtc, x, y, old_fb); |
768 | radeon_set_crtc_timing(crtc, adjusted_mode); | 941 | radeon_set_crtc_timing(crtc, adjusted_mode); |
769 | radeon_set_pll(crtc, adjusted_mode); | 942 | radeon_set_pll(crtc, adjusted_mode); |
770 | radeon_init_disp_bandwidth(crtc->dev); | 943 | radeon_bandwidth_update(rdev); |
771 | 944 | if (radeon_crtc->crtc_id == 0) { | |
945 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | ||
946 | } else { | ||
947 | if (radeon_crtc->rmx_type != RMX_OFF) { | ||
948 | /* FIXME: only the first crtc has RMX; what should | ||
949 | * we do here? | ||
950 | */ | ||
951 | DRM_ERROR("Mode needs scaling but only the first crtc can do that.\n"); | ||
952 | } | ||
953 | } | ||
772 | return 0; | 954 | return 0; |
773 | } | 955 | } |
774 | 956 | ||
@@ -799,478 +981,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev, | |||
799 | radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; | 981 | radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; |
800 | drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); | 982 | drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); |
801 | } | 983 | } |
802 | |||
803 | void radeon_init_disp_bw_legacy(struct drm_device *dev, | ||
804 | struct drm_display_mode *mode1, | ||
805 | uint32_t pixel_bytes1, | ||
806 | struct drm_display_mode *mode2, | ||
807 | uint32_t pixel_bytes2) | ||
808 | { | ||
809 | struct radeon_device *rdev = dev->dev_private; | ||
810 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; | ||
811 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; | ||
812 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | ||
813 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | ||
814 | fixed20_12 memtcas_ff[8] = { | ||
815 | fixed_init(1), | ||
816 | fixed_init(2), | ||
817 | fixed_init(3), | ||
818 | fixed_init(0), | ||
819 | fixed_init_half(1), | ||
820 | fixed_init_half(2), | ||
821 | fixed_init(0), | ||
822 | }; | ||
823 | fixed20_12 memtcas_rs480_ff[8] = { | ||
824 | fixed_init(0), | ||
825 | fixed_init(1), | ||
826 | fixed_init(2), | ||
827 | fixed_init(3), | ||
828 | fixed_init(0), | ||
829 | fixed_init_half(1), | ||
830 | fixed_init_half(2), | ||
831 | fixed_init_half(3), | ||
832 | }; | ||
833 | fixed20_12 memtcas2_ff[8] = { | ||
834 | fixed_init(0), | ||
835 | fixed_init(1), | ||
836 | fixed_init(2), | ||
837 | fixed_init(3), | ||
838 | fixed_init(4), | ||
839 | fixed_init(5), | ||
840 | fixed_init(6), | ||
841 | fixed_init(7), | ||
842 | }; | ||
843 | fixed20_12 memtrbs[8] = { | ||
844 | fixed_init(1), | ||
845 | fixed_init_half(1), | ||
846 | fixed_init(2), | ||
847 | fixed_init_half(2), | ||
848 | fixed_init(3), | ||
849 | fixed_init_half(3), | ||
850 | fixed_init(4), | ||
851 | fixed_init_half(4) | ||
852 | }; | ||
853 | fixed20_12 memtrbs_r4xx[8] = { | ||
854 | fixed_init(4), | ||
855 | fixed_init(5), | ||
856 | fixed_init(6), | ||
857 | fixed_init(7), | ||
858 | fixed_init(8), | ||
859 | fixed_init(9), | ||
860 | fixed_init(10), | ||
861 | fixed_init(11) | ||
862 | }; | ||
863 | fixed20_12 min_mem_eff; | ||
864 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | ||
865 | fixed20_12 cur_latency_mclk, cur_latency_sclk; | ||
866 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, | ||
867 | disp_drain_rate2, read_return_rate; | ||
868 | fixed20_12 time_disp1_drop_priority; | ||
869 | int c; | ||
870 | int cur_size = 16; /* in octawords */ | ||
871 | int critical_point = 0, critical_point2; | ||
872 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ | ||
873 | int stop_req, max_stop_req; | ||
874 | |||
875 | min_mem_eff.full = rfixed_const_8(0); | ||
876 | /* get modes */ | ||
877 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | ||
878 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | ||
879 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
880 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
881 | /* check crtc enables */ | ||
882 | if (mode2) | ||
883 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
884 | if (mode1) | ||
885 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
886 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * determine is there is enough bw for current mode | ||
891 | */ | ||
892 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
893 | temp_ff.full = rfixed_const(100); | ||
894 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
895 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
896 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
897 | |||
898 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
899 | temp_ff.full = rfixed_const(temp); | ||
900 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
901 | |||
902 | pix_clk.full = 0; | ||
903 | pix_clk2.full = 0; | ||
904 | peak_disp_bw.full = 0; | ||
905 | if (mode1) { | ||
906 | temp_ff.full = rfixed_const(1000); | ||
907 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
908 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
909 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
910 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
911 | } | ||
912 | if (mode2) { | ||
913 | temp_ff.full = rfixed_const(1000); | ||
914 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
915 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
916 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
917 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
918 | } | ||
919 | |||
920 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
921 | if (peak_disp_bw.full >= mem_bw.full) { | ||
922 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" | ||
923 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
924 | } | ||
925 | |||
926 | /* Get values from the EXT_MEM_CNTL register...converting its contents. */ | ||
927 | temp = RREG32(RADEON_MEM_TIMING_CNTL); | ||
928 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ | ||
929 | mem_trcd = ((temp >> 2) & 0x3) + 1; | ||
930 | mem_trp = ((temp & 0x3)) + 1; | ||
931 | mem_tras = ((temp & 0x70) >> 4) + 1; | ||
932 | } else if (rdev->family == CHIP_R300 || | ||
933 | rdev->family == CHIP_R350) { /* r300, r350 */ | ||
934 | mem_trcd = (temp & 0x7) + 1; | ||
935 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
936 | mem_tras = ((temp >> 11) & 0xf) + 4; | ||
937 | } else if (rdev->family == CHIP_RV350 || | ||
938 | rdev->family <= CHIP_RV380) { | ||
939 | /* rv3x0 */ | ||
940 | mem_trcd = (temp & 0x7) + 3; | ||
941 | mem_trp = ((temp >> 8) & 0x7) + 3; | ||
942 | mem_tras = ((temp >> 11) & 0xf) + 6; | ||
943 | } else if (rdev->family == CHIP_R420 || | ||
944 | rdev->family == CHIP_R423 || | ||
945 | rdev->family == CHIP_RV410) { | ||
946 | /* r4xx */ | ||
947 | mem_trcd = (temp & 0xf) + 3; | ||
948 | if (mem_trcd > 15) | ||
949 | mem_trcd = 15; | ||
950 | mem_trp = ((temp >> 8) & 0xf) + 3; | ||
951 | if (mem_trp > 15) | ||
952 | mem_trp = 15; | ||
953 | mem_tras = ((temp >> 12) & 0x1f) + 6; | ||
954 | if (mem_tras > 31) | ||
955 | mem_tras = 31; | ||
956 | } else { /* RV200, R200 */ | ||
957 | mem_trcd = (temp & 0x7) + 1; | ||
958 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
959 | mem_tras = ((temp >> 12) & 0xf) + 4; | ||
960 | } | ||
961 | /* convert to FF */ | ||
962 | trcd_ff.full = rfixed_const(mem_trcd); | ||
963 | trp_ff.full = rfixed_const(mem_trp); | ||
964 | tras_ff.full = rfixed_const(mem_tras); | ||
965 | |||
966 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ | ||
967 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | ||
968 | data = (temp & (7 << 20)) >> 20; | ||
969 | if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { | ||
970 | if (rdev->family == CHIP_RS480) /* don't think rs400 */ | ||
971 | tcas_ff = memtcas_rs480_ff[data]; | ||
972 | else | ||
973 | tcas_ff = memtcas_ff[data]; | ||
974 | } else | ||
975 | tcas_ff = memtcas2_ff[data]; | ||
976 | |||
977 | if (rdev->family == CHIP_RS400 || | ||
978 | rdev->family == CHIP_RS480) { | ||
979 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | ||
980 | data = (temp >> 23) & 0x7; | ||
981 | if (data < 5) | ||
982 | tcas_ff.full += rfixed_const(data); | ||
983 | } | ||
984 | |||
985 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | ||
986 | /* on the R300, Tcas is included in Trbs. | ||
987 | */ | ||
988 | temp = RREG32(RADEON_MEM_CNTL); | ||
989 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); | ||
990 | if (data == 1) { | ||
991 | if (R300_MEM_USE_CD_CH_ONLY & temp) { | ||
992 | temp = RREG32(R300_MC_IND_INDEX); | ||
993 | temp &= ~R300_MC_IND_ADDR_MASK; | ||
994 | temp |= R300_MC_READ_CNTL_CD_mcind; | ||
995 | WREG32(R300_MC_IND_INDEX, temp); | ||
996 | temp = RREG32(R300_MC_IND_DATA); | ||
997 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); | ||
998 | } else { | ||
999 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
1000 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
1001 | } | ||
1002 | } else { | ||
1003 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
1004 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
1005 | } | ||
1006 | if (rdev->family == CHIP_RV410 || | ||
1007 | rdev->family == CHIP_R420 || | ||
1008 | rdev->family == CHIP_R423) | ||
1009 | trbs_ff = memtrbs_r4xx[data]; | ||
1010 | else | ||
1011 | trbs_ff = memtrbs[data]; | ||
1012 | tcas_ff.full += trbs_ff.full; | ||
1013 | } | ||
1014 | |||
1015 | sclk_eff_ff.full = sclk_ff.full; | ||
1016 | |||
1017 | if (rdev->flags & RADEON_IS_AGP) { | ||
1018 | fixed20_12 agpmode_ff; | ||
1019 | agpmode_ff.full = rfixed_const(radeon_agpmode); | ||
1020 | temp_ff.full = rfixed_const_666(16); | ||
1021 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | ||
1022 | } | ||
1023 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | ||
1024 | |||
1025 | if (ASIC_IS_R300(rdev)) { | ||
1026 | sclk_delay_ff.full = rfixed_const(250); | ||
1027 | } else { | ||
1028 | if ((rdev->family == CHIP_RV100) || | ||
1029 | rdev->flags & RADEON_IS_IGP) { | ||
1030 | if (rdev->mc.vram_is_ddr) | ||
1031 | sclk_delay_ff.full = rfixed_const(41); | ||
1032 | else | ||
1033 | sclk_delay_ff.full = rfixed_const(33); | ||
1034 | } else { | ||
1035 | if (rdev->mc.vram_width == 128) | ||
1036 | sclk_delay_ff.full = rfixed_const(57); | ||
1037 | else | ||
1038 | sclk_delay_ff.full = rfixed_const(41); | ||
1039 | } | ||
1040 | } | ||
1041 | |||
1042 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | ||
1043 | |||
1044 | if (rdev->mc.vram_is_ddr) { | ||
1045 | if (rdev->mc.vram_width == 32) { | ||
1046 | k1.full = rfixed_const(40); | ||
1047 | c = 3; | ||
1048 | } else { | ||
1049 | k1.full = rfixed_const(20); | ||
1050 | c = 1; | ||
1051 | } | ||
1052 | } else { | ||
1053 | k1.full = rfixed_const(40); | ||
1054 | c = 3; | ||
1055 | } | ||
1056 | |||
1057 | temp_ff.full = rfixed_const(2); | ||
1058 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | ||
1059 | temp_ff.full = rfixed_const(c); | ||
1060 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | ||
1061 | temp_ff.full = rfixed_const(4); | ||
1062 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | ||
1063 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | ||
1064 | mc_latency_mclk.full += k1.full; | ||
1065 | |||
1066 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | ||
1067 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | ||
1068 | |||
1069 | /* | ||
1070 | HW cursor time assuming worst case of full size colour cursor. | ||
1071 | */ | ||
1072 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | ||
1073 | temp_ff.full += trcd_ff.full; | ||
1074 | if (temp_ff.full < tras_ff.full) | ||
1075 | temp_ff.full = tras_ff.full; | ||
1076 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | ||
1077 | |||
1078 | temp_ff.full = rfixed_const(cur_size); | ||
1079 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | ||
1080 | /* | ||
1081 | Find the total latency for the display data. | ||
1082 | */ | ||
1083 | disp_latency_overhead.full = rfixed_const(80); | ||
1084 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | ||
1085 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | ||
1086 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | ||
1087 | |||
1088 | if (mc_latency_mclk.full > mc_latency_sclk.full) | ||
1089 | disp_latency.full = mc_latency_mclk.full; | ||
1090 | else | ||
1091 | disp_latency.full = mc_latency_sclk.full; | ||
1092 | |||
1093 | /* setup Max GRPH_STOP_REQ default value */ | ||
1094 | if (ASIC_IS_RV100(rdev)) | ||
1095 | max_stop_req = 0x5c; | ||
1096 | else | ||
1097 | max_stop_req = 0x7c; | ||
1098 | |||
1099 | if (mode1) { | ||
1100 | /* CRTC1 | ||
1101 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. | ||
1102 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] | ||
1103 | */ | ||
1104 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; | ||
1105 | |||
1106 | if (stop_req > max_stop_req) | ||
1107 | stop_req = max_stop_req; | ||
1108 | |||
1109 | /* | ||
1110 | Find the drain rate of the display buffer. | ||
1111 | */ | ||
1112 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | ||
1113 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | ||
1114 | |||
1115 | /* | ||
1116 | Find the critical point of the display buffer. | ||
1117 | */ | ||
1118 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | ||
1119 | crit_point_ff.full += rfixed_const_half(0); | ||
1120 | |||
1121 | critical_point = rfixed_trunc(crit_point_ff); | ||
1122 | |||
1123 | if (rdev->disp_priority == 2) { | ||
1124 | critical_point = 0; | ||
1125 | } | ||
1126 | |||
1127 | /* | ||
1128 | The critical point should never be above max_stop_req-4. Setting | ||
1129 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. | ||
1130 | */ | ||
1131 | if (max_stop_req - critical_point < 4) | ||
1132 | critical_point = 0; | ||
1133 | |||
1134 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { | ||
1135 | /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */ | ||
1136 | critical_point = 0x10; | ||
1137 | } | ||
1138 | |||
1139 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); | ||
1140 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
1141 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
1142 | temp &= ~(RADEON_GRPH_START_REQ_MASK); | ||
1143 | if ((rdev->family == CHIP_R350) && | ||
1144 | (stop_req > 0x15)) { | ||
1145 | stop_req -= 0x10; | ||
1146 | } | ||
1147 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
1148 | temp |= RADEON_GRPH_BUFFER_SIZE; | ||
1149 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
1150 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
1151 | RADEON_GRPH_STOP_CNTL); | ||
1152 | /* | ||
1153 | Write the result into the register. | ||
1154 | */ | ||
1155 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
1156 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
1157 | |||
1158 | #if 0 | ||
1159 | if ((rdev->family == CHIP_RS400) || | ||
1160 | (rdev->family == CHIP_RS480)) { | ||
1161 | /* attempt to program RS400 disp regs correctly ??? */ | ||
1162 | temp = RREG32(RS400_DISP1_REG_CNTL); | ||
1163 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | | ||
1164 | RS400_DISP1_STOP_REQ_LEVEL_MASK); | ||
1165 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | | ||
1166 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
1167 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
1168 | temp = RREG32(RS400_DMIF_MEM_CNTL1); | ||
1169 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | | ||
1170 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); | ||
1171 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | | ||
1172 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | | ||
1173 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); | ||
1174 | } | ||
1175 | #endif | ||
1176 | |||
1177 | DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", | ||
1178 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ | ||
1179 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); | ||
1180 | } | ||
1181 | |||
1182 | if (mode2) { | ||
1183 | u32 grph2_cntl; | ||
1184 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; | ||
1185 | |||
1186 | if (stop_req > max_stop_req) | ||
1187 | stop_req = max_stop_req; | ||
1188 | |||
1189 | /* | ||
1190 | Find the drain rate of the display buffer. | ||
1191 | */ | ||
1192 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | ||
1193 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | ||
1194 | |||
1195 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | ||
1196 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
1197 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
1198 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); | ||
1199 | if ((rdev->family == CHIP_R350) && | ||
1200 | (stop_req > 0x15)) { | ||
1201 | stop_req -= 0x10; | ||
1202 | } | ||
1203 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
1204 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; | ||
1205 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
1206 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
1207 | RADEON_GRPH_STOP_CNTL); | ||
1208 | |||
1209 | if ((rdev->family == CHIP_RS100) || | ||
1210 | (rdev->family == CHIP_RS200)) | ||
1211 | critical_point2 = 0; | ||
1212 | else { | ||
1213 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | ||
1214 | temp_ff.full = rfixed_const(temp); | ||
1215 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | ||
1216 | if (sclk_ff.full < temp_ff.full) | ||
1217 | temp_ff.full = sclk_ff.full; | ||
1218 | |||
1219 | read_return_rate.full = temp_ff.full; | ||
1220 | |||
1221 | if (mode1) { | ||
1222 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | ||
1223 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | ||
1224 | } else { | ||
1225 | time_disp1_drop_priority.full = 0; | ||
1226 | } | ||
1227 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | ||
1228 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | ||
1229 | crit_point_ff.full += rfixed_const_half(0); | ||
1230 | |||
1231 | critical_point2 = rfixed_trunc(crit_point_ff); | ||
1232 | |||
1233 | if (rdev->disp_priority == 2) { | ||
1234 | critical_point2 = 0; | ||
1235 | } | ||
1236 | |||
1237 | if (max_stop_req - critical_point2 < 4) | ||
1238 | critical_point2 = 0; | ||
1239 | |||
1240 | } | ||
1241 | |||
1242 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { | ||
1243 | /* some R300 cards have a problem with this set to 0 */ | ||
1244 | critical_point2 = 0x10; | ||
1245 | } | ||
1246 | |||
1247 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
1248 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
1249 | |||
1250 | if ((rdev->family == CHIP_RS400) || | ||
1251 | (rdev->family == CHIP_RS480)) { | ||
1252 | #if 0 | ||
1253 | /* attempt to program RS400 disp2 regs correctly ??? */ | ||
1254 | temp = RREG32(RS400_DISP2_REQ_CNTL1); | ||
1255 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | | ||
1256 | RS400_DISP2_STOP_REQ_LEVEL_MASK); | ||
1257 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | | ||
1258 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
1259 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
1260 | temp = RREG32(RS400_DISP2_REQ_CNTL2); | ||
1261 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | | ||
1262 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); | ||
1263 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | | ||
1264 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | | ||
1265 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); | ||
1266 | #endif | ||
1267 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); | ||
1268 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); | ||
1269 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); | ||
1270 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); | ||
1271 | } | ||
1272 | |||
1273 | DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", | ||
1274 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | ||
1275 | } | ||
1276 | } | ||
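
Note on the arithmetic above: the whole latency/critical-point calculation runs on the driver's 20.12 fixed-point helpers (rfixed_const, rfixed_const_half, rfixed_mul, rfixed_div, rfixed_trunc). The stand-alone sketch below is only an illustration, assuming fixed20_12 stores the value shifted left by 12 bits; the fx_* names and the numbers in main() are hypothetical.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fx20_12;  /* assumed 20.12 layout: integer part in the top 20 bits */

static fx20_12 fx_const(uint32_t a)      { return (fx20_12){ a << 12 }; }          /* integer -> 20.12 */
static fx20_12 fx_const_half(uint32_t a) { return (fx20_12){ (a << 12) + 2048 }; } /* adds 0.5 for rounding */
static uint32_t fx_trunc(fx20_12 a)      { return a.full >> 12; }                  /* drop the fraction */

static uint32_t fx_mul(fx20_12 a, fx20_12 b)
{
	/* widen so the product keeps its 12 fractional bits without overflowing */
	return (uint32_t)(((uint64_t)a.full * b.full) >> 12);
}

static uint32_t fx_div(fx20_12 a, fx20_12 b)
{
	/* pre-shift the dividend so the quotient still carries 12 fractional bits */
	return (uint32_t)(((uint64_t)a.full << 12) / b.full);
}

int main(void)
{
	/* critical point as in the code above: trunc(drain_rate * disp_latency + 0.5) */
	fx20_12 drain_rate   = { fx_div(fx_const(100), fx_const(4)) }; /* hypothetical 100 / 4 = 25.0 */
	fx20_12 disp_latency = { fx_div(fx_const(3), fx_const(2)) };   /* hypothetical 1.5 */
	fx20_12 crit         = { fx_mul(drain_rate, disp_latency) };

	crit.full += fx_const_half(0).full;
	printf("critical point = %u\n", fx_trunc(crit)); /* 25.0 * 1.5 + 0.5 -> 38 */
	return 0;
}
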
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2c2f42de1d4c..34d0f58eb944 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -30,170 +30,6 @@ | |||
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | 31 | ||
32 | 32 | ||
33 | static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, | ||
34 | struct drm_display_mode *mode, | ||
35 | struct drm_display_mode *adjusted_mode) | ||
36 | { | ||
37 | struct drm_device *dev = encoder->dev; | ||
38 | struct radeon_device *rdev = dev->dev_private; | ||
39 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
40 | int xres = mode->hdisplay; | ||
41 | int yres = mode->vdisplay; | ||
42 | bool hscale = true, vscale = true; | ||
43 | int hsync_wid; | ||
44 | int vsync_wid; | ||
45 | int hsync_start; | ||
46 | uint32_t scale, inc; | ||
47 | uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; | ||
48 | uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; | ||
49 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | ||
50 | |||
51 | DRM_DEBUG("\n"); | ||
52 | |||
53 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | ||
54 | (RADEON_VERT_STRETCH_RESERVED | | ||
55 | RADEON_VERT_AUTO_RATIO_INC); | ||
56 | fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & | ||
57 | (RADEON_HORZ_FP_LOOP_STRETCH | | ||
58 | RADEON_HORZ_AUTO_RATIO_INC); | ||
59 | |||
60 | crtc_more_cntl = 0; | ||
61 | if ((rdev->family == CHIP_RS100) || | ||
62 | (rdev->family == CHIP_RS200)) { | ||
63 | /* This is to work around an ASIC bug for RMX; some versions | ||
64 | of the BIOS don't have this register initialized correctly. */ | ||
65 | crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; | ||
66 | } | ||
67 | |||
68 | |||
69 | fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) | ||
70 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
71 | |||
72 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
73 | if (!hsync_wid) | ||
74 | hsync_wid = 1; | ||
75 | hsync_start = mode->crtc_hsync_start - 8; | ||
76 | |||
77 | fp_h_sync_strt_wid = ((hsync_start & 0x1fff) | ||
78 | | ((hsync_wid & 0x3f) << 16) | ||
79 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
80 | ? RADEON_CRTC_H_SYNC_POL | ||
81 | : 0)); | ||
82 | |||
83 | fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) | ||
84 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
85 | |||
86 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
87 | if (!vsync_wid) | ||
88 | vsync_wid = 1; | ||
89 | |||
90 | fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) | ||
91 | | ((vsync_wid & 0x1f) << 16) | ||
92 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
93 | ? RADEON_CRTC_V_SYNC_POL | ||
94 | : 0)); | ||
95 | |||
96 | fp_horz_vert_active = 0; | ||
97 | |||
98 | if (native_mode->panel_xres == 0 || | ||
99 | native_mode->panel_yres == 0) { | ||
100 | hscale = false; | ||
101 | vscale = false; | ||
102 | } else { | ||
103 | if (xres > native_mode->panel_xres) | ||
104 | xres = native_mode->panel_xres; | ||
105 | if (yres > native_mode->panel_yres) | ||
106 | yres = native_mode->panel_yres; | ||
107 | |||
108 | if (xres == native_mode->panel_xres) | ||
109 | hscale = false; | ||
110 | if (yres == native_mode->panel_yres) | ||
111 | vscale = false; | ||
112 | } | ||
113 | |||
114 | if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
115 | if (radeon_encoder->rmx_type != RMX_CENTER) { | ||
116 | if (!hscale) | ||
117 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
118 | else { | ||
119 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | ||
120 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | ||
121 | / native_mode->panel_xres + 1; | ||
122 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | ||
123 | RADEON_HORZ_STRETCH_BLEND | | ||
124 | RADEON_HORZ_STRETCH_ENABLE | | ||
125 | ((native_mode->panel_xres/8-1) << 16)); | ||
126 | } | ||
127 | |||
128 | if (!vscale) | ||
129 | fp_vert_stretch |= ((yres-1) << 12); | ||
130 | else { | ||
131 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | ||
132 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | ||
133 | / native_mode->panel_yres + 1; | ||
134 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | ||
135 | RADEON_VERT_STRETCH_ENABLE | | ||
136 | RADEON_VERT_STRETCH_BLEND | | ||
137 | ((native_mode->panel_yres-1) << 12)); | ||
138 | } | ||
139 | } else if (radeon_encoder->rmx_type == RMX_CENTER) { | ||
140 | int blank_width; | ||
141 | |||
142 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
143 | fp_vert_stretch |= ((yres-1) << 12); | ||
144 | |||
145 | crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | | ||
146 | RADEON_CRTC_AUTO_VERT_CENTER_EN); | ||
147 | |||
148 | blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; | ||
149 | if (blank_width > 110) | ||
150 | blank_width = 110; | ||
151 | |||
152 | fp_crtc_h_total_disp = (((blank_width) & 0x3ff) | ||
153 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
154 | |||
155 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
156 | if (!hsync_wid) | ||
157 | hsync_wid = 1; | ||
158 | |||
159 | fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) | ||
160 | | ((hsync_wid & 0x3f) << 16) | ||
161 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
162 | ? RADEON_CRTC_H_SYNC_POL | ||
163 | : 0)); | ||
164 | |||
165 | fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) | ||
166 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
167 | |||
168 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
169 | if (!vsync_wid) | ||
170 | vsync_wid = 1; | ||
171 | |||
172 | fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) | ||
173 | | ((vsync_wid & 0x1f) << 16) | ||
174 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
175 | ? RADEON_CRTC_V_SYNC_POL | ||
176 | : 0))); | ||
177 | |||
178 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | ||
179 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | ||
180 | } | ||
181 | } else { | ||
182 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
183 | fp_vert_stretch |= ((yres-1) << 12); | ||
184 | } | ||
185 | |||
186 | WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); | ||
187 | WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); | ||
188 | WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); | ||
189 | WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); | ||
190 | WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); | ||
191 | WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); | ||
192 | WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); | ||
193 | WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); | ||
194 | |||
195 | } | ||
196 | |||
197 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | 33 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) |
198 | { | 34 | { |
199 | struct drm_device *dev = encoder->dev; | 35 | struct drm_device *dev = encoder->dev; |
@@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
287 | 123 | ||
288 | DRM_DEBUG("\n"); | 124 | DRM_DEBUG("\n"); |
289 | 125 | ||
290 | if (radeon_crtc->crtc_id == 0) | ||
291 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
292 | |||
293 | lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); | 126 | lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); |
294 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; | 127 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; |
295 | 128 | ||
@@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
318 | 151 | ||
319 | if (radeon_crtc->crtc_id == 0) { | 152 | if (radeon_crtc->crtc_id == 0) { |
320 | if (ASIC_IS_R300(rdev)) { | 153 | if (ASIC_IS_R300(rdev)) { |
321 | if (radeon_encoder->flags & RADEON_USE_RMX) | 154 | if (radeon_encoder->rmx_type != RMX_OFF) |
322 | lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; | 155 | lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; |
323 | } else | 156 | } else |
324 | lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; | 157 | lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; |
@@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | |||
350 | 183 | ||
351 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 184 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
352 | 185 | ||
353 | radeon_encoder->flags &= ~RADEON_USE_RMX; | ||
354 | |||
355 | if (radeon_encoder->rmx_type != RMX_OFF) | 186 | if (radeon_encoder->rmx_type != RMX_OFF) |
356 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | 187 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); |
357 | 188 | ||
@@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, | |||
455 | 286 | ||
456 | DRM_DEBUG("\n"); | 287 | DRM_DEBUG("\n"); |
457 | 288 | ||
458 | if (radeon_crtc->crtc_id == 0) | ||
459 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
460 | |||
461 | if (radeon_crtc->crtc_id == 0) { | 289 | if (radeon_crtc->crtc_id == 0) { |
462 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { | 290 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { |
463 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & | 291 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & |
@@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
653 | 481 | ||
654 | DRM_DEBUG("\n"); | 482 | DRM_DEBUG("\n"); |
655 | 483 | ||
656 | if (radeon_crtc->crtc_id == 0) | ||
657 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
658 | |||
659 | tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); | 484 | tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); |
660 | tmp &= 0xfffff; | 485 | tmp &= 0xfffff; |
661 | if (rdev->family == CHIP_RV280) { | 486 | if (rdev->family == CHIP_RV280) { |
@@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
711 | if (radeon_crtc->crtc_id == 0) { | 536 | if (radeon_crtc->crtc_id == 0) { |
712 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { | 537 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
713 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; | 538 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
714 | if (radeon_encoder->flags & RADEON_USE_RMX) | 539 | if (radeon_encoder->rmx_type != RMX_OFF) |
715 | fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; | 540 | fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; |
716 | else | 541 | else |
717 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; | 542 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
@@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
820 | 645 | ||
821 | DRM_DEBUG("\n"); | 646 | DRM_DEBUG("\n"); |
822 | 647 | ||
823 | if (radeon_crtc->crtc_id == 0) | ||
824 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
825 | |||
826 | if (rdev->is_atom_bios) { | 648 | if (rdev->is_atom_bios) { |
827 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 649 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
828 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 650 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); |
@@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
856 | if (radeon_crtc->crtc_id == 0) { | 678 | if (radeon_crtc->crtc_id == 0) { |
857 | if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { | 679 | if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { |
858 | fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; | 680 | fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; |
859 | if (radeon_encoder->flags & RADEON_USE_RMX) | 681 | if (radeon_encoder->rmx_type != RMX_OFF) |
860 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; | 682 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; |
861 | else | 683 | else |
862 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; | 684 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; |
@@ -1014,9 +836,6 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
1014 | 836 | ||
1015 | DRM_DEBUG("\n"); | 837 | DRM_DEBUG("\n"); |
1016 | 838 | ||
1017 | if (radeon_crtc->crtc_id == 0) | ||
1018 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
1019 | |||
1020 | if (rdev->family != CHIP_R200) { | 839 | if (rdev->family != CHIP_R200) { |
1021 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); | 840 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
1022 | if (rdev->family == CHIP_R420 || | 841 | if (rdev->family == CHIP_R420 || |
@@ -1243,6 +1062,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1243 | 1062 | ||
1244 | radeon_encoder->encoder_id = encoder_id; | 1063 | radeon_encoder->encoder_id = encoder_id; |
1245 | radeon_encoder->devices = supported_device; | 1064 | radeon_encoder->devices = supported_device; |
1065 | radeon_encoder->rmx_type = RMX_OFF; | ||
1246 | 1066 | ||
1247 | switch (radeon_encoder->encoder_id) { | 1067 | switch (radeon_encoder->encoder_id) { |
1248 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1068 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
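
The radeon_legacy_rmx_mode_set() block removed above computed the panel stretch blend ratio as scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) / panel_xres + 1 (with an analogous vertical form). A worked stand-alone example of that ratio; the RATIO_MAX value of 4096 and the mode/panel widths are assumptions for illustration only, not taken from the patch.

#include <stdio.h>

#define RATIO_MAX 4096U /* assumed value of RADEON_HORZ_STRETCH_RATIO_MAX */

int main(void)
{
	unsigned xres = 1024, panel_xres = 1280; /* hypothetical 1024-wide mode on a 1280-wide panel */
	unsigned inc = 0;                        /* as if HORZ_AUTO_RATIO_INC were clear */
	unsigned scale = ((xres + inc) * RATIO_MAX) / panel_xres + 1;

	/* 1024/1280 of the panel width expressed in 1/4096 steps (about 0.800) */
	printf("scale = %u (%.3f)\n", scale, (double)scale / RATIO_MAX);
	return 0;
}
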
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9173b687462b..3b09a1f2d8f9 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -36,6 +36,9 @@ | |||
36 | #include <linux/i2c.h> | 36 | #include <linux/i2c.h> |
37 | #include <linux/i2c-id.h> | 37 | #include <linux/i2c-id.h> |
38 | #include <linux/i2c-algo-bit.h> | 38 | #include <linux/i2c-algo-bit.h> |
39 | #include "radeon_fixed.h" | ||
40 | |||
41 | struct radeon_device; | ||
39 | 42 | ||
40 | #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) | 43 | #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) |
41 | #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) | 44 | #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) |
@@ -124,6 +127,7 @@ struct radeon_tmds_pll { | |||
124 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) | 127 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) |
125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) | 128 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) |
126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | ||
127 | 131 | ||
128 | struct radeon_pll { | 132 | struct radeon_pll { |
129 | uint16_t reference_freq; | 133 | uint16_t reference_freq; |
@@ -170,6 +174,18 @@ struct radeon_mode_info { | |||
170 | struct atom_context *atom_context; | 174 | struct atom_context *atom_context; |
171 | enum radeon_connector_table connector_table; | 175 | enum radeon_connector_table connector_table; |
172 | bool mode_config_initialized; | 176 | bool mode_config_initialized; |
177 | struct radeon_crtc *crtcs[2]; | ||
178 | }; | ||
179 | |||
180 | struct radeon_native_mode { | ||
181 | /* preferred mode */ | ||
182 | uint32_t panel_xres, panel_yres; | ||
183 | uint32_t hoverplus, hsync_width; | ||
184 | uint32_t hblank; | ||
185 | uint32_t voverplus, vsync_width; | ||
186 | uint32_t vblank; | ||
187 | uint32_t dotclock; | ||
188 | uint32_t flags; | ||
173 | }; | 189 | }; |
174 | 190 | ||
175 | struct radeon_crtc { | 191 | struct radeon_crtc { |
@@ -185,19 +201,13 @@ struct radeon_crtc { | |||
185 | uint64_t cursor_addr; | 201 | uint64_t cursor_addr; |
186 | int cursor_width; | 202 | int cursor_width; |
187 | int cursor_height; | 203 | int cursor_height; |
188 | }; | 204 | uint32_t legacy_display_base_addr; |
189 | 205 | uint32_t legacy_cursor_offset; | |
190 | #define RADEON_USE_RMX 1 | 206 | enum radeon_rmx_type rmx_type; |
191 | 207 | uint32_t devices; | |
192 | struct radeon_native_mode { | 208 | fixed20_12 vsc; |
193 | /* preferred mode */ | 209 | fixed20_12 hsc; |
194 | uint32_t panel_xres, panel_yres; | 210 | struct radeon_native_mode native_mode; |
195 | uint32_t hoverplus, hsync_width; | ||
196 | uint32_t hblank; | ||
197 | uint32_t voverplus, vsync_width; | ||
198 | uint32_t vblank; | ||
199 | uint32_t dotclock; | ||
200 | uint32_t flags; | ||
201 | }; | 211 | }; |
202 | 212 | ||
203 | struct radeon_encoder_primary_dac { | 213 | struct radeon_encoder_primary_dac { |
@@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder); | |||
383 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 393 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
384 | void radeon_combios_asic_init(struct drm_device *dev); | 394 | void radeon_combios_asic_init(struct drm_device *dev); |
385 | extern int radeon_static_clocks_init(struct drm_device *dev); | 395 | extern int radeon_static_clocks_init(struct drm_device *dev); |
386 | void radeon_init_disp_bw_legacy(struct drm_device *dev, | 396 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
387 | struct drm_display_mode *mode1, | 397 | struct drm_display_mode *mode, |
388 | uint32_t pixel_bytes1, | 398 | struct drm_display_mode *adjusted_mode); |
389 | struct drm_display_mode *mode2, | 399 | void atom_rv515_force_tv_scaler(struct radeon_device *rdev); |
390 | uint32_t pixel_bytes2); | ||
391 | void radeon_init_disp_bw_avivo(struct drm_device *dev, | ||
392 | struct drm_display_mode *mode1, | ||
393 | uint32_t pixel_bytes1, | ||
394 | struct drm_display_mode *mode2, | ||
395 | uint32_t pixel_bytes2); | ||
396 | void radeon_init_disp_bandwidth(struct drm_device *dev); | ||
397 | 400 | ||
398 | #endif | 401 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index bac0d06c52ac..dd9ac2fed6d6 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -44,6 +44,9 @@ struct radeon_object { | |||
44 | uint64_t gpu_addr; | 44 | uint64_t gpu_addr; |
45 | void *kptr; | 45 | void *kptr; |
46 | bool is_iomem; | 46 | bool is_iomem; |
47 | uint32_t tiling_flags; | ||
48 | uint32_t pitch; | ||
49 | int surface_reg; | ||
47 | }; | 50 | }; |
48 | 51 | ||
49 | int radeon_ttm_init(struct radeon_device *rdev); | 52 | int radeon_ttm_init(struct radeon_device *rdev); |
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | |||
70 | 73 | ||
71 | robj = container_of(tobj, struct radeon_object, tobj); | 74 | robj = container_of(tobj, struct radeon_object, tobj); |
72 | list_del_init(&robj->list); | 75 | list_del_init(&robj->list); |
76 | radeon_object_clear_surface_reg(robj); | ||
73 | kfree(robj); | 77 | kfree(robj); |
74 | } | 78 | } |
75 | 79 | ||
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |||
99 | { | 103 | { |
100 | uint32_t flags = 0; | 104 | uint32_t flags = 0; |
101 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | 105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { |
102 | flags |= TTM_PL_FLAG_VRAM; | 106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
103 | } | 107 | } |
104 | if (domain & RADEON_GEM_DOMAIN_GTT) { | 108 | if (domain & RADEON_GEM_DOMAIN_GTT) { |
105 | flags |= TTM_PL_FLAG_TT; | 109 | flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
106 | } | 110 | } |
107 | if (domain & RADEON_GEM_DOMAIN_CPU) { | 111 | if (domain & RADEON_GEM_DOMAIN_CPU) { |
108 | flags |= TTM_PL_FLAG_SYSTEM; | 112 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
109 | } | 113 | } |
110 | if (!flags) { | 114 | if (!flags) { |
111 | flags |= TTM_PL_FLAG_SYSTEM; | 115 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
112 | } | 116 | } |
113 | return flags; | 117 | return flags; |
114 | } | 118 | } |
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev, | |||
141 | } | 145 | } |
142 | robj->rdev = rdev; | 146 | robj->rdev = rdev; |
143 | robj->gobj = gobj; | 147 | robj->gobj = gobj; |
148 | robj->surface_reg = -1; | ||
144 | INIT_LIST_HEAD(&robj->list); | 149 | INIT_LIST_HEAD(&robj->list); |
145 | 150 | ||
146 | flags = radeon_object_flags_from_domain(domain); | 151 | flags = radeon_object_flags_from_domain(domain); |
@@ -304,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj) | |||
304 | } | 309 | } |
305 | spin_lock(&robj->tobj.lock); | 310 | spin_lock(&robj->tobj.lock); |
306 | if (robj->tobj.sync_obj) { | 311 | if (robj->tobj.sync_obj) { |
307 | r = ttm_bo_wait(&robj->tobj, true, false, false); | 312 | r = ttm_bo_wait(&robj->tobj, true, true, false); |
308 | } | 313 | } |
309 | spin_unlock(&robj->tobj.lock); | 314 | spin_unlock(&robj->tobj.lock); |
310 | radeon_object_unreserve(robj); | 315 | radeon_object_unreserve(robj); |
@@ -403,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
403 | struct radeon_object *robj; | 408 | struct radeon_object *robj; |
404 | struct radeon_fence *old_fence = NULL; | 409 | struct radeon_fence *old_fence = NULL; |
405 | struct list_head *i; | 410 | struct list_head *i; |
406 | uint32_t flags; | ||
407 | int r; | 411 | int r; |
408 | 412 | ||
409 | r = radeon_object_list_reserve(head); | 413 | r = radeon_object_list_reserve(head); |
@@ -414,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
414 | list_for_each(i, head) { | 418 | list_for_each(i, head) { |
415 | lobj = list_entry(i, struct radeon_object_list, list); | 419 | lobj = list_entry(i, struct radeon_object_list, list); |
416 | robj = lobj->robj; | 420 | robj = lobj->robj; |
417 | if (lobj->wdomain) { | ||
418 | flags = radeon_object_flags_from_domain(lobj->wdomain); | ||
419 | flags |= TTM_PL_FLAG_TT; | ||
420 | } else { | ||
421 | flags = radeon_object_flags_from_domain(lobj->rdomain); | ||
422 | flags |= TTM_PL_FLAG_TT; | ||
423 | flags |= TTM_PL_FLAG_VRAM; | ||
424 | } | ||
425 | if (!robj->pin_count) { | 421 | if (!robj->pin_count) { |
426 | robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; | 422 | if (lobj->wdomain) { |
423 | robj->tobj.proposed_placement = | ||
424 | radeon_object_flags_from_domain(lobj->wdomain); | ||
425 | } else { | ||
426 | robj->tobj.proposed_placement = | ||
427 | radeon_object_flags_from_domain(lobj->rdomain); | ||
428 | } | ||
427 | r = ttm_buffer_object_validate(&robj->tobj, | 429 | r = ttm_buffer_object_validate(&robj->tobj, |
428 | robj->tobj.proposed_placement, | 430 | robj->tobj.proposed_placement, |
429 | true, false); | 431 | true, false); |
430 | if (unlikely(r)) { | 432 | if (unlikely(r)) { |
431 | radeon_object_list_unreserve(head); | ||
432 | DRM_ERROR("radeon: failed to validate.\n"); | 433 | DRM_ERROR("radeon: failed to validate.\n"); |
433 | return r; | 434 | return r; |
434 | } | 435 | } |
435 | radeon_object_gpu_addr(robj); | 436 | radeon_object_gpu_addr(robj); |
436 | } | 437 | } |
437 | lobj->gpu_offset = robj->gpu_addr; | 438 | lobj->gpu_offset = robj->gpu_addr; |
439 | lobj->tiling_flags = robj->tiling_flags; | ||
438 | if (fence) { | 440 | if (fence) { |
439 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | 441 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; |
440 | robj->tobj.sync_obj = radeon_fence_ref(fence); | 442 | robj->tobj.sync_obj = radeon_fence_ref(fence); |
@@ -479,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj) | |||
479 | { | 481 | { |
480 | return robj->tobj.num_pages << PAGE_SHIFT; | 482 | return robj->tobj.num_pages << PAGE_SHIFT; |
481 | } | 483 | } |
484 | |||
485 | int radeon_object_get_surface_reg(struct radeon_object *robj) | ||
486 | { | ||
487 | struct radeon_device *rdev = robj->rdev; | ||
488 | struct radeon_surface_reg *reg; | ||
489 | struct radeon_object *old_object; | ||
490 | int steal; | ||
491 | int i; | ||
492 | |||
493 | if (!robj->tiling_flags) | ||
494 | return 0; | ||
495 | |||
496 | if (robj->surface_reg >= 0) { | ||
497 | reg = &rdev->surface_regs[robj->surface_reg]; | ||
498 | i = robj->surface_reg; | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | steal = -1; | ||
503 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { | ||
504 | |||
505 | reg = &rdev->surface_regs[i]; | ||
506 | if (!reg->robj) | ||
507 | break; | ||
508 | |||
509 | old_object = reg->robj; | ||
510 | if (old_object->pin_count == 0) | ||
511 | steal = i; | ||
512 | } | ||
513 | |||
514 | /* if we are all out */ | ||
515 | if (i == RADEON_GEM_MAX_SURFACES) { | ||
516 | if (steal == -1) | ||
517 | return -ENOMEM; | ||
518 | /* find someone with a surface reg and nuke their BO */ | ||
519 | reg = &rdev->surface_regs[steal]; | ||
520 | old_object = reg->robj; | ||
521 | /* blow away the mapping */ | ||
522 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); | ||
523 | ttm_bo_unmap_virtual(&old_object->tobj); | ||
524 | old_object->surface_reg = -1; | ||
525 | i = steal; | ||
526 | } | ||
527 | |||
528 | robj->surface_reg = i; | ||
529 | reg->robj = robj; | ||
530 | |||
531 | out: | ||
532 | radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, | ||
533 | robj->tobj.mem.mm_node->start << PAGE_SHIFT, | ||
534 | robj->tobj.num_pages << PAGE_SHIFT); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | ||
539 | { | ||
540 | struct radeon_device *rdev = robj->rdev; | ||
541 | struct radeon_surface_reg *reg; | ||
542 | |||
543 | if (robj->surface_reg == -1) | ||
544 | return; | ||
545 | |||
546 | reg = &rdev->surface_regs[robj->surface_reg]; | ||
547 | radeon_clear_surface_reg(rdev, robj->surface_reg); | ||
548 | |||
549 | reg->robj = NULL; | ||
550 | robj->surface_reg = -1; | ||
551 | } | ||
552 | |||
553 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
554 | uint32_t tiling_flags, uint32_t pitch) | ||
555 | { | ||
556 | robj->tiling_flags = tiling_flags; | ||
557 | robj->pitch = pitch; | ||
558 | } | ||
559 | |||
560 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | ||
561 | uint32_t *tiling_flags, | ||
562 | uint32_t *pitch) | ||
563 | { | ||
564 | if (tiling_flags) | ||
565 | *tiling_flags = robj->tiling_flags; | ||
566 | if (pitch) | ||
567 | *pitch = robj->pitch; | ||
568 | } | ||
569 | |||
570 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | ||
571 | bool force_drop) | ||
572 | { | ||
573 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | ||
574 | return 0; | ||
575 | |||
576 | if (force_drop) { | ||
577 | radeon_object_clear_surface_reg(robj); | ||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | ||
582 | if (!has_moved) | ||
583 | return 0; | ||
584 | |||
585 | if (robj->surface_reg >= 0) | ||
586 | radeon_object_clear_surface_reg(robj); | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | if ((robj->surface_reg >= 0) && !has_moved) | ||
591 | return 0; | ||
592 | |||
593 | return radeon_object_get_surface_reg(robj); | ||
594 | } | ||
595 | |||
596 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
597 | struct ttm_mem_reg *mem) | ||
598 | { | ||
599 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | ||
600 | radeon_object_check_tiling(robj, 0, 1); | ||
601 | } | ||
602 | |||
603 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | ||
604 | { | ||
605 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | ||
606 | radeon_object_check_tiling(robj, 0, 0); | ||
607 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index a853261d1881..60d159308b88 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib) | |||
126 | } | 126 | } |
127 | } | 127 | } |
128 | 128 | ||
129 | static void radeon_ib_cpu_flush(struct radeon_device *rdev, | ||
130 | struct radeon_ib *ib) | ||
131 | { | ||
132 | unsigned long tmp; | ||
133 | unsigned i; | ||
134 | |||
135 | /* To force CPU cache flush ugly but seems reliable */ | ||
136 | for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) { | ||
137 | tmp = readl(&ib->ptr[i]); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | 129 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) |
142 | { | 130 | { |
143 | int r = 0; | 131 | int r = 0; |
144 | 132 | ||
145 | mutex_lock(&rdev->ib_pool.mutex); | 133 | mutex_lock(&rdev->ib_pool.mutex); |
146 | radeon_ib_align(rdev, ib); | 134 | radeon_ib_align(rdev, ib); |
147 | radeon_ib_cpu_flush(rdev, ib); | ||
148 | if (!ib->length_dw || !rdev->cp.ready) { | 135 | if (!ib->length_dw || !rdev->cp.ready) { |
150 | /* TODO: Nothing in the IB that we should report. */ | 137
150 | mutex_unlock(&rdev->ib_pool.mutex); | 137 | mutex_unlock(&rdev->ib_pool.mutex); |
151 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); | 138 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); |
152 | return -EINVAL; | 139 | return -EINVAL; |
153 | } | 140 | } |
154 | /* 64 dwords should be enought for fence too */ | 141 | /* 64 dwords should be enough for fence too */ |
155 | r = radeon_ring_lock(rdev, 64); | 142 | r = radeon_ring_lock(rdev, 64); |
156 | if (r) { | 143 | if (r) { |
157 | DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); | 144
diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h new file mode 100644 index 000000000000..63a773578f17 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_share.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RADEON_SHARE_H__ | ||
29 | #define __RADEON_SHARE_H__ | ||
30 | |||
31 | void r100_vram_init_sizes(struct radeon_device *rdev); | ||
32 | |||
33 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
34 | struct drm_display_mode *mode1, | ||
35 | struct drm_display_mode *mode2); | ||
36 | |||
37 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
38 | |||
39 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c new file mode 100644 index 000000000000..03c33cf4e14c --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Copyright 2009 VMware, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Michel Dänzer | ||
23 | */ | ||
24 | #include <drm/drmP.h> | ||
25 | #include <drm/radeon_drm.h> | ||
26 | #include "radeon_reg.h" | ||
27 | #include "radeon.h" | ||
28 | |||
29 | |||
30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ | ||
31 | void radeon_test_moves(struct radeon_device *rdev) | ||
32 | { | ||
33 | struct radeon_object *vram_obj = NULL; | ||
34 | struct radeon_object **gtt_obj = NULL; | ||
35 | struct radeon_fence *fence = NULL; | ||
36 | uint64_t gtt_addr, vram_addr; | ||
37 | unsigned i, n, size; | ||
38 | int r; | ||
39 | |||
40 | size = 1024 * 1024; | ||
41 | |||
42 | /* Number of tests = | ||
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | ||
44 | */ | ||
45 | n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - | ||
46 | rdev->cp.ring_size) / size; | ||
47 | |||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | ||
49 | if (!gtt_obj) { | ||
50 | DRM_ERROR("Failed to allocate %d pointers\n", n); | ||
51 | r = 1; | ||
52 | goto out_cleanup; | ||
53 | } | ||
54 | |||
55 | r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | ||
56 | false, &vram_obj); | ||
57 | if (r) { | ||
58 | DRM_ERROR("Failed to create VRAM object\n"); | ||
59 | goto out_cleanup; | ||
60 | } | ||
61 | |||
62 | r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | ||
63 | if (r) { | ||
64 | DRM_ERROR("Failed to pin VRAM object\n"); | ||
65 | goto out_cleanup; | ||
66 | } | ||
67 | |||
68 | for (i = 0; i < n; i++) { | ||
69 | void *gtt_map, *vram_map; | ||
70 | void **gtt_start, **gtt_end; | ||
71 | void **vram_start, **vram_end; | ||
72 | |||
73 | r = radeon_object_create(rdev, NULL, size, true, | ||
74 | RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); | ||
75 | if (r) { | ||
76 | DRM_ERROR("Failed to create GTT object %d\n", i); | ||
77 | goto out_cleanup; | ||
78 | } | ||
79 | |||
80 | r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); | ||
81 | if (r) { | ||
82 | DRM_ERROR("Failed to pin GTT object %d\n", i); | ||
83 | goto out_cleanup; | ||
84 | } | ||
85 | |||
86 | r = radeon_object_kmap(gtt_obj[i], >t_map); | ||
87 | if (r) { | ||
88 | DRM_ERROR("Failed to map GTT object %d\n", i); | ||
89 | goto out_cleanup; | ||
90 | } | ||
91 | |||
92 | for (gtt_start = gtt_map, gtt_end = gtt_map + size; | ||
93 | gtt_start < gtt_end; | ||
94 | gtt_start++) | ||
95 | *gtt_start = gtt_start; | ||
96 | |||
97 | radeon_object_kunmap(gtt_obj[i]); | ||
98 | |||
99 | r = radeon_fence_create(rdev, &fence); | ||
100 | if (r) { | ||
101 | DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); | ||
102 | goto out_cleanup; | ||
103 | } | ||
104 | |||
105 | r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); | ||
106 | if (r) { | ||
107 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); | ||
108 | goto out_cleanup; | ||
109 | } | ||
110 | |||
111 | r = radeon_fence_wait(fence, false); | ||
112 | if (r) { | ||
113 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); | ||
114 | goto out_cleanup; | ||
115 | } | ||
116 | |||
117 | radeon_fence_unref(&fence); | ||
118 | |||
119 | r = radeon_object_kmap(vram_obj, &vram_map); | ||
120 | if (r) { | ||
121 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | ||
122 | goto out_cleanup; | ||
123 | } | ||
124 | |||
125 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
126 | vram_start = vram_map, vram_end = vram_map + size; | ||
127 | vram_start < vram_end; | ||
128 | gtt_start++, vram_start++) { | ||
129 | if (*vram_start != gtt_start) { | ||
130 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | ||
131 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | ||
132 | i, *vram_start, gtt_start, gtt_map, | ||
133 | gtt_end); | ||
134 | radeon_object_kunmap(vram_obj); | ||
135 | goto out_cleanup; | ||
136 | } | ||
137 | *vram_start = vram_start; | ||
138 | } | ||
139 | |||
140 | radeon_object_kunmap(vram_obj); | ||
141 | |||
142 | r = radeon_fence_create(rdev, &fence); | ||
143 | if (r) { | ||
144 | DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); | ||
145 | goto out_cleanup; | ||
146 | } | ||
147 | |||
148 | r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); | ||
149 | if (r) { | ||
150 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); | ||
151 | goto out_cleanup; | ||
152 | } | ||
153 | |||
154 | r = radeon_fence_wait(fence, false); | ||
155 | if (r) { | ||
156 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); | ||
157 | goto out_cleanup; | ||
158 | } | ||
159 | |||
160 | radeon_fence_unref(&fence); | ||
161 | |||
162 | r = radeon_object_kmap(gtt_obj[i], >t_map); | ||
163 | if (r) { | ||
164 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | ||
165 | goto out_cleanup; | ||
166 | } | ||
167 | |||
168 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
169 | vram_start = vram_map, vram_end = vram_map + size; | ||
170 | gtt_start < gtt_end; | ||
171 | gtt_start++, vram_start++) { | ||
172 | if (*gtt_start != vram_start) { | ||
173 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | ||
174 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | ||
175 | i, *gtt_start, vram_start, vram_map, | ||
176 | vram_end); | ||
177 | radeon_object_kunmap(gtt_obj[i]); | ||
178 | goto out_cleanup; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | radeon_object_kunmap(gtt_obj[i]); | ||
183 | |||
184 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", | ||
185 | gtt_addr - rdev->mc.gtt_location); | ||
186 | } | ||
187 | |||
188 | out_cleanup: | ||
189 | if (vram_obj) { | ||
190 | radeon_object_unpin(vram_obj); | ||
191 | radeon_object_unref(&vram_obj); | ||
192 | } | ||
193 | if (gtt_obj) { | ||
194 | for (i = 0; i < n; i++) { | ||
195 | if (gtt_obj[i]) { | ||
196 | radeon_object_unpin(gtt_obj[i]); | ||
197 | radeon_object_unref(>t_obj[i]); | ||
198 | } | ||
199 | } | ||
200 | kfree(gtt_obj); | ||
201 | } | ||
202 | if (fence) { | ||
203 | radeon_fence_unref(&fence); | ||
204 | } | ||
205 | if (r) { | ||
206 | printk(KERN_WARNING "Error while testing BO move.\n"); | ||
207 | } | ||
208 | } | ||
209 | |||
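
The test count in radeon_test_moves() above is just the GTT space left over after the IB pool, the writeback page and the ring buffer, divided into 1 MiB buffers. A quick sketch of that arithmetic with purely hypothetical sizes (the driver reads the real ones from rdev at runtime):

#include <stdio.h>

int main(void)
{
	unsigned long gtt_size  = 64UL << 20;       /* assumed 64 MiB GTT aperture  */
	unsigned long ib_pool   = 16UL * 64 * 1024; /* assumed 16 IBs of 64 KiB     */
	unsigned long wb_page   = 4096;             /* writeback page               */
	unsigned long ring_size = 1UL << 20;        /* assumed 1 MiB ring buffer    */
	unsigned long test_size = 1024 * 1024;      /* per-test BO size, as above   */

	printf("n = %lu tests\n", (gtt_size - ib_pool - wb_page - ring_size) / test_size); /* -> 61 */
	return 0;
}
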
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1227a97f5169..15c3531377ed 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
355 | if (!rdev->cp.ready) { | 355 | if (!rdev->cp.ready) { |
356 | /* use memcpy */ | 356 | /* use memcpy */ |
357 | DRM_ERROR("CP is not ready use memcpy.\n"); | 357 | DRM_ERROR("CP is not ready use memcpy.\n"); |
358 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 358 | goto memcpy; |
359 | } | 359 | } |
360 | 360 | ||
361 | if (old_mem->mem_type == TTM_PL_VRAM && | 361 | if (old_mem->mem_type == TTM_PL_VRAM && |
362 | new_mem->mem_type == TTM_PL_SYSTEM) { | 362 | new_mem->mem_type == TTM_PL_SYSTEM) { |
363 | return radeon_move_vram_ram(bo, evict, interruptible, | 363 | r = radeon_move_vram_ram(bo, evict, interruptible, |
364 | no_wait, new_mem); | 364 | no_wait, new_mem); |
365 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && | 365 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
366 | new_mem->mem_type == TTM_PL_VRAM) { | 366 | new_mem->mem_type == TTM_PL_VRAM) { |
367 | return radeon_move_ram_vram(bo, evict, interruptible, | 367 | r = radeon_move_ram_vram(bo, evict, interruptible, |
368 | no_wait, new_mem); | 368 | no_wait, new_mem); |
369 | } else { | 369 | } else { |
370 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); | 370 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); |
371 | if (unlikely(r)) { | ||
372 | return r; | ||
373 | } | ||
374 | } | 371 | } |
372 | |||
373 | if (r) { | ||
374 | memcpy: | ||
375 | r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | ||
376 | } | ||
377 | |||
375 | return r; | 378 | return r; |
376 | } | 379 | } |
377 | 380 | ||
@@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = { | |||
429 | .sync_obj_flush = &radeon_sync_obj_flush, | 432 | .sync_obj_flush = &radeon_sync_obj_flush, |
430 | .sync_obj_unref = &radeon_sync_obj_unref, | 433 | .sync_obj_unref = &radeon_sync_obj_unref, |
431 | .sync_obj_ref = &radeon_sync_obj_ref, | 434 | .sync_obj_ref = &radeon_sync_obj_ref, |
435 | .move_notify = &radeon_bo_move_notify, | ||
436 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, | ||
432 | }; | 437 | }; |
433 | 438 | ||
434 | int radeon_ttm_init(struct radeon_device *rdev) | 439 | int radeon_ttm_init(struct radeon_device *rdev) |
@@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
442 | /* No other users of the address space, so set it to 0 */ | 447
443 | r = ttm_bo_device_init(&rdev->mman.bdev, | 448 | r = ttm_bo_device_init(&rdev->mman.bdev, |
444 | rdev->mman.mem_global_ref.object, | 449 | rdev->mman.mem_global_ref.object, |
445 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); | 450 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, |
451 | rdev->need_dma32); | ||
446 | if (r) { | 452 | if (r) { |
447 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 453 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
448 | return r; | 454 | return r; |
449 | } | 455 | } |
450 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, | 456 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, |
451 | ((rdev->mc.aper_size) >> PAGE_SHIFT)); | 457 | ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); |
452 | if (r) { | 458 | if (r) { |
453 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 459 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
454 | return r; | 460 | return r; |
@@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
465 | return r; | 471 | return r; |
466 | } | 472 | } |
467 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | 473 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
468 | rdev->mc.vram_size / (1024 * 1024)); | 474 | rdev->mc.real_vram_size / (1024 * 1024)); |
469 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, | 475 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, |
470 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); | 476 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); |
471 | if (r) { | 477 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index cc074b5a8f74..b29affd9c5d8 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_share.h" | ||
32 | 33 | ||
33 | /* rs400,rs480 depends on : */ | 34 | /* rs400,rs480 depends on : */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev) | |||
164 | WREG32(RADEON_BUS_CNTL, tmp); | 165 | WREG32(RADEON_BUS_CNTL, tmp); |
165 | } | 166 | } |
166 | /* Table should be in 32bits address space so ignore bits above. */ | 167 | /* Table should be in 32bits address space so ignore bits above. */ |
167 | tmp = rdev->gart.table_addr & 0xfffff000; | 168 | tmp = (u32)rdev->gart.table_addr & 0xfffff000; |
169 | tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4; | ||
170 | |||
168 | WREG32_MC(RS480_GART_BASE, tmp); | 171 | WREG32_MC(RS480_GART_BASE, tmp); |
169 | /* TODO: more tweaking here */ | 172 | /* TODO: more tweaking here */ |
170 | WREG32_MC(RS480_GART_FEATURE_ID, | 173 | WREG32_MC(RS480_GART_FEATURE_ID, |
@@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev) | |||
201 | 204 | ||
202 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 205 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
203 | { | 206 | { |
207 | uint32_t entry; | ||
208 | |||
204 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 209 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
205 | return -EINVAL; | 210 | return -EINVAL; |
206 | } | 211 | } |
207 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); | 212 | |
213 | entry = (lower_32_bits(addr) & PAGE_MASK) | | ||
214 | ((upper_32_bits(addr) & 0xff) << 4) | | ||
215 | 0xc; | ||
216 | entry = cpu_to_le32(entry); | ||
217 | rdev->gart.table.ram.ptr[i] = entry; | ||
208 | return 0; | 218 | return 0; |
209 | } | 219 | } |
210 | 220 | ||
@@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev) | |||
223 | 233 | ||
224 | rs400_gpu_init(rdev); | 234 | rs400_gpu_init(rdev); |
225 | rs400_gart_disable(rdev); | 235 | rs400_gart_disable(rdev); |
226 | rdev->mc.gtt_location = rdev->mc.vram_size; | 236 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
227 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | 237 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
228 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | 238 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
229 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
230 | r = radeon_mc_setup(rdev); | 239 | r = radeon_mc_setup(rdev); |
231 | if (r) { | 240 | if (r) { |
232 | return r; | 241 | return r; |
@@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev) | |||
238 | "programming pipes. Bad things might happen.\n"); | 247 | "programming pipes. Bad things might happen.\n"); |
239 | } | 248 | } |
240 | 249 | ||
241 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 250 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
242 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | 251 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
243 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | 252 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
244 | WREG32(RADEON_MC_FB_LOCATION, tmp); | 253 | WREG32(RADEON_MC_FB_LOCATION, tmp); |
@@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
284 | */ | 293 | */ |
285 | void rs400_vram_info(struct radeon_device *rdev) | 294 | void rs400_vram_info(struct radeon_device *rdev) |
286 | { | 295 | { |
287 | uint32_t tom; | ||
288 | |||
289 | rs400_gart_adjust_size(rdev); | 296 | rs400_gart_adjust_size(rdev); |
290 | /* DDR for all card after R300 & IGP */ | 297 | /* DDR for all card after R300 & IGP */ |
291 | rdev->mc.vram_is_ddr = true; | 298 | rdev->mc.vram_is_ddr = true; |
292 | rdev->mc.vram_width = 128; | 299 | rdev->mc.vram_width = 128; |
293 | 300 | ||
294 | /* read NB_TOM to get the amount of ram stolen for the GPU */ | 301 | r100_vram_init_sizes(rdev); |
295 | tom = RREG32(RADEON_NB_TOM); | ||
296 | rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); | ||
297 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | ||
298 | |||
299 | /* Could aper size report 0 ? */ | ||
300 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
301 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
302 | } | 302 | } |
303 | 303 | ||
304 | 304 | ||
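
The reworked rs400_gart_set_page() above packs a DMA address wider than 32 bits into a single 32-bit GART entry: the page-aligned low 32 bits stay where they are, address bits 32..39 go into entry bits 4..11, and the constant 0xc flag bits are OR'd in. A hypothetical stand-alone version of that packing, assuming 4 KiB pages (the cpu_to_le32() byte ordering done by the driver is left out):

#include <stdint.h>
#include <stdio.h>

#define GART_PAGE_MASK 0xfffff000u /* 4 KiB pages assumed */

/* hypothetical helper mirroring the entry packing in rs400_gart_set_page() */
static uint32_t rs400_gart_entry(uint64_t addr)
{
	uint32_t lo = (uint32_t)addr;
	uint32_t hi = (uint32_t)(addr >> 32);

	return (lo & GART_PAGE_MASK) |  /* page-aligned low 32 bits of the address    */
	       ((hi & 0xff) << 4)    |  /* address bits 32..39 packed into bits 4..11 */
	       0xc;                     /* flag bits set by the driver                */
}

int main(void)
{
	/* a page just above the 4 GiB boundary */
	printf("entry = 0x%08x\n", rs400_gart_entry(0x123456000ULL)); /* -> 0x2345601c */
	return 0;
}
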
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index ab0c967553e6..bbea6dee4a94 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev) | |||
223 | printk(KERN_WARNING "Failed to wait MC idle while " | 223 | printk(KERN_WARNING "Failed to wait MC idle while " |
224 | "programming pipes. Bad things might happen.\n"); | 224 | "programming pipes. Bad things might happen.\n"); |
225 | } | 225 | } |
226 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 226 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
227 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); | 227 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); |
228 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); | 228 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); |
229 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); | 229 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); |
@@ -301,6 +301,11 @@ void rs600_vram_info(struct radeon_device *rdev) | |||
301 | rdev->mc.vram_width = 128; | 301 | rdev->mc.vram_width = 128; |
302 | } | 302 | } |
303 | 303 | ||
304 | void rs600_bandwidth_update(struct radeon_device *rdev) | ||
305 | { | ||
306 | /* FIXME: implement, should this be like rs690 ? */ | ||
307 | } | ||
308 | |||
304 | 309 | ||
305 | /* | 310 | /* |
306 | * Indirect registers accessor | 311 | * Indirect registers accessor |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 79ba85042b5f..839595b00728 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -28,6 +28,9 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "rs690r.h" | ||
32 | #include "atom.h" | ||
33 | #include "atom-bits.h" | ||
31 | 34 | ||
32 | /* rs690,rs740 depends on : */ | 35 | /* rs690,rs740 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | 36 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev) | |||
64 | rs400_gart_disable(rdev); | 67 | rs400_gart_disable(rdev); |
65 | 68 | ||
66 | /* Setup GPU memory space */ | 69 | /* Setup GPU memory space */ |
67 | rdev->mc.gtt_location = rdev->mc.vram_size; | 70 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
68 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | 71 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
69 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | 72 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
70 | rdev->mc.vram_location = 0xFFFFFFFFUL; | 73 | rdev->mc.vram_location = 0xFFFFFFFFUL; |
@@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev) | |||
79 | printk(KERN_WARNING "Failed to wait MC idle while " | 82 | printk(KERN_WARNING "Failed to wait MC idle while " |
80 | "programming pipes. Bad things might happen.\n"); | 83 | "programming pipes. Bad things might happen.\n"); |
81 | } | 84 | } |
82 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 85 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
83 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); | 86 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); |
84 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); | 87 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); |
85 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); | 88 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); |
@@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
138 | /* | 141 | /* |
139 | * VRAM info. | 142 | * VRAM info. |
140 | */ | 143 | */ |
144 | void rs690_pm_info(struct radeon_device *rdev) | ||
145 | { | ||
146 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
147 | struct _ATOM_INTEGRATED_SYSTEM_INFO *info; | ||
148 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; | ||
149 | void *ptr; | ||
150 | uint16_t data_offset; | ||
151 | uint8_t frev, crev; | ||
152 | fixed20_12 tmp; | ||
153 | |||
154 | atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, | ||
155 | &frev, &crev, &data_offset); | ||
156 | ptr = rdev->mode_info.atom_context->bios + data_offset; | ||
157 | info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; | ||
158 | info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; | ||
159 | /* Get various system information from the BIOS */ | ||
160 | switch (crev) { | ||
161 | case 1: | ||
162 | tmp.full = rfixed_const(100); | ||
163 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); | ||
164 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | ||
165 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); | ||
166 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); | ||
167 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); | ||
168 | break; | ||
169 | case 2: | ||
170 | tmp.full = rfixed_const(100); | ||
171 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); | ||
172 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | ||
173 | rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); | ||
174 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
175 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); | ||
176 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | ||
177 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); | ||
178 | break; | ||
179 | default: | ||
180 | tmp.full = rfixed_const(100); | ||
181 | /* We assume the slowest possible clock, i.e. worst case */ | ||
182 | /* DDR 333 MHz */ | ||
183 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | ||
184 | /* FIXME: system clock ? */ | ||
185 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | ||
186 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
187 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | ||
188 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | ||
189 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | ||
190 | break; | ||
191 | } | ||
192 | /* Compute various bandwidths */ | ||
193 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | ||
194 | tmp.full = rfixed_const(4); | ||
195 | rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); | ||
196 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 | ||
197 | * = ht_clk * ht_width / 5 | ||
198 | */ | ||
199 | tmp.full = rfixed_const(5); | ||
200 | rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, | ||
201 | rdev->pm.igp_ht_link_width); | ||
202 | rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); | ||
203 | if (tmp.full < rdev->pm.max_bandwidth.full) { | ||
204 | /* HT link is a limiting factor */ | ||
205 | rdev->pm.max_bandwidth.full = tmp.full; | ||
206 | } | ||
207 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 | ||
208 | * = (sideport_clk * 14) / 10 | ||
209 | */ | ||
210 | tmp.full = rfixed_const(14); | ||
211 | rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); | ||
212 | tmp.full = rfixed_const(10); | ||
213 | rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); | ||
214 | } | ||
215 | |||
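All of the bandwidth math in rs690_pm_info() above runs in 20.12 fixed point via rfixed_const()/rfixed_mul()/rfixed_div(). The self-contained sketch below only illustrates how such helpers behave; the struct, helper definitions and the raw 40000 (10 kHz units) example clock are assumptions for illustration, not the driver's own fixed-point header.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fix20_12;        /* 20 integer bits, 12 fraction bits */

static fix20_12 fix_const(uint32_t a) { return (fix20_12){ a << 12 }; }

static fix20_12 fix_mul(fix20_12 a, fix20_12 b)
{
	uint64_t t = (uint64_t)a.full * b.full;     /* product has 24 fraction bits */
	return (fix20_12){ (uint32_t)(t >> 12) };   /* rescale back to 12           */
}

static fix20_12 fix_div(fix20_12 a, fix20_12 b)
{
	uint64_t t = (uint64_t)a.full << 12;        /* pre-scale the numerator */
	return (fix20_12){ (uint32_t)(t / b.full) };
}

int main(void)
{
	/* Mirrors the rfixed_div(..., 100) and k8_bandwidth = mclk * 4 steps
	 * above; 40000 stands in for a hypothetical raw BIOS clock value. */
	fix20_12 raw   = fix_const(40000);
	fix20_12 mclk  = fix_div(raw, fix_const(100));   /* 400  */
	fix20_12 k8_bw = fix_mul(mclk, fix_const(4));    /* 1600 */

	printf("mclk=%u k8_bandwidth=%u\n",
	       (unsigned)(mclk.full >> 12), (unsigned)(k8_bw.full >> 12));
	return 0;
}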
141 | void rs690_vram_info(struct radeon_device *rdev) | 216 | void rs690_vram_info(struct radeon_device *rdev) |
142 | { | 217 | { |
143 | uint32_t tmp; | 218 | uint32_t tmp; |
219 | fixed20_12 a; | ||
144 | 220 | ||
145 | rs400_gart_adjust_size(rdev); | 221 | rs400_gart_adjust_size(rdev); |
146 | /* DDR for all card after R300 & IGP */ | 222 | /* DDR for all card after R300 & IGP */ |
@@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev) | |||
152 | } else { | 228 | } else { |
153 | rdev->mc.vram_width = 64; | 229 | rdev->mc.vram_width = 64; |
154 | } | 230 | } |
155 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 231 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
232 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
156 | 233 | ||
157 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 234 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
158 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 235 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
236 | rs690_pm_info(rdev); | ||
237 | /* FIXME: we should enforce default clock in case GPU is not in | ||
238 | * default setup | ||
239 | */ | ||
240 | a.full = rfixed_const(100); | ||
241 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
242 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
243 | a.full = rfixed_const(16); | ||
244 | /* core_bandwidth = sclk(Mhz) * 16 */ | ||
245 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
246 | } | ||
247 | |||
248 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
249 | struct drm_display_mode *mode1, | ||
250 | struct drm_display_mode *mode2) | ||
251 | { | ||
252 | u32 tmp; | ||
253 | |||
254 | /* | ||
255 | * Line Buffer Setup | ||
256 | * There is a single line buffer shared by both display controllers. | ||
257 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | ||
258 | * the display controllers. The partitioning can either be done | ||
259 | * manually or via one of four preset allocations specified in bits 1:0: | ||
260 | * 0 - line buffer is divided in half and shared between crtc | ||
261 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | ||
262 | * 2 - D1 gets the whole buffer | ||
263 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | ||
264 | * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual | ||
265 | * allocation mode. In manual allocation mode, D1 always starts at 0, | ||
266 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. | ||
267 | */ | ||
268 | tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; | ||
269 | tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
270 | /* auto */ | ||
271 | if (mode1 && mode2) { | ||
272 | if (mode1->hdisplay > mode2->hdisplay) { | ||
273 | if (mode1->hdisplay > 2560) | ||
274 | tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | ||
275 | else | ||
276 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
277 | } else if (mode2->hdisplay > mode1->hdisplay) { | ||
278 | if (mode2->hdisplay > 2560) | ||
279 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
280 | else | ||
281 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
282 | } else | ||
283 | tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
284 | } else if (mode1) { | ||
285 | tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; | ||
286 | } else if (mode2) { | ||
287 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
288 | } | ||
289 | WREG32(DC_LB_MEMORY_SPLIT, tmp); | ||
159 | } | 290 | } |
160 | 291 | ||
292 | struct rs690_watermark { | ||
293 | u32 lb_request_fifo_depth; | ||
294 | fixed20_12 num_line_pair; | ||
295 | fixed20_12 estimated_width; | ||
296 | fixed20_12 worst_case_latency; | ||
297 | fixed20_12 consumption_rate; | ||
298 | fixed20_12 active_time; | ||
299 | fixed20_12 dbpp; | ||
300 | fixed20_12 priority_mark_max; | ||
301 | fixed20_12 priority_mark; | ||
302 | fixed20_12 sclk; | ||
303 | }; | ||
304 | |||
305 | void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | ||
306 | struct radeon_crtc *crtc, | ||
307 | struct rs690_watermark *wm) | ||
308 | { | ||
309 | struct drm_display_mode *mode = &crtc->base.mode; | ||
310 | fixed20_12 a, b, c; | ||
311 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | ||
312 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | ||
313 | /* FIXME: detect IGP with sideport memory, I don't think there is any | ||
314 | * such product available | ||
315 | */ | ||
316 | bool sideport = false; | ||
317 | |||
318 | if (!crtc->base.enabled) { | ||
319 | /* FIXME: wouldn't it be better to set priority mark to maximum */ | ||
320 | wm->lb_request_fifo_depth = 4; | ||
321 | return; | ||
322 | } | ||
323 | |||
324 | if (crtc->vsc.full > rfixed_const(2)) | ||
325 | wm->num_line_pair.full = rfixed_const(2); | ||
326 | else | ||
327 | wm->num_line_pair.full = rfixed_const(1); | ||
328 | |||
329 | b.full = rfixed_const(mode->crtc_hdisplay); | ||
330 | c.full = rfixed_const(256); | ||
331 | a.full = rfixed_mul(wm->num_line_pair, b); | ||
332 | request_fifo_depth.full = rfixed_div(a, c); | ||
333 | if (a.full < rfixed_const(4)) { | ||
334 | wm->lb_request_fifo_depth = 4; | ||
335 | } else { | ||
336 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | ||
337 | } | ||
338 | |||
339 | /* Determine consumption rate | ||
340 | * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) | ||
341 | * vtaps = number of vertical taps, | ||
342 | * vsc = vertical scaling ratio, defined as source/destination | ||
343 | * hsc = horizontal scaling ratio, defined as source/destination | ||
344 | */ | ||
345 | a.full = rfixed_const(mode->clock); | ||
346 | b.full = rfixed_const(1000); | ||
347 | a.full = rfixed_div(a, b); | ||
348 | pclk.full = rfixed_div(b, a); | ||
349 | if (crtc->rmx_type != RMX_OFF) { | ||
350 | b.full = rfixed_const(2); | ||
351 | if (crtc->vsc.full > b.full) | ||
352 | b.full = crtc->vsc.full; | ||
353 | b.full = rfixed_mul(b, crtc->hsc); | ||
354 | c.full = rfixed_const(2); | ||
355 | b.full = rfixed_div(b, c); | ||
356 | consumption_time.full = rfixed_div(pclk, b); | ||
357 | } else { | ||
358 | consumption_time.full = pclk.full; | ||
359 | } | ||
360 | a.full = rfixed_const(1); | ||
361 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | ||
362 | |||
363 | |||
364 | /* Determine line time | ||
365 | * LineTime = total time for one line of display | ||
366 | * htotal = total number of horizontal pixels | ||
367 | * pclk = pixel clock period(ns) | ||
368 | */ | ||
369 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
370 | line_time.full = rfixed_mul(a, pclk); | ||
371 | |||
372 | /* Determine active time | ||
373 | * ActiveTime = time of active region of display within one line, | ||
374 | * hactive = total number of horizontal active pixels | ||
375 | * htotal = total number of horizontal pixels | ||
376 | */ | ||
377 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
378 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
379 | wm->active_time.full = rfixed_mul(line_time, b); | ||
380 | wm->active_time.full = rfixed_div(wm->active_time, a); | ||
381 | |||
382 | /* Maximum bandwidth is the minimum bandwidth of all components */ | ||
383 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | ||
384 | if (sideport) { | ||
385 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | ||
386 | rdev->pm.sideport_bandwidth.full) | ||
387 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | ||
388 | read_delay_latency.full = rfixed_const(370 * 800 * 1000); | ||
389 | read_delay_latency.full = rfixed_div(read_delay_latency, | ||
390 | rdev->pm.igp_sideport_mclk); | ||
391 | } else { | ||
392 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && | ||
393 | rdev->pm.k8_bandwidth.full) | ||
394 | rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; | ||
395 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && | ||
396 | rdev->pm.ht_bandwidth.full) | ||
397 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; | ||
398 | read_delay_latency.full = rfixed_const(5000); | ||
399 | } | ||
400 | |||
401 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ | ||
402 | a.full = rfixed_const(16); | ||
403 | rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); | ||
404 | a.full = rfixed_const(1000); | ||
405 | rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); | ||
406 | /* Determine chunk time | ||
407 | * ChunkTime = the time it takes the DCP to send one chunk of data | ||
408 | * to the LB which consists of pipeline delay and inter chunk gap | ||
409 | * sclk = system clock(ns) | ||
410 | */ | ||
411 | a.full = rfixed_const(256 * 13); | ||
412 | chunk_time.full = rfixed_mul(rdev->pm.sclk, a); | ||
413 | a.full = rfixed_const(10); | ||
414 | chunk_time.full = rfixed_div(chunk_time, a); | ||
415 | |||
416 | /* Determine the worst case latency | ||
417 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | ||
418 | * WorstCaseLatency = worst case time from urgent to when the MC starts | ||
419 | * to return data | ||
420 | * READ_DELAY_IDLE_MAX = constant of 1us | ||
421 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | ||
422 | * which consists of pipeline delay and inter chunk gap | ||
423 | */ | ||
424 | if (rfixed_trunc(wm->num_line_pair) > 1) { | ||
425 | a.full = rfixed_const(3); | ||
426 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
427 | wm->worst_case_latency.full += read_delay_latency.full; | ||
428 | } else { | ||
429 | a.full = rfixed_const(2); | ||
430 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
431 | wm->worst_case_latency.full += read_delay_latency.full; | ||
432 | } | ||
433 | |||
434 | /* Determine the tolerable latency | ||
435 | * TolerableLatency = Any given request has only 1 line time | ||
436 | * for the data to be returned | ||
437 | * LBRequestFifoDepth = Number of chunk requests the LB can | ||
438 | * put into the request FIFO for a display | ||
439 | * LineTime = total time for one line of display | ||
440 | * ChunkTime = the time it takes the DCP to send one chunk | ||
441 | * of data to the LB which consists of | ||
442 | * pipeline delay and inter chunk gap | ||
443 | */ | ||
444 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | ||
445 | tolerable_latency.full = line_time.full; | ||
446 | } else { | ||
447 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | ||
448 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | ||
449 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | ||
450 | tolerable_latency.full = line_time.full - tolerable_latency.full; | ||
451 | } | ||
452 | /* We assume worst case 32bits (4 bytes) */ | ||
453 | wm->dbpp.full = rfixed_const(4 * 8); | ||
454 | |||
455 | /* Determine the maximum priority mark | ||
456 | * width = viewport width in pixels | ||
457 | */ | ||
458 | a.full = rfixed_const(16); | ||
459 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
460 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | ||
461 | |||
462 | /* Determine estimated width */ | ||
463 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | ||
464 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | ||
465 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | ||
466 | wm->priority_mark.full = rfixed_const(10); | ||
467 | } else { | ||
468 | a.full = rfixed_const(16); | ||
469 | wm->priority_mark.full = rfixed_div(estimated_width, a); | ||
470 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | ||
471 | } | ||
472 | } | ||
473 | |||
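To make the consumption-rate, line-time and active-time formulas in rs690_crtc_bandwidth_compute() above concrete, here is the same arithmetic in plain floating point for a hypothetical, unscaled 1920x1080 mode with a 148.5 MHz pixel clock. All numbers are illustrative examples, nothing here is read from hardware.

#include <stdio.h>

int main(void)
{
	double clock_khz = 148500.0;                   /* mode->clock          */
	double htotal = 2200.0, hdisplay = 1920.0;

	double pclk = 1000.0 / (clock_khz / 1000.0);   /* ns per pixel         */
	double consumption_rate = 1.0 / pclk;          /* pixels per ns        */
	double line_time = htotal * pclk;              /* ns per scanline      */
	double active_time = line_time * hdisplay / htotal;

	printf("pclk=%.2f ns  rate=%.3f px/ns  line=%.0f ns  active=%.0f ns\n",
	       pclk, consumption_rate, line_time, active_time);
	return 0;
}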
474 | void rs690_bandwidth_update(struct radeon_device *rdev) | ||
475 | { | ||
476 | struct drm_display_mode *mode0 = NULL; | ||
477 | struct drm_display_mode *mode1 = NULL; | ||
478 | struct rs690_watermark wm0; | ||
479 | struct rs690_watermark wm1; | ||
480 | u32 tmp; | ||
481 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | ||
482 | fixed20_12 a, b; | ||
483 | |||
484 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
485 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
486 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
487 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
488 | /* | ||
489 | * Set display0/1 priority up in the memory controller for | ||
490 | * modes if the user specifies HIGH for displaypriority | ||
491 | * option. | ||
492 | */ | ||
493 | if (rdev->disp_priority == 2) { | ||
494 | tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); | ||
495 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | ||
496 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | ||
497 | if (mode1) | ||
498 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
499 | if (mode0) | ||
500 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | ||
501 | WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); | ||
502 | } | ||
503 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
504 | |||
505 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) | ||
506 | WREG32(DCP_CONTROL, 0); | ||
507 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | ||
508 | WREG32(DCP_CONTROL, 2); | ||
509 | |||
510 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | ||
511 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | ||
512 | |||
513 | tmp = (wm0.lb_request_fifo_depth - 1); | ||
514 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; | ||
515 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | ||
516 | |||
517 | if (mode0 && mode1) { | ||
518 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
519 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | ||
520 | else | ||
521 | a.full = wm0.num_line_pair.full; | ||
522 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
523 | b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | ||
524 | else | ||
525 | b.full = wm1.num_line_pair.full; | ||
526 | a.full += b.full; | ||
527 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
528 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
529 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
530 | b.full = rfixed_mul(b, wm0.active_time); | ||
531 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
532 | wm0.consumption_rate); | ||
533 | a.full = a.full + b.full; | ||
534 | b.full = rfixed_const(16 * 1000); | ||
535 | priority_mark02.full = rfixed_div(a, b); | ||
536 | } else { | ||
537 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
538 | wm0.consumption_rate); | ||
539 | b.full = rfixed_const(16 * 1000); | ||
540 | priority_mark02.full = rfixed_div(a, b); | ||
541 | } | ||
542 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
543 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
544 | b.full = rfixed_mul(b, wm1.active_time); | ||
545 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
546 | wm1.consumption_rate); | ||
547 | a.full = a.full + b.full; | ||
548 | b.full = rfixed_const(16 * 1000); | ||
549 | priority_mark12.full = rfixed_div(a, b); | ||
550 | } else { | ||
551 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
552 | wm1.consumption_rate); | ||
553 | b.full = rfixed_const(16 * 1000); | ||
554 | priority_mark12.full = rfixed_div(a, b); | ||
555 | } | ||
556 | if (wm0.priority_mark.full > priority_mark02.full) | ||
557 | priority_mark02.full = wm0.priority_mark.full; | ||
558 | if (rfixed_trunc(priority_mark02) < 0) | ||
559 | priority_mark02.full = 0; | ||
560 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
561 | priority_mark02.full = wm0.priority_mark_max.full; | ||
562 | if (wm1.priority_mark.full > priority_mark12.full) | ||
563 | priority_mark12.full = wm1.priority_mark.full; | ||
564 | if (rfixed_trunc(priority_mark12) < 0) | ||
565 | priority_mark12.full = 0; | ||
566 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
567 | priority_mark12.full = wm1.priority_mark_max.full; | ||
568 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
569 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
570 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
571 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
572 | } else if (mode0) { | ||
573 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
574 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | ||
575 | else | ||
576 | a.full = wm0.num_line_pair.full; | ||
577 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
578 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
579 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
580 | b.full = rfixed_mul(b, wm0.active_time); | ||
581 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
582 | wm0.consumption_rate); | ||
583 | a.full = a.full + b.full; | ||
584 | b.full = rfixed_const(16 * 1000); | ||
585 | priority_mark02.full = rfixed_div(a, b); | ||
586 | } else { | ||
587 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
588 | wm0.consumption_rate); | ||
589 | b.full = rfixed_const(16 * 1000); | ||
590 | priority_mark02.full = rfixed_div(a, b); | ||
591 | } | ||
592 | if (wm0.priority_mark.full > priority_mark02.full) | ||
593 | priority_mark02.full = wm0.priority_mark.full; | ||
594 | if (rfixed_trunc(priority_mark02) < 0) | ||
595 | priority_mark02.full = 0; | ||
596 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
597 | priority_mark02.full = wm0.priority_mark_max.full; | ||
598 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
599 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
600 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
601 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
602 | } else { | ||
603 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
604 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | ||
605 | else | ||
606 | a.full = wm1.num_line_pair.full; | ||
607 | fill_rate.full = rfixed_div(wm1.sclk, a); | ||
608 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
609 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
610 | b.full = rfixed_mul(b, wm1.active_time); | ||
611 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
612 | wm1.consumption_rate); | ||
613 | a.full = a.full + b.full; | ||
614 | b.full = rfixed_const(16 * 1000); | ||
615 | priority_mark12.full = rfixed_div(a, b); | ||
616 | } else { | ||
617 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
618 | wm1.consumption_rate); | ||
619 | b.full = rfixed_const(16 * 1000); | ||
620 | priority_mark12.full = rfixed_div(a, b); | ||
621 | } | ||
622 | if (wm1.priority_mark.full > priority_mark12.full) | ||
623 | priority_mark12.full = wm1.priority_mark.full; | ||
624 | if (rfixed_trunc(priority_mark12) < 0) | ||
625 | priority_mark12.full = 0; | ||
626 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
627 | priority_mark12.full = wm1.priority_mark_max.full; | ||
628 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
629 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
630 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
631 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
632 | } | ||
633 | } | ||
161 | 634 | ||
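Per CRTC, the priority-mark expression in rs690_bandwidth_update() above reduces to (worst_case_latency * consumption_rate + max(0, consumption_rate - fill_rate) * active_time) / (16 * 1000). A rough floating-point restatement follows; every input value is a made-up placeholder just to show the shape of the computation (units: ns and pixels/ns).

#include <stdio.h>

int main(void)
{
	double worst_case_latency = 7000.0;   /* ns             */
	double consumption_rate   = 0.148;    /* pixels per ns  */
	double fill_rate          = 0.120;    /* pixels per ns  */
	double active_time        = 12900.0;  /* ns             */
	double mark;

	if (consumption_rate > fill_rate)
		mark = (worst_case_latency * consumption_rate +
			(consumption_rate - fill_rate) * active_time) / (16.0 * 1000.0);
	else
		mark = worst_case_latency * consumption_rate / (16.0 * 1000.0);

	printf("priority mark ~ %.2f\n", mark);   /* ~0.09 with these inputs */
	return 0;
}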
162 | /* | 635 | /* |
163 | * Indirect registers accessor | 636 | * Indirect registers accessor |
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h new file mode 100644 index 000000000000..c0d9faa2175b --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690r.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RS690R_H | ||
29 | #define RS690R_H | ||
30 | |||
31 | /* RS690/RS740 registers */ | ||
32 | #define MC_INDEX 0x0078 | ||
33 | # define MC_INDEX_MASK 0x1FF | ||
34 | # define MC_INDEX_WR_EN (1 << 9) | ||
35 | # define MC_INDEX_WR_ACK 0x7F | ||
36 | #define MC_DATA 0x007C | ||
37 | #define HDP_FB_LOCATION 0x0134 | ||
38 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
39 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
40 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
41 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
42 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
43 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
44 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
45 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
46 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
47 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
48 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
49 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
50 | #define MODE_PRIORITY_OFF (1 << 16) | ||
51 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
52 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
53 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
54 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
55 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
56 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
57 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
58 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
59 | #define DCP_CONTROL 0x6C9C | ||
60 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
61 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
62 | |||
63 | /* MC indirect registers */ | ||
64 | #define MC_STATUS_IDLE (1 << 0) | ||
65 | #define MC_MISC_CNTL 0x18 | ||
66 | #define DISABLE_GTW (1 << 1) | ||
67 | #define GART_INDEX_REG_EN (1 << 12) | ||
68 | #define BLOCK_GFX_D3_EN (1 << 14) | ||
69 | #define GART_FEATURE_ID 0x2B | ||
70 | #define HANG_EN (1 << 11) | ||
71 | #define TLB_ENABLE (1 << 18) | ||
72 | #define P2P_ENABLE (1 << 19) | ||
73 | #define GTW_LAC_EN (1 << 25) | ||
74 | #define LEVEL2_GART (0 << 30) | ||
75 | #define LEVEL1_GART (1 << 30) | ||
76 | #define PDC_EN (1 << 31) | ||
77 | #define GART_BASE 0x2C | ||
78 | #define GART_CACHE_CNTRL 0x2E | ||
79 | # define GART_CACHE_INVALIDATE (1 << 0) | ||
80 | #define MC_STATUS 0x90 | ||
81 | #define MCCFG_FB_LOCATION 0x100 | ||
82 | #define MC_FB_START_MASK 0x0000FFFF | ||
83 | #define MC_FB_START_SHIFT 0 | ||
84 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
85 | #define MC_FB_TOP_SHIFT 16 | ||
86 | #define MCCFG_AGP_LOCATION 0x101 | ||
87 | #define MC_AGP_START_MASK 0x0000FFFF | ||
88 | #define MC_AGP_START_SHIFT 0 | ||
89 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
90 | #define MC_AGP_TOP_SHIFT 16 | ||
91 | #define MCCFG_AGP_BASE 0x102 | ||
92 | #define MCCFG_AGP_BASE_2 0x103 | ||
93 | #define MC_INIT_MISC_LAT_TIMER 0x104 | ||
94 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
95 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
96 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
97 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
98 | |||
99 | #endif | ||
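The MC_INDEX/MC_DATA pair at the top of this header is the usual index/data window into the memory-controller register block. Below is a minimal sketch of that access pattern against a fake MMIO array so it runs standalone; the write-enable handshake and the helper names are assumptions for illustration, not code copied from the driver.

#include <stdint.h>
#include <stdio.h>

#define MC_INDEX        0x0078
#define MC_INDEX_MASK   0x1FF
#define MC_INDEX_WR_EN  (1 << 9)
#define MC_DATA         0x007C

/* Fake MMIO space standing in for the real register read/write helpers. */
static uint32_t fake_mmio[0x10000 / 4];
static void wreg32(uint32_t reg, uint32_t v) { fake_mmio[reg / 4] = v; }
static uint32_t rreg32(uint32_t reg)         { return fake_mmio[reg / 4]; }

static uint32_t mc_rreg(uint32_t reg)
{
	wreg32(MC_INDEX, reg & MC_INDEX_MASK);   /* select the MC register   */
	return rreg32(MC_DATA);                  /* read through the window  */
}

static void mc_wreg(uint32_t reg, uint32_t v)
{
	wreg32(MC_INDEX, MC_INDEX_WR_EN | (reg & MC_INDEX_MASK));
	wreg32(MC_DATA, v);                      /* write through the window */
}

int main(void)
{
	mc_wreg(0x104, 0xF00);                   /* e.g. MC_INIT_MISC_LAT_TIMER */
	printf("0x%X\n", mc_rreg(0x104));        /* fake model returns the last MC_DATA write */
	return 0;
}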
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffea37b1b3e2..551e608702e4 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -27,8 +27,9 @@ | |||
27 | */ | 27 | */ |
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "radeon_reg.h" | 30 | #include "rv515r.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_share.h" | ||
32 | 33 | ||
33 | /* rv515 depends on : */ | 34 | /* rv515 depends on : */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev) | |||
99 | "programming pipes. Bad things might happen.\n"); | 100 | "programming pipes. Bad things might happen.\n"); |
100 | } | 101 | } |
101 | /* Write VRAM size in case we are limiting it */ | 102 | /* Write VRAM size in case we are limiting it */ |
102 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 103 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
103 | tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); | 104 | tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
104 | WREG32(0x134, tmp); | 105 | WREG32(0x134, tmp); |
105 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 106 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
106 | tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); | 107 | tmp = REG_SET(MC_FB_TOP, tmp >> 16); |
107 | tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); | 108 | tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
108 | WREG32_MC(RV515_MC_FB_LOCATION, tmp); | 109 | WREG32_MC(MC_FB_LOCATION, tmp); |
109 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | 110 | WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); |
110 | WREG32(0x310, rdev->mc.vram_location); | 111 | WREG32(0x310, rdev->mc.vram_location); |
111 | if (rdev->flags & RADEON_IS_AGP) { | 112 | if (rdev->flags & RADEON_IS_AGP) { |
112 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 113 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
113 | tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); | 114 | tmp = REG_SET(MC_AGP_TOP, tmp >> 16); |
114 | tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); | 115 | tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); |
115 | WREG32_MC(RV515_MC_AGP_LOCATION, tmp); | 116 | WREG32_MC(MC_AGP_LOCATION, tmp); |
116 | WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); | 117 | WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); |
117 | WREG32_MC(RV515_MC_AGP_BASE_2, 0); | 118 | WREG32_MC(MC_AGP_BASE_2, 0); |
118 | } else { | 119 | } else { |
119 | WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); | 120 | WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); |
120 | WREG32_MC(RV515_MC_AGP_BASE, 0); | 121 | WREG32_MC(MC_AGP_BASE, 0); |
121 | WREG32_MC(RV515_MC_AGP_BASE_2, 0); | 122 | WREG32_MC(MC_AGP_BASE_2, 0); |
122 | } | 123 | } |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
@@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev) | |||
136 | */ | 137 | */ |
137 | void rv515_ring_start(struct radeon_device *rdev) | 138 | void rv515_ring_start(struct radeon_device *rdev) |
138 | { | 139 | { |
139 | unsigned gb_tile_config; | ||
140 | int r; | 140 | int r; |
141 | 141 | ||
142 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ | ||
143 | gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16; | ||
144 | switch (rdev->num_gb_pipes) { | ||
145 | case 2: | ||
146 | gb_tile_config |= R300_PIPE_COUNT_R300; | ||
147 | break; | ||
148 | case 3: | ||
149 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; | ||
150 | break; | ||
151 | case 4: | ||
152 | gb_tile_config |= R300_PIPE_COUNT_R420; | ||
153 | break; | ||
154 | case 1: | ||
155 | default: | ||
156 | gb_tile_config |= R300_PIPE_COUNT_RV350; | ||
157 | break; | ||
158 | } | ||
159 | |||
160 | r = radeon_ring_lock(rdev, 64); | 142 | r = radeon_ring_lock(rdev, 64); |
161 | if (r) { | 143 | if (r) { |
162 | return; | 144 | return; |
163 | } | 145 | } |
164 | radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); | 146 | radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0)); |
165 | radeon_ring_write(rdev, | ||
166 | RADEON_ISYNC_ANY2D_IDLE3D | | ||
167 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
168 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
169 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | ||
170 | radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); | ||
171 | radeon_ring_write(rdev, gb_tile_config); | ||
172 | radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
173 | radeon_ring_write(rdev, | 147 | radeon_ring_write(rdev, |
174 | RADEON_WAIT_2D_IDLECLEAN | | 148 | ISYNC_ANY2D_IDLE3D | |
175 | RADEON_WAIT_3D_IDLECLEAN); | 149 | ISYNC_ANY3D_IDLE2D | |
150 | ISYNC_WAIT_IDLEGUI | | ||
151 | ISYNC_CPSCRATCH_IDLEGUI); | ||
152 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); | ||
153 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | ||
176 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); | 154 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); |
177 | radeon_ring_write(rdev, 1 << 31); | 155 | radeon_ring_write(rdev, 1 << 31); |
178 | radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); | 156 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
179 | radeon_ring_write(rdev, 0); | 157 | radeon_ring_write(rdev, 0); |
180 | radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); | 158 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
181 | radeon_ring_write(rdev, 0); | 159 | radeon_ring_write(rdev, 0); |
182 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); | 160 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); |
183 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); | 161 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
184 | radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); | 162 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
185 | radeon_ring_write(rdev, 0); | 163 | radeon_ring_write(rdev, 0); |
186 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 164 | radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
187 | radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 165 | radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
188 | radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 166 | radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
189 | radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); | 167 | radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
190 | radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); | 168 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
191 | radeon_ring_write(rdev, | 169 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
192 | RADEON_WAIT_2D_IDLECLEAN | | 170 | radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0)); |
193 | RADEON_WAIT_3D_IDLECLEAN); | ||
194 | radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); | ||
195 | radeon_ring_write(rdev, 0); | 171 | radeon_ring_write(rdev, 0); |
196 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 172 | radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
197 | radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 173 | radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
198 | radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 174 | radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
199 | radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); | 175 | radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
200 | radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); | 176 | radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0)); |
201 | radeon_ring_write(rdev, | ||
202 | ((6 << R300_MS_X0_SHIFT) | | ||
203 | (6 << R300_MS_Y0_SHIFT) | | ||
204 | (6 << R300_MS_X1_SHIFT) | | ||
205 | (6 << R300_MS_Y1_SHIFT) | | ||
206 | (6 << R300_MS_X2_SHIFT) | | ||
207 | (6 << R300_MS_Y2_SHIFT) | | ||
208 | (6 << R300_MSBD0_Y_SHIFT) | | ||
209 | (6 << R300_MSBD0_X_SHIFT))); | ||
210 | radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); | ||
211 | radeon_ring_write(rdev, | 177 | radeon_ring_write(rdev, |
212 | ((6 << R300_MS_X3_SHIFT) | | 178 | ((6 << MS_X0_SHIFT) | |
213 | (6 << R300_MS_Y3_SHIFT) | | 179 | (6 << MS_Y0_SHIFT) | |
214 | (6 << R300_MS_X4_SHIFT) | | 180 | (6 << MS_X1_SHIFT) | |
215 | (6 << R300_MS_Y4_SHIFT) | | 181 | (6 << MS_Y1_SHIFT) | |
216 | (6 << R300_MS_X5_SHIFT) | | 182 | (6 << MS_X2_SHIFT) | |
217 | (6 << R300_MS_Y5_SHIFT) | | 183 | (6 << MS_Y2_SHIFT) | |
218 | (6 << R300_MSBD1_SHIFT))); | 184 | (6 << MSBD0_Y_SHIFT) | |
219 | radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); | 185 | (6 << MSBD0_X_SHIFT))); |
220 | radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); | 186 | radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0)); |
221 | radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); | ||
222 | radeon_ring_write(rdev, | 187 | radeon_ring_write(rdev, |
223 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); | 188 | ((6 << MS_X3_SHIFT) | |
224 | radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); | 189 | (6 << MS_Y3_SHIFT) | |
225 | radeon_ring_write(rdev, | 190 | (6 << MS_X4_SHIFT) | |
226 | R300_GEOMETRY_ROUND_NEAREST | | 191 | (6 << MS_Y4_SHIFT) | |
227 | R300_COLOR_ROUND_NEAREST); | 192 | (6 << MS_X5_SHIFT) | |
193 | (6 << MS_Y5_SHIFT) | | ||
194 | (6 << MSBD1_SHIFT))); | ||
195 | radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0)); | ||
196 | radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); | ||
197 | radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0)); | ||
198 | radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); | ||
199 | radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0)); | ||
200 | radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); | ||
228 | radeon_ring_write(rdev, PACKET0(0x20C8, 0)); | 201 | radeon_ring_write(rdev, PACKET0(0x20C8, 0)); |
229 | radeon_ring_write(rdev, 0); | 202 | radeon_ring_write(rdev, 0); |
230 | radeon_ring_unlock_commit(rdev); | 203 | radeon_ring_unlock_commit(rdev); |
@@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) | |||
242 | 215 | ||
243 | for (i = 0; i < rdev->usec_timeout; i++) { | 216 | for (i = 0; i < rdev->usec_timeout; i++) { |
244 | /* read MC_STATUS */ | 217 | /* read MC_STATUS */ |
245 | tmp = RREG32_MC(RV515_MC_STATUS); | 218 | tmp = RREG32_MC(MC_STATUS); |
246 | if (tmp & RV515_MC_STATUS_IDLE) { | 219 | if (tmp & MC_STATUS_IDLE) { |
247 | return 0; | 220 | return 0; |
248 | } | 221 | } |
249 | DRM_UDELAY(1); | 222 | DRM_UDELAY(1); |
@@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev) | |||
291 | reinit_cp = rdev->cp.ready; | 264 | reinit_cp = rdev->cp.ready; |
292 | rdev->cp.ready = false; | 265 | rdev->cp.ready = false; |
293 | for (i = 0; i < rdev->usec_timeout; i++) { | 266 | for (i = 0; i < rdev->usec_timeout; i++) { |
294 | WREG32(RADEON_CP_CSQ_MODE, 0); | 267 | WREG32(CP_CSQ_MODE, 0); |
295 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 268 | WREG32(CP_CSQ_CNTL, 0); |
296 | WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); | 269 | WREG32(RBBM_SOFT_RESET, 0x32005); |
297 | (void)RREG32(RADEON_RBBM_SOFT_RESET); | 270 | (void)RREG32(RBBM_SOFT_RESET); |
298 | udelay(200); | 271 | udelay(200); |
299 | WREG32(RADEON_RBBM_SOFT_RESET, 0); | 272 | WREG32(RBBM_SOFT_RESET, 0); |
300 | /* Wait to prevent race in RBBM_STATUS */ | 273 | /* Wait to prevent race in RBBM_STATUS */ |
301 | mdelay(1); | 274 | mdelay(1); |
302 | tmp = RREG32(RADEON_RBBM_STATUS); | 275 | tmp = RREG32(RBBM_STATUS); |
303 | if (tmp & ((1 << 20) | (1 << 26))) { | 276 | if (tmp & ((1 << 20) | (1 << 26))) { |
304 | DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); | 277 | DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); |
305 | /* GA still busy soft reset it */ | 278 | /* GA still busy soft reset it */ |
306 | WREG32(0x429C, 0x200); | 279 | WREG32(0x429C, 0x200); |
307 | WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); | 280 | WREG32(VAP_PVS_STATE_FLUSH_REG, 0); |
308 | WREG32(0x43E0, 0); | 281 | WREG32(0x43E0, 0); |
309 | WREG32(0x43E4, 0); | 282 | WREG32(0x43E4, 0); |
310 | WREG32(0x24AC, 0); | 283 | WREG32(0x24AC, 0); |
311 | } | 284 | } |
312 | /* Wait to prevent race in RBBM_STATUS */ | 285 | /* Wait to prevent race in RBBM_STATUS */ |
313 | mdelay(1); | 286 | mdelay(1); |
314 | tmp = RREG32(RADEON_RBBM_STATUS); | 287 | tmp = RREG32(RBBM_STATUS); |
315 | if (!(tmp & ((1 << 20) | (1 << 26)))) { | 288 | if (!(tmp & ((1 << 20) | (1 << 26)))) { |
316 | break; | 289 | break; |
317 | } | 290 | } |
318 | } | 291 | } |
319 | for (i = 0; i < rdev->usec_timeout; i++) { | 292 | for (i = 0; i < rdev->usec_timeout; i++) { |
320 | tmp = RREG32(RADEON_RBBM_STATUS); | 293 | tmp = RREG32(RBBM_STATUS); |
321 | if (!(tmp & ((1 << 20) | (1 << 26)))) { | 294 | if (!(tmp & ((1 << 20) | (1 << 26)))) { |
322 | DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", | 295 | DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", |
323 | tmp); | 296 | tmp); |
@@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev) | |||
331 | } | 304 | } |
332 | DRM_UDELAY(1); | 305 | DRM_UDELAY(1); |
333 | } | 306 | } |
334 | tmp = RREG32(RADEON_RBBM_STATUS); | 307 | tmp = RREG32(RBBM_STATUS); |
335 | DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); | 308 | DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); |
336 | return -1; | 309 | return -1; |
337 | } | 310 | } |
@@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
341 | uint32_t status; | 314 | uint32_t status; |
342 | 315 | ||
343 | /* reset order likely matter */ | 316 | /* reset order likely matter */ |
344 | status = RREG32(RADEON_RBBM_STATUS); | 317 | status = RREG32(RBBM_STATUS); |
345 | /* reset HDP */ | 318 | /* reset HDP */ |
346 | r100_hdp_reset(rdev); | 319 | r100_hdp_reset(rdev); |
347 | /* reset rb2d */ | 320 | /* reset rb2d */ |
@@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
353 | rv515_ga_reset(rdev); | 326 | rv515_ga_reset(rdev); |
354 | } | 327 | } |
355 | /* reset CP */ | 328 | /* reset CP */ |
356 | status = RREG32(RADEON_RBBM_STATUS); | 329 | status = RREG32(RBBM_STATUS); |
357 | if (status & (1 << 16)) { | 330 | if (status & (1 << 16)) { |
358 | r100_cp_reset(rdev); | 331 | r100_cp_reset(rdev); |
359 | } | 332 | } |
360 | /* Check if GPU is idle */ | 333 | /* Check if GPU is idle */ |
361 | status = RREG32(RADEON_RBBM_STATUS); | 334 | status = RREG32(RBBM_STATUS); |
362 | if (status & (1 << 31)) { | 335 | if (status & (1 << 31)) { |
363 | DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); | 336 | DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); |
364 | return -1; | 337 | return -1; |
@@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
377 | 350 | ||
378 | rdev->mc.vram_width = 128; | 351 | rdev->mc.vram_width = 128; |
379 | rdev->mc.vram_is_ddr = true; | 352 | rdev->mc.vram_is_ddr = true; |
380 | tmp = RREG32_MC(RV515_MC_CNTL); | 353 | tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK; |
381 | tmp &= RV515_MEM_NUM_CHANNELS_MASK; | ||
382 | switch (tmp) { | 354 | switch (tmp) { |
383 | case 0: | 355 | case 0: |
384 | rdev->mc.vram_width = 64; | 356 | rdev->mc.vram_width = 64; |
@@ -394,11 +366,16 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
394 | 366 | ||
395 | void rv515_vram_info(struct radeon_device *rdev) | 367 | void rv515_vram_info(struct radeon_device *rdev) |
396 | { | 368 | { |
369 | fixed20_12 a; | ||
370 | |||
397 | rv515_vram_get_type(rdev); | 371 | rv515_vram_get_type(rdev); |
398 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
399 | 372 | ||
400 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 373 | /* FIXME: we should enforce default clock in case GPU is not in |
401 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 374 | * default setup |
375 | */ | ||
376 | a.full = rfixed_const(100); | ||
377 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
378 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
402 | } | 379 | } |
403 | 380 | ||
404 | 381 | ||
@@ -409,35 +386,35 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | |||
409 | { | 386 | { |
410 | uint32_t r; | 387 | uint32_t r; |
411 | 388 | ||
412 | WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); | 389 | WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); |
413 | r = RREG32(R520_MC_IND_DATA); | 390 | r = RREG32(MC_IND_DATA); |
414 | WREG32(R520_MC_IND_INDEX, 0); | 391 | WREG32(MC_IND_INDEX, 0); |
415 | return r; | 392 | return r; |
416 | } | 393 | } |
417 | 394 | ||
418 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 395 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
419 | { | 396 | { |
420 | WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); | 397 | WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); |
421 | WREG32(R520_MC_IND_DATA, (v)); | 398 | WREG32(MC_IND_DATA, (v)); |
422 | WREG32(R520_MC_IND_INDEX, 0); | 399 | WREG32(MC_IND_INDEX, 0); |
423 | } | 400 | } |
424 | 401 | ||
425 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | 402 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
426 | { | 403 | { |
427 | uint32_t r; | 404 | uint32_t r; |
428 | 405 | ||
429 | WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); | 406 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); |
430 | (void)RREG32(RADEON_PCIE_INDEX); | 407 | (void)RREG32(PCIE_INDEX); |
431 | r = RREG32(RADEON_PCIE_DATA); | 408 | r = RREG32(PCIE_DATA); |
432 | return r; | 409 | return r; |
433 | } | 410 | } |
434 | 411 | ||
435 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 412 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
436 | { | 413 | { |
437 | WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); | 414 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); |
438 | (void)RREG32(RADEON_PCIE_INDEX); | 415 | (void)RREG32(PCIE_INDEX); |
439 | WREG32(RADEON_PCIE_DATA, (v)); | 416 | WREG32(PCIE_DATA, (v)); |
440 | (void)RREG32(RADEON_PCIE_DATA); | 417 | (void)RREG32(PCIE_DATA); |
441 | } | 418 | } |
442 | 419 | ||
443 | 420 | ||
@@ -452,13 +429,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) | |||
452 | struct radeon_device *rdev = dev->dev_private; | 429 | struct radeon_device *rdev = dev->dev_private; |
453 | uint32_t tmp; | 430 | uint32_t tmp; |
454 | 431 | ||
455 | tmp = RREG32(R400_GB_PIPE_SELECT); | 432 | tmp = RREG32(GB_PIPE_SELECT); |
456 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); | 433 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); |
457 | tmp = RREG32(R500_SU_REG_DEST); | 434 | tmp = RREG32(SU_REG_DEST); |
458 | seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); | 435 | seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); |
459 | tmp = RREG32(R300_GB_TILE_CONFIG); | 436 | tmp = RREG32(GB_TILE_CONFIG); |
460 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); | 437 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); |
461 | tmp = RREG32(R300_DST_PIPE_CONFIG); | 438 | tmp = RREG32(DST_PIPE_CONFIG); |
462 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); | 439 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); |
463 | return 0; | 440 | return 0; |
464 | } | 441 | } |
@@ -509,9 +486,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) | |||
509 | /* | 486 | /* |
510 | * Asic initialization | 487 | * Asic initialization |
511 | */ | 488 | */ |
512 | static const unsigned r500_reg_safe_bm[159] = { | 489 | static const unsigned r500_reg_safe_bm[219] = { |
490 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
513 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 491 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
514 | 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, | ||
515 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 492 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
516 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 493 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
517 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 494 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
@@ -549,14 +526,575 @@ static const unsigned r500_reg_safe_bm[159] = { | |||
549 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 526 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
550 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, | 527 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, |
551 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 528 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
552 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, | 529 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, |
530 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
531 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
532 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
533 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
534 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
535 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
536 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
537 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
538 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
539 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
540 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
541 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
542 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
543 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
544 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
553 | }; | 545 | }; |
554 | 546 | ||
555 | |||
556 | |||
557 | int rv515_init(struct radeon_device *rdev) | 547 | int rv515_init(struct radeon_device *rdev) |
558 | { | 548 | { |
559 | rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; | 549 | rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; |
560 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); | 550 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); |
561 | return 0; | 551 | return 0; |
562 | } | 552 | } |
553 | |||
554 | void atom_rv515_force_tv_scaler(struct radeon_device *rdev) | ||
555 | { | ||
556 | |||
557 | WREG32(0x659C, 0x0); | ||
558 | WREG32(0x6594, 0x705); | ||
559 | WREG32(0x65A4, 0x10001); | ||
560 | WREG32(0x65D8, 0x0); | ||
561 | WREG32(0x65B0, 0x0); | ||
562 | WREG32(0x65C0, 0x0); | ||
563 | WREG32(0x65D4, 0x0); | ||
564 | WREG32(0x6578, 0x0); | ||
565 | WREG32(0x657C, 0x841880A8); | ||
566 | WREG32(0x6578, 0x1); | ||
567 | WREG32(0x657C, 0x84208680); | ||
568 | WREG32(0x6578, 0x2); | ||
569 | WREG32(0x657C, 0xBFF880B0); | ||
570 | WREG32(0x6578, 0x100); | ||
571 | WREG32(0x657C, 0x83D88088); | ||
572 | WREG32(0x6578, 0x101); | ||
573 | WREG32(0x657C, 0x84608680); | ||
574 | WREG32(0x6578, 0x102); | ||
575 | WREG32(0x657C, 0xBFF080D0); | ||
576 | WREG32(0x6578, 0x200); | ||
577 | WREG32(0x657C, 0x83988068); | ||
578 | WREG32(0x6578, 0x201); | ||
579 | WREG32(0x657C, 0x84A08680); | ||
580 | WREG32(0x6578, 0x202); | ||
581 | WREG32(0x657C, 0xBFF080F8); | ||
582 | WREG32(0x6578, 0x300); | ||
583 | WREG32(0x657C, 0x83588058); | ||
584 | WREG32(0x6578, 0x301); | ||
585 | WREG32(0x657C, 0x84E08660); | ||
586 | WREG32(0x6578, 0x302); | ||
587 | WREG32(0x657C, 0xBFF88120); | ||
588 | WREG32(0x6578, 0x400); | ||
589 | WREG32(0x657C, 0x83188040); | ||
590 | WREG32(0x6578, 0x401); | ||
591 | WREG32(0x657C, 0x85008660); | ||
592 | WREG32(0x6578, 0x402); | ||
593 | WREG32(0x657C, 0xBFF88150); | ||
594 | WREG32(0x6578, 0x500); | ||
595 | WREG32(0x657C, 0x82D88030); | ||
596 | WREG32(0x6578, 0x501); | ||
597 | WREG32(0x657C, 0x85408640); | ||
598 | WREG32(0x6578, 0x502); | ||
599 | WREG32(0x657C, 0xBFF88180); | ||
600 | WREG32(0x6578, 0x600); | ||
601 | WREG32(0x657C, 0x82A08018); | ||
602 | WREG32(0x6578, 0x601); | ||
603 | WREG32(0x657C, 0x85808620); | ||
604 | WREG32(0x6578, 0x602); | ||
605 | WREG32(0x657C, 0xBFF081B8); | ||
606 | WREG32(0x6578, 0x700); | ||
607 | WREG32(0x657C, 0x82608010); | ||
608 | WREG32(0x6578, 0x701); | ||
609 | WREG32(0x657C, 0x85A08600); | ||
610 | WREG32(0x6578, 0x702); | ||
611 | WREG32(0x657C, 0x800081F0); | ||
612 | WREG32(0x6578, 0x800); | ||
613 | WREG32(0x657C, 0x8228BFF8); | ||
614 | WREG32(0x6578, 0x801); | ||
615 | WREG32(0x657C, 0x85E085E0); | ||
616 | WREG32(0x6578, 0x802); | ||
617 | WREG32(0x657C, 0xBFF88228); | ||
618 | WREG32(0x6578, 0x10000); | ||
619 | WREG32(0x657C, 0x82A8BF00); | ||
620 | WREG32(0x6578, 0x10001); | ||
621 | WREG32(0x657C, 0x82A08CC0); | ||
622 | WREG32(0x6578, 0x10002); | ||
623 | WREG32(0x657C, 0x8008BEF8); | ||
624 | WREG32(0x6578, 0x10100); | ||
625 | WREG32(0x657C, 0x81F0BF28); | ||
626 | WREG32(0x6578, 0x10101); | ||
627 | WREG32(0x657C, 0x83608CA0); | ||
628 | WREG32(0x6578, 0x10102); | ||
629 | WREG32(0x657C, 0x8018BED0); | ||
630 | WREG32(0x6578, 0x10200); | ||
631 | WREG32(0x657C, 0x8148BF38); | ||
632 | WREG32(0x6578, 0x10201); | ||
633 | WREG32(0x657C, 0x84408C80); | ||
634 | WREG32(0x6578, 0x10202); | ||
635 | WREG32(0x657C, 0x8008BEB8); | ||
636 | WREG32(0x6578, 0x10300); | ||
637 | WREG32(0x657C, 0x80B0BF78); | ||
638 | WREG32(0x6578, 0x10301); | ||
639 | WREG32(0x657C, 0x85008C20); | ||
640 | WREG32(0x6578, 0x10302); | ||
641 | WREG32(0x657C, 0x8020BEA0); | ||
642 | WREG32(0x6578, 0x10400); | ||
643 | WREG32(0x657C, 0x8028BF90); | ||
644 | WREG32(0x6578, 0x10401); | ||
645 | WREG32(0x657C, 0x85E08BC0); | ||
646 | WREG32(0x6578, 0x10402); | ||
647 | WREG32(0x657C, 0x8018BE90); | ||
648 | WREG32(0x6578, 0x10500); | ||
649 | WREG32(0x657C, 0xBFB8BFB0); | ||
650 | WREG32(0x6578, 0x10501); | ||
651 | WREG32(0x657C, 0x86C08B40); | ||
652 | WREG32(0x6578, 0x10502); | ||
653 | WREG32(0x657C, 0x8010BE90); | ||
654 | WREG32(0x6578, 0x10600); | ||
655 | WREG32(0x657C, 0xBF58BFC8); | ||
656 | WREG32(0x6578, 0x10601); | ||
657 | WREG32(0x657C, 0x87A08AA0); | ||
658 | WREG32(0x6578, 0x10602); | ||
659 | WREG32(0x657C, 0x8010BE98); | ||
660 | WREG32(0x6578, 0x10700); | ||
661 | WREG32(0x657C, 0xBF10BFF0); | ||
662 | WREG32(0x6578, 0x10701); | ||
663 | WREG32(0x657C, 0x886089E0); | ||
664 | WREG32(0x6578, 0x10702); | ||
665 | WREG32(0x657C, 0x8018BEB0); | ||
666 | WREG32(0x6578, 0x10800); | ||
667 | WREG32(0x657C, 0xBED8BFE8); | ||
668 | WREG32(0x6578, 0x10801); | ||
669 | WREG32(0x657C, 0x89408940); | ||
670 | WREG32(0x6578, 0x10802); | ||
671 | WREG32(0x657C, 0xBFE8BED8); | ||
672 | WREG32(0x6578, 0x20000); | ||
673 | WREG32(0x657C, 0x80008000); | ||
674 | WREG32(0x6578, 0x20001); | ||
675 | WREG32(0x657C, 0x90008000); | ||
676 | WREG32(0x6578, 0x20002); | ||
677 | WREG32(0x657C, 0x80008000); | ||
678 | WREG32(0x6578, 0x20003); | ||
679 | WREG32(0x657C, 0x80008000); | ||
680 | WREG32(0x6578, 0x20100); | ||
681 | WREG32(0x657C, 0x80108000); | ||
682 | WREG32(0x6578, 0x20101); | ||
683 | WREG32(0x657C, 0x8FE0BF70); | ||
684 | WREG32(0x6578, 0x20102); | ||
685 | WREG32(0x657C, 0xBFE880C0); | ||
686 | WREG32(0x6578, 0x20103); | ||
687 | WREG32(0x657C, 0x80008000); | ||
688 | WREG32(0x6578, 0x20200); | ||
689 | WREG32(0x657C, 0x8018BFF8); | ||
690 | WREG32(0x6578, 0x20201); | ||
691 | WREG32(0x657C, 0x8F80BF08); | ||
692 | WREG32(0x6578, 0x20202); | ||
693 | WREG32(0x657C, 0xBFD081A0); | ||
694 | WREG32(0x6578, 0x20203); | ||
695 | WREG32(0x657C, 0xBFF88000); | ||
696 | WREG32(0x6578, 0x20300); | ||
697 | WREG32(0x657C, 0x80188000); | ||
698 | WREG32(0x6578, 0x20301); | ||
699 | WREG32(0x657C, 0x8EE0BEC0); | ||
700 | WREG32(0x6578, 0x20302); | ||
701 | WREG32(0x657C, 0xBFB082A0); | ||
702 | WREG32(0x6578, 0x20303); | ||
703 | WREG32(0x657C, 0x80008000); | ||
704 | WREG32(0x6578, 0x20400); | ||
705 | WREG32(0x657C, 0x80188000); | ||
706 | WREG32(0x6578, 0x20401); | ||
707 | WREG32(0x657C, 0x8E00BEA0); | ||
708 | WREG32(0x6578, 0x20402); | ||
709 | WREG32(0x657C, 0xBF8883C0); | ||
710 | WREG32(0x6578, 0x20403); | ||
711 | WREG32(0x657C, 0x80008000); | ||
712 | WREG32(0x6578, 0x20500); | ||
713 | WREG32(0x657C, 0x80188000); | ||
714 | WREG32(0x6578, 0x20501); | ||
715 | WREG32(0x657C, 0x8D00BE90); | ||
716 | WREG32(0x6578, 0x20502); | ||
717 | WREG32(0x657C, 0xBF588500); | ||
718 | WREG32(0x6578, 0x20503); | ||
719 | WREG32(0x657C, 0x80008008); | ||
720 | WREG32(0x6578, 0x20600); | ||
721 | WREG32(0x657C, 0x80188000); | ||
722 | WREG32(0x6578, 0x20601); | ||
723 | WREG32(0x657C, 0x8BC0BE98); | ||
724 | WREG32(0x6578, 0x20602); | ||
725 | WREG32(0x657C, 0xBF308660); | ||
726 | WREG32(0x6578, 0x20603); | ||
727 | WREG32(0x657C, 0x80008008); | ||
728 | WREG32(0x6578, 0x20700); | ||
729 | WREG32(0x657C, 0x80108000); | ||
730 | WREG32(0x6578, 0x20701); | ||
731 | WREG32(0x657C, 0x8A80BEB0); | ||
732 | WREG32(0x6578, 0x20702); | ||
733 | WREG32(0x657C, 0xBF0087C0); | ||
734 | WREG32(0x6578, 0x20703); | ||
735 | WREG32(0x657C, 0x80008008); | ||
736 | WREG32(0x6578, 0x20800); | ||
737 | WREG32(0x657C, 0x80108000); | ||
738 | WREG32(0x6578, 0x20801); | ||
739 | WREG32(0x657C, 0x8920BED0); | ||
740 | WREG32(0x6578, 0x20802); | ||
741 | WREG32(0x657C, 0xBED08920); | ||
742 | WREG32(0x6578, 0x20803); | ||
743 | WREG32(0x657C, 0x80008010); | ||
744 | WREG32(0x6578, 0x30000); | ||
745 | WREG32(0x657C, 0x90008000); | ||
746 | WREG32(0x6578, 0x30001); | ||
747 | WREG32(0x657C, 0x80008000); | ||
748 | WREG32(0x6578, 0x30100); | ||
749 | WREG32(0x657C, 0x8FE0BF90); | ||
750 | WREG32(0x6578, 0x30101); | ||
751 | WREG32(0x657C, 0xBFF880A0); | ||
752 | WREG32(0x6578, 0x30200); | ||
753 | WREG32(0x657C, 0x8F60BF40); | ||
754 | WREG32(0x6578, 0x30201); | ||
755 | WREG32(0x657C, 0xBFE88180); | ||
756 | WREG32(0x6578, 0x30300); | ||
757 | WREG32(0x657C, 0x8EC0BF00); | ||
758 | WREG32(0x6578, 0x30301); | ||
759 | WREG32(0x657C, 0xBFC88280); | ||
760 | WREG32(0x6578, 0x30400); | ||
761 | WREG32(0x657C, 0x8DE0BEE0); | ||
762 | WREG32(0x6578, 0x30401); | ||
763 | WREG32(0x657C, 0xBFA083A0); | ||
764 | WREG32(0x6578, 0x30500); | ||
765 | WREG32(0x657C, 0x8CE0BED0); | ||
766 | WREG32(0x6578, 0x30501); | ||
767 | WREG32(0x657C, 0xBF7884E0); | ||
768 | WREG32(0x6578, 0x30600); | ||
769 | WREG32(0x657C, 0x8BA0BED8); | ||
770 | WREG32(0x6578, 0x30601); | ||
771 | WREG32(0x657C, 0xBF508640); | ||
772 | WREG32(0x6578, 0x30700); | ||
773 | WREG32(0x657C, 0x8A60BEE8); | ||
774 | WREG32(0x6578, 0x30701); | ||
775 | WREG32(0x657C, 0xBF2087A0); | ||
776 | WREG32(0x6578, 0x30800); | ||
777 | WREG32(0x657C, 0x8900BF00); | ||
778 | WREG32(0x6578, 0x30801); | ||
779 | WREG32(0x657C, 0xBF008900); | ||
780 | } | ||
781 | |||
782 | struct rv515_watermark { | ||
783 | u32 lb_request_fifo_depth; | ||
784 | fixed20_12 num_line_pair; | ||
785 | fixed20_12 estimated_width; | ||
786 | fixed20_12 worst_case_latency; | ||
787 | fixed20_12 consumption_rate; | ||
788 | fixed20_12 active_time; | ||
789 | fixed20_12 dbpp; | ||
790 | fixed20_12 priority_mark_max; | ||
791 | fixed20_12 priority_mark; | ||
792 | fixed20_12 sclk; | ||
793 | }; | ||
794 | |||
795 | void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | ||
796 | struct radeon_crtc *crtc, | ||
797 | struct rv515_watermark *wm) | ||
798 | { | ||
799 | struct drm_display_mode *mode = &crtc->base.mode; | ||
800 | fixed20_12 a, b, c; | ||
801 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | ||
802 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | ||
803 | |||
804 | if (!crtc->base.enabled) { | ||
805 | /* FIXME: wouldn't it be better to set priority mark to maximum */ | ||
806 | wm->lb_request_fifo_depth = 4; | ||
807 | return; | ||
808 | } | ||
809 | |||
810 | if (crtc->vsc.full > rfixed_const(2)) | ||
811 | wm->num_line_pair.full = rfixed_const(2); | ||
812 | else | ||
813 | wm->num_line_pair.full = rfixed_const(1); | ||
814 | |||
815 | b.full = rfixed_const(mode->crtc_hdisplay); | ||
816 | c.full = rfixed_const(256); | ||
817 | a.full = rfixed_mul(wm->num_line_pair, b); | ||
818 | request_fifo_depth.full = rfixed_div(a, c); | ||
819 | if (a.full < rfixed_const(4)) { | ||
820 | wm->lb_request_fifo_depth = 4; | ||
821 | } else { | ||
822 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | ||
823 | } | ||
824 | |||
825 | /* Determine consumption rate | ||
826 | * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) | ||
827 | * vtaps = number of vertical taps, | ||
828 | * vsc = vertical scaling ratio, defined as source/destination | ||
829 | * hsc = horizontal scaling ratio, defined as source/destination | ||
830 | */ | ||
831 | a.full = rfixed_const(mode->clock); | ||
832 | b.full = rfixed_const(1000); | ||
833 | a.full = rfixed_div(a, b); | ||
834 | pclk.full = rfixed_div(b, a); | ||
835 | if (crtc->rmx_type != RMX_OFF) { | ||
836 | b.full = rfixed_const(2); | ||
837 | if (crtc->vsc.full > b.full) | ||
838 | b.full = crtc->vsc.full; | ||
839 | b.full = rfixed_mul(b, crtc->hsc); | ||
840 | c.full = rfixed_const(2); | ||
841 | b.full = rfixed_div(b, c); | ||
842 | consumption_time.full = rfixed_div(pclk, b); | ||
843 | } else { | ||
844 | consumption_time.full = pclk.full; | ||
845 | } | ||
846 | a.full = rfixed_const(1); | ||
847 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | ||
848 | |||
849 | |||
850 | /* Determine line time | ||
851 | * LineTime = total time for one line of display | ||
852 | * htotal = total number of horizontal pixels | ||
853 | * pclk = pixel clock period(ns) | ||
854 | */ | ||
855 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
856 | line_time.full = rfixed_mul(a, pclk); | ||
857 | |||
858 | /* Determine active time | ||
859 | * ActiveTime = time of active region of display within one line, | ||
860 | * hactive = total number of horizontal active pixels | ||
861 | * htotal = total number of horizontal pixels | ||
862 | */ | ||
863 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
864 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
865 | wm->active_time.full = rfixed_mul(line_time, b); | ||
866 | wm->active_time.full = rfixed_div(wm->active_time, a); | ||
867 | |||
868 | /* Determine chunk time | ||
869 | * ChunkTime = the time it takes the DCP to send one chunk of data | ||
870 | * to the LB which consists of pipeline delay and inter chunk gap | ||
871 | * sclk = system clock (MHz) | ||
872 | */ | ||
873 | a.full = rfixed_const(600 * 1000); | ||
874 | chunk_time.full = rfixed_div(a, rdev->pm.sclk); | ||
875 | read_delay_latency.full = rfixed_const(1000); | ||
876 | |||
877 | /* Determine the worst case latency | ||
878 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | ||
879 | * WorstCaseLatency = worst case time from urgent to when the MC starts | ||
880 | * to return data | ||
881 | * READ_DELAY_IDLE_MAX = constant of 1us | ||
882 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | ||
883 | * which consists of pipeline delay and inter chunk gap | ||
884 | */ | ||
885 | if (rfixed_trunc(wm->num_line_pair) > 1) { | ||
886 | a.full = rfixed_const(3); | ||
887 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
888 | wm->worst_case_latency.full += read_delay_latency.full; | ||
889 | } else { | ||
890 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; | ||
891 | } | ||
892 | |||
893 | /* Determine the tolerable latency | ||
894 | * TolerableLatency = Any given request has only 1 line time | ||
895 | * for the data to be returned | ||
896 | * LBRequestFifoDepth = Number of chunk requests the LB can | ||
897 | * put into the request FIFO for a display | ||
898 | * LineTime = total time for one line of display | ||
899 | * ChunkTime = the time it takes the DCP to send one chunk | ||
900 | * of data to the LB which consists of | ||
901 | * pipeline delay and inter chunk gap | ||
902 | */ | ||
903 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | ||
904 | tolerable_latency.full = line_time.full; | ||
905 | } else { | ||
906 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | ||
907 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | ||
908 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | ||
909 | tolerable_latency.full = line_time.full - tolerable_latency.full; | ||
910 | } | ||
911 | /* We assume a worst case of 32 bits (4 bytes) */ | ||
912 | wm->dbpp.full = rfixed_const(2 * 16); | ||
913 | |||
914 | /* Determine the maximum priority mark | ||
915 | * width = viewport width in pixels | ||
916 | */ | ||
917 | a.full = rfixed_const(16); | ||
918 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
919 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | ||
920 | |||
921 | /* Determine estimated width */ | ||
922 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | ||
923 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | ||
924 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | ||
925 | wm->priority_mark.full = rfixed_const(10); | ||
926 | } else { | ||
927 | a.full = rfixed_const(16); | ||
928 | wm->priority_mark.full = rfixed_div(estimated_width, a); | ||
929 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | ||
930 | } | ||
931 | } | ||
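
The whole watermark computation above runs in the driver's 20.12 fixed-point type (fixed20_12), with rfixed_const/rfixed_mul/rfixed_div/rfixed_trunc doing the arithmetic. As a rough aid to reading it, here is a self-contained plain-C sketch of those helpers, assuming the usual shift-by-12 definitions; the real radeon helpers are defined elsewhere in the driver and may differ in rounding and overflow handling:

#include <stdint.h>
#include <stdio.h>

/* Minimal 20.12 fixed-point helpers, modeled on how the rfixed_* calls are
 * used above (rfixed_const/mul/div return the raw .full value).  This is an
 * illustrative sketch only. */
typedef struct { uint32_t full; } fixed20_12;

static uint32_t rfixed_const(uint32_t v)    { return v << 12; }
static uint32_t rfixed_trunc(fixed20_12 f)  { return f.full >> 12; }
static uint32_t rfixed_mul(fixed20_12 a, fixed20_12 b)
{
	return (uint32_t)(((uint64_t)a.full * b.full) >> 12);
}
static uint32_t rfixed_div(fixed20_12 a, fixed20_12 b)
{
	return (uint32_t)(((uint64_t)a.full << 12) / b.full);
}

int main(void)
{
	/* pclk = 1000 / (mode->clock / 1000), as in the consumption-rate block */
	fixed20_12 a, b, pclk;

	a.full = rfixed_const(148500);   /* example mode clock in kHz */
	b.full = rfixed_const(1000);
	a.full = rfixed_div(a, b);       /* clock in MHz              */
	pclk.full = rfixed_div(b, a);    /* pixel period in ns        */
	printf("pixel clock period ~ %u ns\n", rfixed_trunc(pclk));
	return 0;
}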
932 | |||
933 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | ||
934 | { | ||
935 | struct drm_display_mode *mode0 = NULL; | ||
936 | struct drm_display_mode *mode1 = NULL; | ||
937 | struct rv515_watermark wm0; | ||
938 | struct rv515_watermark wm1; | ||
939 | u32 tmp; | ||
940 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | ||
941 | fixed20_12 a, b; | ||
942 | |||
943 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
944 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
945 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
946 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
947 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
948 | |||
949 | rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | ||
950 | rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | ||
951 | |||
952 | tmp = wm0.lb_request_fifo_depth; | ||
953 | tmp |= wm1.lb_request_fifo_depth << 16; | ||
954 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | ||
955 | |||
956 | if (mode0 && mode1) { | ||
957 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
958 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | ||
959 | else | ||
960 | a.full = wm0.num_line_pair.full; | ||
961 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
962 | b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | ||
963 | else | ||
964 | b.full = wm1.num_line_pair.full; | ||
965 | a.full += b.full; | ||
966 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
967 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
968 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
969 | b.full = rfixed_mul(b, wm0.active_time); | ||
970 | a.full = rfixed_const(16); | ||
971 | b.full = rfixed_div(b, a); | ||
972 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
973 | wm0.consumption_rate); | ||
974 | priority_mark02.full = a.full + b.full; | ||
975 | } else { | ||
976 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
977 | wm0.consumption_rate); | ||
978 | b.full = rfixed_const(16 * 1000); | ||
979 | priority_mark02.full = rfixed_div(a, b); | ||
980 | } | ||
981 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
982 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
983 | b.full = rfixed_mul(b, wm1.active_time); | ||
984 | a.full = rfixed_const(16); | ||
985 | b.full = rfixed_div(b, a); | ||
986 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
987 | wm1.consumption_rate); | ||
988 | priority_mark12.full = a.full + b.full; | ||
989 | } else { | ||
990 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
991 | wm1.consumption_rate); | ||
992 | b.full = rfixed_const(16 * 1000); | ||
993 | priority_mark12.full = rfixed_div(a, b); | ||
994 | } | ||
995 | if (wm0.priority_mark.full > priority_mark02.full) | ||
996 | priority_mark02.full = wm0.priority_mark.full; | ||
997 | if (rfixed_trunc(priority_mark02) < 0) | ||
998 | priority_mark02.full = 0; | ||
999 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
1000 | priority_mark02.full = wm0.priority_mark_max.full; | ||
1001 | if (wm1.priority_mark.full > priority_mark12.full) | ||
1002 | priority_mark12.full = wm1.priority_mark.full; | ||
1003 | if (rfixed_trunc(priority_mark12) < 0) | ||
1004 | priority_mark12.full = 0; | ||
1005 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
1006 | priority_mark12.full = wm1.priority_mark_max.full; | ||
1007 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
1008 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
1009 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
1010 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
1011 | } else if (mode0) { | ||
1012 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
1013 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | ||
1014 | else | ||
1015 | a.full = wm0.num_line_pair.full; | ||
1016 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
1017 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
1018 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
1019 | b.full = rfixed_mul(b, wm0.active_time); | ||
1020 | a.full = rfixed_const(16); | ||
1021 | b.full = rfixed_div(b, a); | ||
1022 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
1023 | wm0.consumption_rate); | ||
1024 | priority_mark02.full = a.full + b.full; | ||
1025 | } else { | ||
1026 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
1027 | wm0.consumption_rate); | ||
1028 | b.full = rfixed_const(16); | ||
1029 | priority_mark02.full = rfixed_div(a, b); | ||
1030 | } | ||
1031 | if (wm0.priority_mark.full > priority_mark02.full) | ||
1032 | priority_mark02.full = wm0.priority_mark.full; | ||
1033 | if (rfixed_trunc(priority_mark02) < 0) | ||
1034 | priority_mark02.full = 0; | ||
1035 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
1036 | priority_mark02.full = wm0.priority_mark_max.full; | ||
1037 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
1038 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
1039 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
1040 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
1041 | } else { | ||
1042 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
1043 | a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | ||
1044 | else | ||
1045 | a.full = wm1.num_line_pair.full; | ||
1046 | fill_rate.full = rfixed_div(wm1.sclk, a); | ||
1047 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
1048 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
1049 | b.full = rfixed_mul(b, wm1.active_time); | ||
1050 | a.full = rfixed_const(16); | ||
1051 | b.full = rfixed_div(b, a); | ||
1052 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
1053 | wm1.consumption_rate); | ||
1054 | priority_mark12.full = a.full + b.full; | ||
1055 | } else { | ||
1056 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
1057 | wm1.consumption_rate); | ||
1058 | b.full = rfixed_const(16 * 1000); | ||
1059 | priority_mark12.full = rfixed_div(a, b); | ||
1060 | } | ||
1061 | if (wm1.priority_mark.full > priority_mark12.full) | ||
1062 | priority_mark12.full = wm1.priority_mark.full; | ||
1063 | if (rfixed_trunc(priority_mark12) < 0) | ||
1064 | priority_mark12.full = 0; | ||
1065 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
1066 | priority_mark12.full = wm1.priority_mark_max.full; | ||
1067 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
1068 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
1069 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
1070 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
1071 | } | ||
1072 | } | ||
1073 | |||
1074 | void rv515_bandwidth_update(struct radeon_device *rdev) | ||
1075 | { | ||
1076 | uint32_t tmp; | ||
1077 | struct drm_display_mode *mode0 = NULL; | ||
1078 | struct drm_display_mode *mode1 = NULL; | ||
1079 | |||
1080 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
1081 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
1082 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
1083 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
1084 | /* | ||
1085 | * Raise the display0/1 priority in the memory controller for | ||
1086 | * the active modes if the user specifies HIGH for the | ||
1087 | * displaypriority option. | ||
1088 | */ | ||
1089 | if (rdev->disp_priority == 2) { | ||
1090 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); | ||
1091 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | ||
1092 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | ||
1093 | if (mode1) | ||
1094 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
1095 | if (mode0) | ||
1096 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | ||
1097 | WREG32_MC(MC_MISC_LAT_TIMER, tmp); | ||
1098 | } | ||
1099 | rv515_bandwidth_avivo_update(rdev); | ||
1100 | } | ||
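
rv515_bandwidth_update() only adjusts the per-display init-latency fields of MC_MISC_LAT_TIMER when disp_priority is 2. The read-modify-write it performs is the standard mask-and-shift pattern; a standalone sketch of just that pattern, using the mask/shift values from rv515r.h below and a plain variable standing in for the indirect MC register, might look like this:

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from rv515r.h (MC_MISC_LAT_TIMER, index 0x09). */
#define MC_DISP0R_INIT_LAT_MASK  0x00000F00
#define MC_DISP0R_INIT_LAT_SHIFT 8
#define MC_DISP1R_INIT_LAT_MASK  0x0000F000
#define MC_DISP1R_INIT_LAT_SHIFT 12

int main(void)
{
	/* Stand-in for RREG32_MC(MC_MISC_LAT_TIMER); the value is made up. */
	uint32_t tmp = 0x00332211;
	int crtc0_enabled = 1, crtc1_enabled = 0;

	/* Clear both init-latency fields, then raise the enabled ones to 1,
	 * exactly like the disp_priority == 2 branch above. */
	tmp &= ~MC_DISP1R_INIT_LAT_MASK;
	tmp &= ~MC_DISP0R_INIT_LAT_MASK;
	if (crtc1_enabled)
		tmp |= 1 << MC_DISP1R_INIT_LAT_SHIFT;
	if (crtc0_enabled)
		tmp |= 1 << MC_DISP0R_INIT_LAT_SHIFT;

	printf("MC_MISC_LAT_TIMER would be written as 0x%08x\n", tmp);
	return 0;
}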
diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h new file mode 100644 index 000000000000..f3cf84039906 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv515r.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RV515R_H | ||
29 | #define RV515R_H | ||
30 | |||
31 | /* RV515 registers */ | ||
32 | #define PCIE_INDEX 0x0030 | ||
33 | #define PCIE_DATA 0x0034 | ||
34 | #define MC_IND_INDEX 0x0070 | ||
35 | #define MC_IND_WR_EN (1 << 24) | ||
36 | #define MC_IND_DATA 0x0074 | ||
37 | #define RBBM_SOFT_RESET 0x00F0 | ||
38 | #define CONFIG_MEMSIZE 0x00F8 | ||
39 | #define HDP_FB_LOCATION 0x0134 | ||
40 | #define CP_CSQ_CNTL 0x0740 | ||
41 | #define CP_CSQ_MODE 0x0744 | ||
42 | #define CP_CSQ_ADDR 0x07F0 | ||
43 | #define CP_CSQ_DATA 0x07F4 | ||
44 | #define CP_CSQ_STAT 0x07F8 | ||
45 | #define CP_CSQ2_STAT 0x07FC | ||
46 | #define RBBM_STATUS 0x0E40 | ||
47 | #define DST_PIPE_CONFIG 0x170C | ||
48 | #define WAIT_UNTIL 0x1720 | ||
49 | #define WAIT_2D_IDLE (1 << 14) | ||
50 | #define WAIT_3D_IDLE (1 << 15) | ||
51 | #define WAIT_2D_IDLECLEAN (1 << 16) | ||
52 | #define WAIT_3D_IDLECLEAN (1 << 17) | ||
53 | #define ISYNC_CNTL 0x1724 | ||
54 | #define ISYNC_ANY2D_IDLE3D (1 << 0) | ||
55 | #define ISYNC_ANY3D_IDLE2D (1 << 1) | ||
56 | #define ISYNC_TRIG2D_IDLE3D (1 << 2) | ||
57 | #define ISYNC_TRIG3D_IDLE2D (1 << 3) | ||
58 | #define ISYNC_WAIT_IDLEGUI (1 << 4) | ||
59 | #define ISYNC_CPSCRATCH_IDLEGUI (1 << 5) | ||
60 | #define VAP_INDEX_OFFSET 0x208C | ||
61 | #define VAP_PVS_STATE_FLUSH_REG 0x2284 | ||
62 | #define GB_ENABLE 0x4008 | ||
63 | #define GB_MSPOS0 0x4010 | ||
64 | #define MS_X0_SHIFT 0 | ||
65 | #define MS_Y0_SHIFT 4 | ||
66 | #define MS_X1_SHIFT 8 | ||
67 | #define MS_Y1_SHIFT 12 | ||
68 | #define MS_X2_SHIFT 16 | ||
69 | #define MS_Y2_SHIFT 20 | ||
70 | #define MSBD0_Y_SHIFT 24 | ||
71 | #define MSBD0_X_SHIFT 28 | ||
72 | #define GB_MSPOS1 0x4014 | ||
73 | #define MS_X3_SHIFT 0 | ||
74 | #define MS_Y3_SHIFT 4 | ||
75 | #define MS_X4_SHIFT 8 | ||
76 | #define MS_Y4_SHIFT 12 | ||
77 | #define MS_X5_SHIFT 16 | ||
78 | #define MS_Y5_SHIFT 20 | ||
79 | #define MSBD1_SHIFT 24 | ||
80 | #define GB_TILE_CONFIG 0x4018 | ||
81 | #define ENABLE_TILING (1 << 0) | ||
82 | #define PIPE_COUNT_MASK 0x0000000E | ||
83 | #define PIPE_COUNT_SHIFT 1 | ||
84 | #define TILE_SIZE_8 (0 << 4) | ||
85 | #define TILE_SIZE_16 (1 << 4) | ||
86 | #define TILE_SIZE_32 (2 << 4) | ||
87 | #define SUBPIXEL_1_12 (0 << 16) | ||
88 | #define SUBPIXEL_1_16 (1 << 16) | ||
89 | #define GB_SELECT 0x401C | ||
90 | #define GB_AA_CONFIG 0x4020 | ||
91 | #define GB_PIPE_SELECT 0x402C | ||
92 | #define GA_ENHANCE 0x4274 | ||
93 | #define GA_DEADLOCK_CNTL (1 << 0) | ||
94 | #define GA_FASTSYNC_CNTL (1 << 1) | ||
95 | #define GA_POLY_MODE 0x4288 | ||
96 | #define FRONT_PTYPE_POINT (0 << 4) | ||
97 | #define FRONT_PTYPE_LINE (1 << 4) | ||
98 | #define FRONT_PTYPE_TRIANGE (2 << 4) | ||
99 | #define BACK_PTYPE_POINT (0 << 7) | ||
100 | #define BACK_PTYPE_LINE (1 << 7) | ||
101 | #define BACK_PTYPE_TRIANGE (2 << 7) | ||
102 | #define GA_ROUND_MODE 0x428C | ||
103 | #define GEOMETRY_ROUND_TRUNC (0 << 0) | ||
104 | #define GEOMETRY_ROUND_NEAREST (1 << 0) | ||
105 | #define COLOR_ROUND_TRUNC (0 << 2) | ||
106 | #define COLOR_ROUND_NEAREST (1 << 2) | ||
107 | #define SU_REG_DEST 0x42C8 | ||
108 | #define RB3D_DSTCACHE_CTLSTAT 0x4E4C | ||
109 | #define RB3D_DC_FLUSH (2 << 0) | ||
110 | #define RB3D_DC_FREE (2 << 2) | ||
111 | #define RB3D_DC_FINISH (1 << 4) | ||
112 | #define ZB_ZCACHE_CTLSTAT 0x4F18 | ||
113 | #define ZC_FLUSH (1 << 0) | ||
114 | #define ZC_FREE (1 << 1) | ||
115 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
116 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
117 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
118 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
119 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
120 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
121 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
122 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
123 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
124 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
125 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
126 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
127 | #define MODE_PRIORITY_OFF (1 << 16) | ||
128 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
129 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
130 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
131 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
132 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
133 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
134 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
135 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
136 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
137 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
138 | |||
139 | /* ix[MC] registers */ | ||
140 | #define MC_FB_LOCATION 0x01 | ||
141 | #define MC_FB_START_MASK 0x0000FFFF | ||
142 | #define MC_FB_START_SHIFT 0 | ||
143 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
144 | #define MC_FB_TOP_SHIFT 16 | ||
145 | #define MC_AGP_LOCATION 0x02 | ||
146 | #define MC_AGP_START_MASK 0x0000FFFF | ||
147 | #define MC_AGP_START_SHIFT 0 | ||
148 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
149 | #define MC_AGP_TOP_SHIFT 16 | ||
150 | #define MC_AGP_BASE 0x03 | ||
151 | #define MC_AGP_BASE_2 0x04 | ||
152 | #define MC_CNTL 0x5 | ||
153 | #define MEM_NUM_CHANNELS_MASK 0x00000003 | ||
154 | #define MC_STATUS 0x08 | ||
155 | #define MC_STATUS_IDLE (1 << 4) | ||
156 | #define MC_MISC_LAT_TIMER 0x09 | ||
157 | #define MC_CPR_INIT_LAT_MASK 0x0000000F | ||
158 | #define MC_VF_INIT_LAT_MASK 0x000000F0 | ||
159 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
160 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
161 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
162 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
163 | #define MC_FIXED_INIT_LAT_MASK 0x000F0000 | ||
164 | #define MC_E2R_INIT_LAT_MASK 0x00F00000 | ||
165 | #define SAME_PAGE_PRIO_MASK 0x0F000000 | ||
166 | #define MC_GLOBW_INIT_LAT_MASK 0xF0000000 | ||
167 | |||
168 | |||
169 | #endif | ||
170 | |||
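
The ix[MC] registers at the end of this header are reached indirectly through the MC_IND_INDEX/MC_IND_DATA pair, with MC_IND_WR_EN set for writes. The sketch below illustrates that index/data access pattern with a tiny in-memory simulation standing in for real MMIO; the driver's actual accessors are not shown in this patch and may differ in detail:

#include <stdint.h>
#include <stdio.h>

#define MC_IND_INDEX   0x0070
#define MC_IND_WR_EN   (1 << 24)
#define MC_IND_DATA    0x0074
#define MC_STATUS      0x08
#define MC_STATUS_IDLE (1 << 4)

/* Tiny simulation of the index/data pair so the sketch runs standalone;
 * in the driver this would be real MMIO. */
static uint32_t mc_regs[256];
static uint32_t ind_index;

static void mmio_write32(uint32_t reg, uint32_t val)
{
	if (reg == MC_IND_INDEX)
		ind_index = val;
	else if (reg == MC_IND_DATA)
		mc_regs[ind_index & 0xff] = val;
}

static uint32_t mmio_read32(uint32_t reg)
{
	if (reg == MC_IND_DATA)
		return mc_regs[ind_index & 0xff];
	return 0;
}

static void mc_ind_write(uint32_t index, uint32_t val)
{
	/* WR_EN selects write mode for the indirect access. */
	mmio_write32(MC_IND_INDEX, index | MC_IND_WR_EN);
	mmio_write32(MC_IND_DATA, val);
}

static uint32_t mc_ind_read(uint32_t index)
{
	mmio_write32(MC_IND_INDEX, index);
	return mmio_read32(MC_IND_DATA);
}

int main(void)
{
	mc_ind_write(MC_STATUS, MC_STATUS_IDLE);
	printf("MC_STATUS reads back 0x%x\n", mc_ind_read(MC_STATUS));
	return 0;
}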
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index da50cc51ede3..21d8ffd57308 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
67 | "programming pipes. Bad things might happen.\n"); | 67 | "programming pipes. Bad things might happen.\n"); |
68 | } | 68 | } |
69 | 69 | ||
70 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 70 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
71 | tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); | 71 | tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); |
72 | tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); | 72 | tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); |
73 | WREG32(R700_MC_VM_FB_LOCATION, tmp); | 73 | WREG32(R700_MC_VM_FB_LOCATION, tmp); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1c407f7cca3..6538d4236989 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #define TTM_BO_HASH_ORDER 13 | 43 | #define TTM_BO_HASH_ORDER 13 |
44 | 44 | ||
45 | static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); | 45 | static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); |
46 | static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | ||
47 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); | 46 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); |
48 | 47 | ||
49 | static inline uint32_t ttm_bo_type_flags(unsigned type) | 48 | static inline uint32_t ttm_bo_type_flags(unsigned type) |
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
224 | TTM_ASSERT_LOCKED(&bo->mutex); | 223 | TTM_ASSERT_LOCKED(&bo->mutex); |
225 | bo->ttm = NULL; | 224 | bo->ttm = NULL; |
226 | 225 | ||
226 | if (bdev->need_dma32) | ||
227 | page_flags |= TTM_PAGE_FLAG_DMA32; | ||
228 | |||
227 | switch (bo->type) { | 229 | switch (bo->type) { |
228 | case ttm_bo_type_device: | 230 | case ttm_bo_type_device: |
229 | if (zero_alloc) | 231 | if (zero_alloc) |
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
304 | 306 | ||
305 | } | 307 | } |
306 | 308 | ||
309 | if (bdev->driver->move_notify) | ||
310 | bdev->driver->move_notify(bo, mem); | ||
311 | |||
307 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && | 312 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
308 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) | 313 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) |
309 | ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); | 314 | ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); |
@@ -655,31 +660,52 @@ retry_pre_get: | |||
655 | return 0; | 660 | return 0; |
656 | } | 661 | } |
657 | 662 | ||
663 | static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, | ||
664 | uint32_t cur_placement, | ||
665 | uint32_t proposed_placement) | ||
666 | { | ||
667 | uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; | ||
668 | uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; | ||
669 | |||
670 | /** | ||
671 | * Keep current caching if possible. | ||
672 | */ | ||
673 | |||
674 | if ((cur_placement & caching) != 0) | ||
675 | result |= (cur_placement & caching); | ||
676 | else if ((man->default_caching & caching) != 0) | ||
677 | result |= man->default_caching; | ||
678 | else if ((TTM_PL_FLAG_CACHED & caching) != 0) | ||
679 | result |= TTM_PL_FLAG_CACHED; | ||
680 | else if ((TTM_PL_FLAG_WC & caching) != 0) | ||
681 | result |= TTM_PL_FLAG_WC; | ||
682 | else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) | ||
683 | result |= TTM_PL_FLAG_UNCACHED; | ||
684 | |||
685 | return result; | ||
686 | } | ||
687 | |||
688 | |||
658 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, | 689 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, |
659 | bool disallow_fixed, | 690 | bool disallow_fixed, |
660 | uint32_t mem_type, | 691 | uint32_t mem_type, |
661 | uint32_t mask, uint32_t *res_mask) | 692 | uint32_t proposed_placement, |
693 | uint32_t *masked_placement) | ||
662 | { | 694 | { |
663 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); | 695 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); |
664 | 696 | ||
665 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) | 697 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) |
666 | return false; | 698 | return false; |
667 | 699 | ||
668 | if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) | 700 | if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) |
669 | return false; | 701 | return false; |
670 | 702 | ||
671 | if ((mask & man->available_caching) == 0) | 703 | if ((proposed_placement & man->available_caching) == 0) |
672 | return false; | 704 | return false; |
673 | if (mask & man->default_caching) | ||
674 | cur_flags |= man->default_caching; | ||
675 | else if (mask & TTM_PL_FLAG_CACHED) | ||
676 | cur_flags |= TTM_PL_FLAG_CACHED; | ||
677 | else if (mask & TTM_PL_FLAG_WC) | ||
678 | cur_flags |= TTM_PL_FLAG_WC; | ||
679 | else | ||
680 | cur_flags |= TTM_PL_FLAG_UNCACHED; | ||
681 | 705 | ||
682 | *res_mask = cur_flags; | 706 | cur_flags |= (proposed_placement & man->available_caching); |
707 | |||
708 | *masked_placement = cur_flags; | ||
683 | return true; | 709 | return true; |
684 | } | 710 | } |
685 | 711 | ||
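
The new ttm_bo_select_caching() prefers, in order: the buffer's current caching if the proposed placement still allows it, then the memory type's default caching, then CACHED, WC and finally UNCACHED. Here is a self-contained sketch of that decision with made-up flag values standing in for the TTM_PL_FLAG_* bits (the real definitions live in the TTM placement headers):

#include <stdint.h>
#include <stdio.h>

/* Stand-in values for illustration only. */
#define PL_FLAG_CACHED   (1 << 0)
#define PL_FLAG_WC       (1 << 1)
#define PL_FLAG_UNCACHED (1 << 2)
#define PL_MASK_CACHING  (PL_FLAG_CACHED | PL_FLAG_WC | PL_FLAG_UNCACHED)

static uint32_t select_caching(uint32_t default_caching,
			       uint32_t cur_placement,
			       uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~PL_MASK_CACHING;

	/* Keep the current caching if the proposal allows it. */
	if (cur_placement & caching)
		result |= cur_placement & caching;
	else if (default_caching & caching)
		result |= default_caching;
	else if (PL_FLAG_CACHED & caching)
		result |= PL_FLAG_CACHED;
	else if (PL_FLAG_WC & caching)
		result |= PL_FLAG_WC;
	else if (PL_FLAG_UNCACHED & caching)
		result |= PL_FLAG_UNCACHED;

	return result;
}

int main(void)
{
	/* Currently WC, proposal allows WC or UNCACHED: WC is kept. */
	uint32_t r = select_caching(PL_FLAG_CACHED, PL_FLAG_WC,
				    PL_FLAG_WC | PL_FLAG_UNCACHED);
	printf("selected caching flags: 0x%x\n", r);
	return 0;
}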
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
723 | if (!type_ok) | 749 | if (!type_ok) |
724 | continue; | 750 | continue; |
725 | 751 | ||
752 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | ||
753 | cur_flags); | ||
754 | |||
726 | if (mem_type == TTM_PL_SYSTEM) | 755 | if (mem_type == TTM_PL_SYSTEM) |
727 | break; | 756 | break; |
728 | 757 | ||
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
779 | proposed_placement, &cur_flags)) | 808 | proposed_placement, &cur_flags)) |
780 | continue; | 809 | continue; |
781 | 810 | ||
811 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | ||
812 | cur_flags); | ||
813 | |||
782 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, | 814 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, |
783 | interruptible, no_wait); | 815 | interruptible, no_wait); |
784 | 816 | ||
@@ -1305,7 +1337,8 @@ EXPORT_SYMBOL(ttm_bo_device_release); | |||
1305 | 1337 | ||
1306 | int ttm_bo_device_init(struct ttm_bo_device *bdev, | 1338 | int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1307 | struct ttm_mem_global *mem_glob, | 1339 | struct ttm_mem_global *mem_glob, |
1308 | struct ttm_bo_driver *driver, uint64_t file_page_offset) | 1340 | struct ttm_bo_driver *driver, uint64_t file_page_offset, |
1341 | bool need_dma32) | ||
1309 | { | 1342 | { |
1310 | int ret = -EINVAL; | 1343 | int ret = -EINVAL; |
1311 | 1344 | ||
@@ -1342,6 +1375,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1342 | INIT_LIST_HEAD(&bdev->ddestroy); | 1375 | INIT_LIST_HEAD(&bdev->ddestroy); |
1343 | INIT_LIST_HEAD(&bdev->swap_lru); | 1376 | INIT_LIST_HEAD(&bdev->swap_lru); |
1344 | bdev->dev_mapping = NULL; | 1377 | bdev->dev_mapping = NULL; |
1378 | bdev->need_dma32 = need_dma32; | ||
1345 | ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); | 1379 | ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); |
1346 | ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); | 1380 | ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); |
1347 | if (unlikely(ret != 0)) { | 1381 | if (unlikely(ret != 0)) { |
@@ -1419,6 +1453,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | |||
1419 | 1453 | ||
1420 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); | 1454 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); |
1421 | } | 1455 | } |
1456 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); | ||
1422 | 1457 | ||
1423 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) | 1458 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) |
1424 | { | 1459 | { |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bdec583901eb..ce2e6f38ea01 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | 138 | static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, |
139 | unsigned long page) | 139 | unsigned long page, |
140 | pgprot_t prot) | ||
140 | { | 141 | { |
141 | struct page *d = ttm_tt_get_page(ttm, page); | 142 | struct page *d = ttm_tt_get_page(ttm, page); |
142 | void *dst; | 143 | void *dst; |
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
145 | return -ENOMEM; | 146 | return -ENOMEM; |
146 | 147 | ||
147 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); | 148 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); |
148 | dst = kmap(d); | 149 | |
150 | #ifdef CONFIG_X86 | ||
151 | dst = kmap_atomic_prot(d, KM_USER0, prot); | ||
152 | #else | ||
153 | if (prot != PAGE_KERNEL) | ||
154 | dst = vmap(&d, 1, 0, prot); | ||
155 | else | ||
156 | dst = kmap(d); | ||
157 | #endif | ||
149 | if (!dst) | 158 | if (!dst) |
150 | return -ENOMEM; | 159 | return -ENOMEM; |
151 | 160 | ||
152 | memcpy_fromio(dst, src, PAGE_SIZE); | 161 | memcpy_fromio(dst, src, PAGE_SIZE); |
153 | kunmap(d); | 162 | |
163 | #ifdef CONFIG_X86 | ||
164 | kunmap_atomic(dst, KM_USER0); | ||
165 | #else | ||
166 | if (prot != PAGE_KERNEL) | ||
167 | vunmap(dst); | ||
168 | else | ||
169 | kunmap(d); | ||
170 | #endif | ||
171 | |||
154 | return 0; | 172 | return 0; |
155 | } | 173 | } |
156 | 174 | ||
157 | static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | 175 | static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, |
158 | unsigned long page) | 176 | unsigned long page, |
177 | pgprot_t prot) | ||
159 | { | 178 | { |
160 | struct page *s = ttm_tt_get_page(ttm, page); | 179 | struct page *s = ttm_tt_get_page(ttm, page); |
161 | void *src; | 180 | void *src; |
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
164 | return -ENOMEM; | 183 | return -ENOMEM; |
165 | 184 | ||
166 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); | 185 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); |
167 | src = kmap(s); | 186 | #ifdef CONFIG_X86 |
187 | src = kmap_atomic_prot(s, KM_USER0, prot); | ||
188 | #else | ||
189 | if (prot != PAGE_KERNEL) | ||
190 | src = vmap(&s, 1, 0, prot); | ||
191 | else | ||
192 | src = kmap(s); | ||
193 | #endif | ||
168 | if (!src) | 194 | if (!src) |
169 | return -ENOMEM; | 195 | return -ENOMEM; |
170 | 196 | ||
171 | memcpy_toio(dst, src, PAGE_SIZE); | 197 | memcpy_toio(dst, src, PAGE_SIZE); |
172 | kunmap(s); | 198 | |
199 | #ifdef CONFIG_X86 | ||
200 | kunmap_atomic(src, KM_USER0); | ||
201 | #else | ||
202 | if (prot != PAGE_KERNEL) | ||
203 | vunmap(src); | ||
204 | else | ||
205 | kunmap(s); | ||
206 | #endif | ||
207 | |||
173 | return 0; | 208 | return 0; |
174 | } | 209 | } |
175 | 210 | ||
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
214 | 249 | ||
215 | for (i = 0; i < new_mem->num_pages; ++i) { | 250 | for (i = 0; i < new_mem->num_pages; ++i) { |
216 | page = i * dir + add; | 251 | page = i * dir + add; |
217 | if (old_iomap == NULL) | 252 | if (old_iomap == NULL) { |
218 | ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); | 253 | pgprot_t prot = ttm_io_prot(old_mem->placement, |
219 | else if (new_iomap == NULL) | 254 | PAGE_KERNEL); |
220 | ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); | 255 | ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, |
221 | else | 256 | prot); |
257 | } else if (new_iomap == NULL) { | ||
258 | pgprot_t prot = ttm_io_prot(new_mem->placement, | ||
259 | PAGE_KERNEL); | ||
260 | ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, | ||
261 | prot); | ||
262 | } else | ||
222 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); | 263 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); |
223 | if (ret) | 264 | if (ret) |
224 | goto out1; | 265 | goto out1; |
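
The copy loop above indexes pages as page = i * dir + add, so the same loop body can walk forward or backward. Outside this hunk, ttm_bo_move_memcpy() presumably picks dir and add so that moves between overlapping regions copy in the non-clobbering order; the following generic, self-contained sketch (not the TTM code itself) shows why that direction choice matters:

#include <stdio.h>
#include <string.h>

#define PAGE_COUNT     4
#define PAGE_SIZE_DEMO 8

/* Copy num_pages pages from src_idx to dst_idx inside one buffer, choosing
 * the direction the same way the loop above does (page = i * dir + add) so
 * overlapping ranges are not clobbered.  Illustration only. */
static void copy_pages(char *buf, int dst_idx, int src_idx, int num_pages)
{
	int dir, add, i;

	if (dst_idx > src_idx) {	/* copy backwards to avoid overwrite */
		dir = -1;
		add = num_pages - 1;
	} else {			/* copy forwards */
		dir = 1;
		add = 0;
	}

	for (i = 0; i < num_pages; ++i) {
		int page = i * dir + add;
		memcpy(buf + (dst_idx + page) * PAGE_SIZE_DEMO,
		       buf + (src_idx + page) * PAGE_SIZE_DEMO,
		       PAGE_SIZE_DEMO);
	}
}

int main(void)
{
	char buf[(PAGE_COUNT + 1) * PAGE_SIZE_DEMO + 1];

	memset(buf, 0, sizeof(buf));
	memcpy(buf, "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD", PAGE_COUNT * PAGE_SIZE_DEMO);
	copy_pages(buf, 1, 0, PAGE_COUNT);	/* overlapping move by one page */
	printf("%s\n", buf + PAGE_SIZE_DEMO);	/* pages arrive intact */
	return 0;
}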
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
509 | if (evict) { | 550 | if (evict) { |
510 | ret = ttm_bo_wait(bo, false, false, false); | 551 | ret = ttm_bo_wait(bo, false, false, false); |
511 | spin_unlock(&bo->lock); | 552 | spin_unlock(&bo->lock); |
512 | driver->sync_obj_unref(&bo->sync_obj); | 553 | if (tmp_obj) |
513 | 554 | driver->sync_obj_unref(&tmp_obj); | |
514 | if (ret) | 555 | if (ret) |
515 | return ret; | 556 | return ret; |
516 | 557 | ||
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
532 | 573 | ||
533 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 574 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
534 | spin_unlock(&bo->lock); | 575 | spin_unlock(&bo->lock); |
576 | if (tmp_obj) | ||
577 | driver->sync_obj_unref(&tmp_obj); | ||
535 | 578 | ||
536 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 579 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
537 | if (ret) | 580 | if (ret) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe949a12fe40..33de7637c0c6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
101 | return VM_FAULT_NOPAGE; | 101 | return VM_FAULT_NOPAGE; |
102 | } | 102 | } |
103 | 103 | ||
104 | if (bdev->driver->fault_reserve_notify) | ||
105 | bdev->driver->fault_reserve_notify(bo); | ||
106 | |||
104 | /* | 107 | /* |
105 | * Wait for buffer data in transit, due to a pipelined | 108 | * Wait for buffer data in transit, due to a pipelined |
106 | * move. | 109 | * move. |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 75dc8bd24592..b8b6c4a5f983 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) | |||
86 | unsigned long i; | 86 | unsigned long i; |
87 | 87 | ||
88 | for (i = 0; i < num_pages; ++i) { | 88 | for (i = 0; i < num_pages; ++i) { |
89 | if (pages[i]) { | 89 | struct page *page = pages[i]; |
90 | unsigned long start = (unsigned long)page_address(pages[i]); | 90 | void *page_virtual; |
91 | flush_dcache_range(start, start + PAGE_SIZE); | 91 | |
92 | } | 92 | if (unlikely(page == NULL)) |
93 | continue; | ||
94 | |||
95 | page_virtual = kmap_atomic(page, KM_USER0); | ||
96 | flush_dcache_range((unsigned long) page_virtual, | ||
97 | (unsigned long) page_virtual + PAGE_SIZE); | ||
98 | kunmap_atomic(page_virtual, KM_USER0); | ||
93 | } | 99 | } |
94 | #else | 100 | #else |
95 | if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) | 101 | if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) |
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | |||
131 | 137 | ||
132 | static struct page *ttm_tt_alloc_page(unsigned page_flags) | 138 | static struct page *ttm_tt_alloc_page(unsigned page_flags) |
133 | { | 139 | { |
140 | gfp_t gfp_flags = GFP_USER; | ||
141 | |||
134 | if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) | 142 | if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) |
135 | return alloc_page(GFP_HIGHUSER | __GFP_ZERO); | 143 | gfp_flags |= __GFP_ZERO; |
144 | |||
145 | if (page_flags & TTM_PAGE_FLAG_DMA32) | ||
146 | gfp_flags |= __GFP_DMA32; | ||
147 | else | ||
148 | gfp_flags |= __GFP_HIGHMEM; | ||
136 | 149 | ||
137 | return alloc_page(GFP_HIGHUSER); | 150 | return alloc_page(gfp_flags); |
138 | } | 151 | } |
139 | 152 | ||
140 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) | 153 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index bff0103610c1..fe4fa29c9219 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) | |||
593 | sensor->data = data; | 593 | sensor->data = data; |
594 | sensor->id = flags->integer.value; | 594 | sensor->id = flags->integer.value; |
595 | sensor->limit1 = limit1->integer.value; | 595 | sensor->limit1 = limit1->integer.value; |
596 | sensor->limit2 = limit2->integer.value; | 596 | if (data->old_interface) |
597 | sensor->limit2 = limit2->integer.value; | ||
598 | else | ||
599 | /* The upper limit is expressed as delta from lower limit */ | ||
600 | sensor->limit2 = sensor->limit1 + limit2->integer.value; | ||
597 | 601 | ||
598 | snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, | 602 | snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, |
599 | "%s%d_input", base_name, start + *num); | 603 | "%s%d_input", base_name, start + *num); |
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index a92dbb97ee99..ba75bfcf14ce 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c | |||
@@ -86,6 +86,7 @@ superio_exit(void) | |||
86 | #define SUPERIO_REG_ACT 0x30 | 86 | #define SUPERIO_REG_ACT 0x30 |
87 | #define SUPERIO_REG_BASE 0x60 | 87 | #define SUPERIO_REG_BASE 0x60 |
88 | #define SUPERIO_REG_DEVID 0x20 | 88 | #define SUPERIO_REG_DEVID 0x20 |
89 | #define SUPERIO_REG_DEVREV 0x21 | ||
89 | 90 | ||
90 | /* Logical device registers */ | 91 | /* Logical device registers */ |
91 | 92 | ||
@@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr, | |||
429 | * The LPC47M292 (device id 0x6B) is somewhat compatible, but it | 430 | * The LPC47M292 (device id 0x6B) is somewhat compatible, but it |
430 | * supports a 3rd fan, and the pin configuration registers are | 431 | * supports a 3rd fan, and the pin configuration registers are |
431 | * unfortunately different. | 432 | * unfortunately different. |
433 | * The LPC47M233 has the same device id (0x6B) but is not compatible. | ||
434 | * We check the high bit of the device revision register to | ||
435 | * differentiate them. | ||
432 | */ | 436 | */ |
433 | switch (val) { | 437 | switch (val) { |
434 | case 0x51: | 438 | case 0x51: |
@@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr, | |||
448 | sio_data->type = smsc47m1; | 452 | sio_data->type = smsc47m1; |
449 | break; | 453 | break; |
450 | case 0x6B: | 454 | case 0x6B: |
455 | if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) { | ||
456 | pr_debug(DRVNAME ": " | ||
457 | "Found SMSC LPC47M233, unsupported\n"); | ||
458 | superio_exit(); | ||
459 | return -ENODEV; | ||
460 | } | ||
461 | |||
451 | pr_info(DRVNAME ": Found SMSC LPC47M292\n"); | 462 | pr_info(DRVNAME ": Found SMSC LPC47M292\n"); |
452 | sio_data->type = smsc47m2; | 463 | sio_data->type = smsc47m2; |
453 | break; | 464 | break; |
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c index 1a9cc135219f..b96f3025e588 100644 --- a/drivers/i2c/chips/tsl2550.c +++ b/drivers/i2c/chips/tsl2550.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | 28 | ||
29 | #define TSL2550_DRV_NAME "tsl2550" | 29 | #define TSL2550_DRV_NAME "tsl2550" |
30 | #define DRIVER_VERSION "1.1.1" | 30 | #define DRIVER_VERSION "1.1.2" |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * Defines | 33 | * Defines |
@@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) | |||
189 | u8 r = 128; | 189 | u8 r = 128; |
190 | 190 | ||
191 | /* Avoid division by 0 and count 1 cannot be greater than count 0 */ | 191 | /* Avoid division by 0 and count 1 cannot be greater than count 0 */ |
192 | if (c0 && (c1 <= c0)) | 192 | if (c1 <= c0) |
193 | r = c1 * 128 / c0; | 193 | if (c0) { |
194 | r = c1 * 128 / c0; | ||
195 | |||
196 | /* Calculate LUX */ | ||
197 | lux = ((c0 - c1) * ratio_lut[r]) / 256; | ||
198 | } else | ||
199 | lux = 0; | ||
194 | else | 200 | else |
195 | return -1; | 201 | return -EAGAIN; |
196 | |||
197 | /* Calculate LUX */ | ||
198 | lux = ((c0 - c1) * ratio_lut[r]) / 256; | ||
199 | 202 | ||
200 | /* LUX range check */ | 203 | /* LUX range check */ |
201 | return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; | 204 | return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; |
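
With this change the function returns -EAGAIN when the channel-1 count exceeds channel 0, reports 0 lux when channel 0 is zero, and otherwise computes lux = ((c0 - c1) * ratio_lut[r]) / 256 with r = c1 * 128 / c0. A small standalone sketch of that arithmetic, taking the LUT entry as a parameter instead of reproducing the driver's table, could look like:

#include <stdio.h>

#define MAX_LUX_DEMO 1846	/* placeholder; the real limit is TSL2550_MAX_LUX */

/* c0/c1 are the two channel counts; ratio_lut_entry stands for
 * ratio_lut[c1 * 128 / c0] from the driver's table (not reproduced here). */
static int calc_lux(unsigned int c0, unsigned int c1,
		    unsigned int ratio_lut_entry)
{
	int lux;

	if (c1 > c0)
		return -1;	/* the driver returns -EAGAIN here */
	if (!c0)
		return 0;

	lux = ((int)(c0 - c1) * (int)ratio_lut_entry) / 256;
	return lux > MAX_LUX_DEMO ? MAX_LUX_DEMO : lux;
}

int main(void)
{
	printf("lux = %d\n", calc_lux(100, 30, 80));	/* made-up inputs */
	return 0;
}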
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 990e6a7e6674..c3b661a666cb 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c | |||
@@ -731,10 +731,10 @@ l1oip_socket_thread(void *data) | |||
731 | while (!signal_pending(current)) { | 731 | while (!signal_pending(current)) { |
732 | struct kvec iov = { | 732 | struct kvec iov = { |
733 | .iov_base = recvbuf, | 733 | .iov_base = recvbuf, |
734 | .iov_len = sizeof(recvbuf), | 734 | .iov_len = recvbuf_size, |
735 | }; | 735 | }; |
736 | recvlen = kernel_recvmsg(socket, &msg, &iov, 1, | 736 | recvlen = kernel_recvmsg(socket, &msg, &iov, 1, |
737 | sizeof(recvbuf), 0); | 737 | recvbuf_size, 0); |
738 | if (recvlen > 0) { | 738 | if (recvlen > 0) { |
739 | l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); | 739 | l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); |
740 | } else { | 740 | } else { |
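
The l1oip change swaps sizeof(recvbuf) for recvbuf_size, presumably because recvbuf is now a dynamically allocated buffer rather than an on-stack array; sizeof on a pointer yields the pointer's size, not the allocation's. A minimal demonstration of that pitfall:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char stack_buf[1500];
	char *heap_buf = malloc(1500);

	/* sizeof() only knows the static type: fine for the array,
	 * misleading for the pointer. */
	printf("sizeof(stack_buf) = %zu\n", sizeof(stack_buf));	/* 1500 */
	printf("sizeof(heap_buf)  = %zu\n", sizeof(heap_buf));	/* 4 or 8 */

	free(heap_buf);
	return 0;
}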
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 529e2ba505c3..ed1038164019 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1318,7 +1318,7 @@ static int crypt_iterate_devices(struct dm_target *ti, | |||
1318 | { | 1318 | { |
1319 | struct crypt_config *cc = ti->private; | 1319 | struct crypt_config *cc = ti->private; |
1320 | 1320 | ||
1321 | return fn(ti, cc->dev, cc->start, data); | 1321 | return fn(ti, cc->dev, cc->start, ti->len, data); |
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | static struct target_type crypt_target = { | 1324 | static struct target_type crypt_target = { |
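
All the device-mapper target changes in this series follow one pattern: the iterate_devices callout gains a length argument after start, and each target passes the span it actually maps (ti->len for whole-target mappings, the per-stripe width in dm-stripe below). A hedged sketch of a callback and caller using the widened signature; the dm structures are left opaque here and only the shape of the call is shown:

#include <stdint.h>

typedef uint64_t sector_t;
struct dm_target;
struct dm_dev;

/* Widened callout type after this series: a length argument follows start. */
typedef int (*iterate_devices_callout_fn)(struct dm_target *ti,
					  struct dm_dev *dev,
					  sector_t start, sector_t len,
					  void *data);

/* A linear-style target would now forward its whole mapped span. */
struct linear_like {
	struct dm_dev *dev;
	sector_t start;
	sector_t len;		/* normally taken from ti->len */
};

int linear_like_iterate_devices(struct dm_target *ti,
				struct linear_like *lc,
				iterate_devices_callout_fn fn,
				void *data)
{
	return fn(ti, lc->dev, lc->start, lc->len, data);
}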
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 4e5b843cd4d7..ebe7381f47c8 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -324,12 +324,12 @@ static int delay_iterate_devices(struct dm_target *ti, | |||
324 | struct delay_c *dc = ti->private; | 324 | struct delay_c *dc = ti->private; |
325 | int ret = 0; | 325 | int ret = 0; |
326 | 326 | ||
327 | ret = fn(ti, dc->dev_read, dc->start_read, data); | 327 | ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data); |
328 | if (ret) | 328 | if (ret) |
329 | goto out; | 329 | goto out; |
330 | 330 | ||
331 | if (dc->dev_write) | 331 | if (dc->dev_write) |
332 | ret = fn(ti, dc->dev_write, dc->start_write, data); | 332 | ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data); |
333 | 333 | ||
334 | out: | 334 | out: |
335 | return ret; | 335 | return ret; |
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 9184b6deb868..82f7d6e6b1ea 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -139,7 +139,7 @@ static int linear_iterate_devices(struct dm_target *ti, | |||
139 | { | 139 | { |
140 | struct linear_c *lc = ti->private; | 140 | struct linear_c *lc = ti->private; |
141 | 141 | ||
142 | return fn(ti, lc->dev, lc->start, data); | 142 | return fn(ti, lc->dev, lc->start, ti->len, data); |
143 | } | 143 | } |
144 | 144 | ||
145 | static struct target_type linear_target = { | 145 | static struct target_type linear_target = { |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c70604a20897..6f0d90d4a541 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -1453,7 +1453,7 @@ static int multipath_iterate_devices(struct dm_target *ti, | |||
1453 | 1453 | ||
1454 | list_for_each_entry(pg, &m->priority_groups, list) { | 1454 | list_for_each_entry(pg, &m->priority_groups, list) { |
1455 | list_for_each_entry(p, &pg->pgpaths, list) { | 1455 | list_for_each_entry(p, &pg->pgpaths, list) { |
1456 | ret = fn(ti, p->path.dev, ti->begin, data); | 1456 | ret = fn(ti, p->path.dev, ti->begin, ti->len, data); |
1457 | if (ret) | 1457 | if (ret) |
1458 | goto out; | 1458 | goto out; |
1459 | } | 1459 | } |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ce8868c768cc..9726577cde49 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -638,6 +638,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
638 | spin_lock_irq(&ms->lock); | 638 | spin_lock_irq(&ms->lock); |
639 | bio_list_merge(&ms->writes, &requeue); | 639 | bio_list_merge(&ms->writes, &requeue); |
640 | spin_unlock_irq(&ms->lock); | 640 | spin_unlock_irq(&ms->lock); |
641 | delayed_wake(ms); | ||
641 | } | 642 | } |
642 | 643 | ||
643 | /* | 644 | /* |
@@ -1292,7 +1293,7 @@ static int mirror_iterate_devices(struct dm_target *ti, | |||
1292 | 1293 | ||
1293 | for (i = 0; !ret && i < ms->nr_mirrors; i++) | 1294 | for (i = 0; !ret && i < ms->nr_mirrors; i++) |
1294 | ret = fn(ti, ms->mirror[i].dev, | 1295 | ret = fn(ti, ms->mirror[i].dev, |
1295 | ms->mirror[i].offset, data); | 1296 | ms->mirror[i].offset, ti->len, data); |
1296 | 1297 | ||
1297 | return ret; | 1298 | return ret; |
1298 | } | 1299 | } |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index b240e85ae39a..4e0e5937e42a 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -320,10 +320,11 @@ static int stripe_iterate_devices(struct dm_target *ti, | |||
320 | int ret = 0; | 320 | int ret = 0; |
321 | unsigned i = 0; | 321 | unsigned i = 0; |
322 | 322 | ||
323 | do | 323 | do { |
324 | ret = fn(ti, sc->stripe[i].dev, | 324 | ret = fn(ti, sc->stripe[i].dev, |
325 | sc->stripe[i].physical_start, data); | 325 | sc->stripe[i].physical_start, |
326 | while (!ret && ++i < sc->stripes); | 326 | sc->stripe_width, data); |
327 | } while (!ret && ++i < sc->stripes); | ||
327 | 328 | ||
328 | return ret; | 329 | return ret; |
329 | } | 330 | } |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2cba557d9e61..d952b3441913 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -346,7 +346,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) | |||
346 | * If possible, this checks that an area of a destination device is valid. | 346 | * If possible, this checks that an area of a destination device is valid. |
347 | */ | 347 | */ |
348 | static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, | 348 | static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, |
349 | sector_t start, void *data) | 349 | sector_t start, sector_t len, void *data) |
350 | { | 350 | { |
351 | struct queue_limits *limits = data; | 351 | struct queue_limits *limits = data; |
352 | struct block_device *bdev = dev->bdev; | 352 | struct block_device *bdev = dev->bdev; |
@@ -359,7 +359,7 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, | |||
359 | if (!dev_size) | 359 | if (!dev_size) |
360 | return 1; | 360 | return 1; |
361 | 361 | ||
362 | if ((start >= dev_size) || (start + ti->len > dev_size)) { | 362 | if ((start >= dev_size) || (start + len > dev_size)) { |
363 | DMWARN("%s: %s too small for target", | 363 | DMWARN("%s: %s too small for target", |
364 | dm_device_name(ti->table->md), bdevname(bdev, b)); | 364 | dm_device_name(ti->table->md), bdevname(bdev, b)); |
365 | return 0; | 365 | return 0; |
@@ -377,11 +377,11 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, | |||
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | if (ti->len & (logical_block_size_sectors - 1)) { | 380 | if (len & (logical_block_size_sectors - 1)) { |
381 | DMWARN("%s: len=%llu not aligned to h/w " | 381 | DMWARN("%s: len=%llu not aligned to h/w " |
382 | "logical block size %hu of %s", | 382 | "logical block size %hu of %s", |
383 | dm_device_name(ti->table->md), | 383 | dm_device_name(ti->table->md), |
384 | (unsigned long long)ti->len, | 384 | (unsigned long long)len, |
385 | limits->logical_block_size, bdevname(bdev, b)); | 385 | limits->logical_block_size, bdevname(bdev, b)); |
386 | return 0; | 386 | return 0; |
387 | } | 387 | } |
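
device_area_is_valid() now validates the passed-in len instead of ti->len, both against the device size and for alignment. The test len & (logical_block_size_sectors - 1) works because the logical block size in sectors is a power of two; a tiny standalone illustration:

#include <stdio.h>

/* Returns nonzero if len is NOT a multiple of block_sectors.
 * Only valid when block_sectors is a power of two, which holds for
 * logical block sizes (512, 1024, 2048, 4096 bytes -> 1, 2, 4, 8 sectors). */
static unsigned long long misaligned(unsigned long long len,
				     unsigned int block_sectors)
{
	return len & (block_sectors - 1);
}

int main(void)
{
	printf("%llu\n", misaligned(1024, 8));	/* 0: aligned to 4 KiB blocks */
	printf("%llu\n", misaligned(1001, 8));	/* nonzero: would be rejected */
	return 0;
}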
@@ -482,7 +482,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, | |||
482 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | 482 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) |
483 | 483 | ||
484 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | 484 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, |
485 | sector_t start, void *data) | 485 | sector_t start, sector_t len, void *data) |
486 | { | 486 | { |
487 | struct queue_limits *limits = data; | 487 | struct queue_limits *limits = data; |
488 | struct block_device *bdev = dev->bdev; | 488 | struct block_device *bdev = dev->bdev; |
@@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t) | |||
830 | return t->type; | 830 | return t->type; |
831 | } | 831 | } |
832 | 832 | ||
833 | bool dm_table_bio_based(struct dm_table *t) | ||
834 | { | ||
835 | return dm_table_get_type(t) == DM_TYPE_BIO_BASED; | ||
836 | } | ||
837 | |||
838 | bool dm_table_request_based(struct dm_table *t) | 833 | bool dm_table_request_based(struct dm_table *t) |
839 | { | 834 | { |
840 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; | 835 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9acd54a5cffb..8a311ea0d441 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
2203 | goto out; | 2203 | goto out; |
2204 | } | 2204 | } |
2205 | 2205 | ||
2206 | /* | ||
2207 | * It is enough that blk_queue_ordered() is called only once when | ||
2208 | * the first bio-based table is bound. | ||
2209 | * | ||
2210 | * This setting should be moved to alloc_dev() when request-based dm | ||
2211 | * supports barrier. | ||
2212 | */ | ||
2213 | if (!md->map && dm_table_bio_based(table)) | ||
2214 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
2215 | |||
2216 | __unbind(md); | 2206 | __unbind(md); |
2217 | r = __bind(md, table, &limits); | 2207 | r = __bind(md, table, &limits); |
2218 | 2208 | ||
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 23278ae80f08..a7663eba17e2 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); | |||
61 | int dm_table_any_busy_target(struct dm_table *t); | 61 | int dm_table_any_busy_target(struct dm_table *t); |
62 | int dm_table_set_type(struct dm_table *t); | 62 | int dm_table_set_type(struct dm_table *t); |
63 | unsigned dm_table_get_type(struct dm_table *t); | 63 | unsigned dm_table_get_type(struct dm_table *t); |
64 | bool dm_table_bio_based(struct dm_table *t); | ||
65 | bool dm_table_request_based(struct dm_table *t); | 64 | bool dm_table_request_based(struct dm_table *t); |
66 | int dm_table_alloc_md_mempools(struct dm_table *t); | 65 | int dm_table_alloc_md_mempools(struct dm_table *t); |
67 | void dm_table_free_md_mempools(struct dm_table *t); | 66 | void dm_table_free_md_mempools(struct dm_table *t); |
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c index efb4a6c2b57a..9a6307a347b2 100644 --- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c | |||
@@ -20,8 +20,14 @@ | |||
20 | #include "tuner-simple.h" | 20 | #include "tuner-simple.h" |
21 | #include "stv0297.h" | 21 | #include "stv0297.h" |
22 | 22 | ||
23 | |||
24 | /* Can we use the specified front-end? Remember that if we are compiled | ||
25 | * into the kernel we can't call code that's in modules. */ | ||
26 | #define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \ | ||
27 | (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE))) | ||
28 | |||
23 | /* lnb control */ | 29 | /* lnb control */ |
24 | #if defined(CONFIG_DVB_MT312_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) | 30 | #if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299) |
25 | static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) | 31 | static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) |
26 | { | 32 | { |
27 | struct flexcop_device *fc = fe->dvb->priv; | 33 | struct flexcop_device *fc = fe->dvb->priv; |
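
Editor's note: FE_SUPPORTED() collapses the old per-frontend #if tests into one rule: a frontend is usable when it is built in, or when it is a module and this driver is a module too, since built-in code cannot call into modules. A short illustration of how the guard reads (the CONFIG names are the real Kconfig symbols; evaluating defined() from a macro is a gcc behaviour this patch relies on):

	/* FE_SUPPORTED(MT312) expands to:
	 *   defined(CONFIG_DVB_MT312) ||
	 *   (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
	 *
	 *   flexcop built-in, mt312 built-in    -> supported
	 *   flexcop built-in, mt312 is a module -> NOT supported
	 *   flexcop module,   mt312 is a module -> supported
	 */
	#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
	/* ... shared lnb/voltage helpers compiled only when someone needs them ... */
	#endif
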
@@ -49,8 +55,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage | |||
49 | } | 55 | } |
50 | #endif | 56 | #endif |
51 | 57 | ||
52 | #if defined(CONFIG_DVB_S5H1420_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) \ | 58 | #if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) |
53 | || defined(CONFIG_DVB_MT312_MODULE) | ||
54 | static int flexcop_sleep(struct dvb_frontend* fe) | 59 | static int flexcop_sleep(struct dvb_frontend* fe) |
55 | { | 60 | { |
56 | struct flexcop_device *fc = fe->dvb->priv; | 61 | struct flexcop_device *fc = fe->dvb->priv; |
@@ -61,7 +66,7 @@ static int flexcop_sleep(struct dvb_frontend* fe) | |||
61 | #endif | 66 | #endif |
62 | 67 | ||
63 | /* SkyStar2 DVB-S rev 2.3 */ | 68 | /* SkyStar2 DVB-S rev 2.3 */ |
64 | #if defined(CONFIG_DVB_MT312_MODULE) | 69 | #if FE_SUPPORTED(MT312) |
65 | static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) | 70 | static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) |
66 | { | 71 | { |
67 | /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ | 72 | /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ |
@@ -193,10 +198,12 @@ static int skystar2_rev23_attach(struct flexcop_device *fc, | |||
193 | } | 198 | } |
194 | return 0; | 199 | return 0; |
195 | } | 200 | } |
201 | #else | ||
202 | #define skystar2_rev23_attach NULL | ||
196 | #endif | 203 | #endif |
197 | 204 | ||
198 | /* SkyStar2 DVB-S rev 2.6 */ | 205 | /* SkyStar2 DVB-S rev 2.6 */ |
199 | #if defined(CONFIG_DVB_STV0299_MODULE) | 206 | #if FE_SUPPORTED(STV0299) |
200 | static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, | 207 | static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, |
201 | u32 srate, u32 ratio) | 208 | u32 srate, u32 ratio) |
202 | { | 209 | { |
@@ -321,10 +328,12 @@ static int skystar2_rev26_attach(struct flexcop_device *fc, | |||
321 | } | 328 | } |
322 | return 0; | 329 | return 0; |
323 | } | 330 | } |
331 | #else | ||
332 | #define skystar2_rev26_attach NULL | ||
324 | #endif | 333 | #endif |
325 | 334 | ||
326 | /* SkyStar2 DVB-S rev 2.7 */ | 335 | /* SkyStar2 DVB-S rev 2.7 */ |
327 | #if defined(CONFIG_DVB_S5H1420_MODULE) | 336 | #if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000) |
328 | static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { | 337 | static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { |
329 | .demod_address = 0x53, | 338 | .demod_address = 0x53, |
330 | .invert = 1, | 339 | .invert = 1, |
@@ -385,10 +394,12 @@ fail: | |||
385 | fc->fc_i2c_adap[0].no_base_addr = 0; | 394 | fc->fc_i2c_adap[0].no_base_addr = 0; |
386 | return 0; | 395 | return 0; |
387 | } | 396 | } |
397 | #else | ||
398 | #define skystar2_rev27_attach NULL | ||
388 | #endif | 399 | #endif |
389 | 400 | ||
390 | /* SkyStar2 rev 2.8 */ | 401 | /* SkyStar2 rev 2.8 */ |
391 | #if defined(CONFIG_DVB_CX24123_MODULE) | 402 | #if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113) |
392 | static struct cx24123_config skystar2_rev2_8_cx24123_config = { | 403 | static struct cx24123_config skystar2_rev2_8_cx24123_config = { |
393 | .demod_address = 0x55, | 404 | .demod_address = 0x55, |
394 | .dont_use_pll = 1, | 405 | .dont_use_pll = 1, |
@@ -433,10 +444,12 @@ static int skystar2_rev28_attach(struct flexcop_device *fc, | |||
433 | * IR-receiver (PIC16F818) - but the card has no input for that ??? */ | 444 | * IR-receiver (PIC16F818) - but the card has no input for that ??? */ |
434 | return 1; | 445 | return 1; |
435 | } | 446 | } |
447 | #else | ||
448 | #define skystar2_rev28_attach NULL | ||
436 | #endif | 449 | #endif |
437 | 450 | ||
438 | /* AirStar DVB-T */ | 451 | /* AirStar DVB-T */ |
439 | #if defined(CONFIG_DVB_MT352_MODULE) | 452 | #if FE_SUPPORTED(MT352) |
440 | static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) | 453 | static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) |
441 | { | 454 | { |
442 | static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; | 455 | static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; |
@@ -495,10 +508,12 @@ static int airstar_dvbt_attach(struct flexcop_device *fc, | |||
495 | } | 508 | } |
496 | return 0; | 509 | return 0; |
497 | } | 510 | } |
511 | #else | ||
512 | #define airstar_dvbt_attach NULL | ||
498 | #endif | 513 | #endif |
499 | 514 | ||
500 | /* AirStar ATSC 1st generation */ | 515 | /* AirStar ATSC 1st generation */ |
501 | #if defined(CONFIG_DVB_BCM3510_MODULE) | 516 | #if FE_SUPPORTED(BCM3510) |
502 | static int flexcop_fe_request_firmware(struct dvb_frontend *fe, | 517 | static int flexcop_fe_request_firmware(struct dvb_frontend *fe, |
503 | const struct firmware **fw, char* name) | 518 | const struct firmware **fw, char* name) |
504 | { | 519 | { |
@@ -517,10 +532,12 @@ static int airstar_atsc1_attach(struct flexcop_device *fc, | |||
517 | fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); | 532 | fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); |
518 | return fc->fe != NULL; | 533 | return fc->fe != NULL; |
519 | } | 534 | } |
535 | #else | ||
536 | #define airstar_atsc1_attach NULL | ||
520 | #endif | 537 | #endif |
521 | 538 | ||
522 | /* AirStar ATSC 2nd generation */ | 539 | /* AirStar ATSC 2nd generation */ |
523 | #if defined(CONFIG_DVB_NXT200X_MODULE) | 540 | #if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL) |
524 | static struct nxt200x_config samsung_tbmv_config = { | 541 | static struct nxt200x_config samsung_tbmv_config = { |
525 | .demod_address = 0x0a, | 542 | .demod_address = 0x0a, |
526 | }; | 543 | }; |
@@ -535,10 +552,12 @@ static int airstar_atsc2_attach(struct flexcop_device *fc, | |||
535 | return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, | 552 | return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, |
536 | DVB_PLL_SAMSUNG_TBMV); | 553 | DVB_PLL_SAMSUNG_TBMV); |
537 | } | 554 | } |
555 | #else | ||
556 | #define airstar_atsc2_attach NULL | ||
538 | #endif | 557 | #endif |
539 | 558 | ||
540 | /* AirStar ATSC 3rd generation */ | 559 | /* AirStar ATSC 3rd generation */ |
541 | #if defined(CONFIG_DVB_LGDT330X_MODULE) | 560 | #if FE_SUPPORTED(LGDT330X) |
542 | static struct lgdt330x_config air2pc_atsc_hd5000_config = { | 561 | static struct lgdt330x_config air2pc_atsc_hd5000_config = { |
543 | .demod_address = 0x59, | 562 | .demod_address = 0x59, |
544 | .demod_chip = LGDT3303, | 563 | .demod_chip = LGDT3303, |
@@ -556,10 +575,12 @@ static int airstar_atsc3_attach(struct flexcop_device *fc, | |||
556 | return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, | 575 | return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, |
557 | TUNER_LG_TDVS_H06XF); | 576 | TUNER_LG_TDVS_H06XF); |
558 | } | 577 | } |
578 | #else | ||
579 | #define airstar_atsc3_attach NULL | ||
559 | #endif | 580 | #endif |
560 | 581 | ||
561 | /* CableStar2 DVB-C */ | 582 | /* CableStar2 DVB-C */ |
562 | #if defined(CONFIG_DVB_STV0297_MODULE) | 583 | #if FE_SUPPORTED(STV0297) |
563 | static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe, | 584 | static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe, |
564 | struct dvb_frontend_parameters *fep) | 585 | struct dvb_frontend_parameters *fep) |
565 | { | 586 | { |
@@ -698,39 +719,23 @@ static int cablestar2_attach(struct flexcop_device *fc, | |||
698 | fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; | 719 | fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; |
699 | return 1; | 720 | return 1; |
700 | } | 721 | } |
722 | #else | ||
723 | #define cablestar2_attach NULL | ||
701 | #endif | 724 | #endif |
702 | 725 | ||
703 | static struct { | 726 | static struct { |
704 | flexcop_device_type_t type; | 727 | flexcop_device_type_t type; |
705 | int (*attach)(struct flexcop_device *, struct i2c_adapter *); | 728 | int (*attach)(struct flexcop_device *, struct i2c_adapter *); |
706 | } flexcop_frontends[] = { | 729 | } flexcop_frontends[] = { |
707 | #if defined(CONFIG_DVB_S5H1420_MODULE) | ||
708 | { FC_SKY_REV27, skystar2_rev27_attach }, | 730 | { FC_SKY_REV27, skystar2_rev27_attach }, |
709 | #endif | ||
710 | #if defined(CONFIG_DVB_CX24123_MODULE) | ||
711 | { FC_SKY_REV28, skystar2_rev28_attach }, | 731 | { FC_SKY_REV28, skystar2_rev28_attach }, |
712 | #endif | ||
713 | #if defined(CONFIG_DVB_STV0299_MODULE) | ||
714 | { FC_SKY_REV26, skystar2_rev26_attach }, | 732 | { FC_SKY_REV26, skystar2_rev26_attach }, |
715 | #endif | ||
716 | #if defined(CONFIG_DVB_MT352_MODULE) | ||
717 | { FC_AIR_DVBT, airstar_dvbt_attach }, | 733 | { FC_AIR_DVBT, airstar_dvbt_attach }, |
718 | #endif | ||
719 | #if defined(CONFIG_DVB_NXT200X_MODULE) | ||
720 | { FC_AIR_ATSC2, airstar_atsc2_attach }, | 734 | { FC_AIR_ATSC2, airstar_atsc2_attach }, |
721 | #endif | ||
722 | #if defined(CONFIG_DVB_LGDT330X_MODULE) | ||
723 | { FC_AIR_ATSC3, airstar_atsc3_attach }, | 735 | { FC_AIR_ATSC3, airstar_atsc3_attach }, |
724 | #endif | ||
725 | #if defined(CONFIG_DVB_BCM3510_MODULE) | ||
726 | { FC_AIR_ATSC1, airstar_atsc1_attach }, | 736 | { FC_AIR_ATSC1, airstar_atsc1_attach }, |
727 | #endif | ||
728 | #if defined(CONFIG_DVB_STV0297_MODULE) | ||
729 | { FC_CABLE, cablestar2_attach }, | 737 | { FC_CABLE, cablestar2_attach }, |
730 | #endif | ||
731 | #if defined(CONFIG_DVB_MT312_MODULE) | ||
732 | { FC_SKY_REV23, skystar2_rev23_attach }, | 738 | { FC_SKY_REV23, skystar2_rev23_attach }, |
733 | #endif | ||
734 | }; | 739 | }; |
735 | 740 | ||
736 | /* try to figure out the frontend */ | 741 | /* try to figure out the frontend */ |
@@ -738,6 +743,8 @@ int flexcop_frontend_init(struct flexcop_device *fc) | |||
738 | { | 743 | { |
739 | int i; | 744 | int i; |
740 | for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { | 745 | for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { |
746 | if (!flexcop_frontends[i].attach) | ||
747 | continue; | ||
741 | /* type needs to be set before, because of some workarounds | 748 | /* type needs to be set before, because of some workarounds |
742 | * done based on the probed card type */ | 749 | * done based on the probed card type */ |
743 | fc->dev_type = flexcop_frontends[i].type; | 750 | fc->dev_type = flexcop_frontends[i].type; |
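
Editor's note: with every attach routine now defined, possibly as a NULL stub, the frontend table can be built unconditionally and disabled entries are skipped at probe time instead of being #ifdef-ed out. A minimal standalone sketch of that pattern, with made-up names:

	#include <stdio.h>
	#include <stddef.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static int attach_a(void) { puts("A attached"); return 1; }
	#define attach_b NULL   /* "compiled out": the slot becomes a stub */

	static struct { const char *name; int (*attach)(void); } table[] = {
		{ "card-a", attach_a },
		{ "card-b", attach_b },
	};

	int main(void)
	{
		for (size_t i = 0; i < ARRAY_SIZE(table); i++) {
			if (!table[i].attach)   /* skip stubs at run time */
				continue;
			if (table[i].attach())
				break;
		}
		return 0;
	}
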
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c index 136c5863d81b..12e018b4107d 100644 --- a/drivers/media/dvb/frontends/af9013.c +++ b/drivers/media/dvb/frontends/af9013.c | |||
@@ -527,6 +527,10 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
527 | u8 i, buf[3] = {0, 0, 0}; | 527 | u8 i, buf[3] = {0, 0, 0}; |
528 | *auto_mode = 0; /* set if parameters are requested to auto set */ | 528 | *auto_mode = 0; /* set if parameters are requested to auto set */ |
529 | 529 | ||
530 | /* Try to auto-detect the transmission parameters when AUTO is requested, | ||
531 | or when the application passes garbage parameters, for compatibility. | ||
532 | MPlayer currently seems to provide such garbage parameters. */ | ||
533 | |||
530 | switch (params->transmission_mode) { | 534 | switch (params->transmission_mode) { |
531 | case TRANSMISSION_MODE_AUTO: | 535 | case TRANSMISSION_MODE_AUTO: |
532 | *auto_mode = 1; | 536 | *auto_mode = 1; |
@@ -536,7 +540,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
536 | buf[0] |= (1 << 0); | 540 | buf[0] |= (1 << 0); |
537 | break; | 541 | break; |
538 | default: | 542 | default: |
539 | return -EINVAL; | 543 | deb_info("%s: invalid transmission_mode\n", __func__); |
544 | *auto_mode = 1; | ||
540 | } | 545 | } |
541 | 546 | ||
542 | switch (params->guard_interval) { | 547 | switch (params->guard_interval) { |
@@ -554,7 +559,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
554 | buf[0] |= (3 << 2); | 559 | buf[0] |= (3 << 2); |
555 | break; | 560 | break; |
556 | default: | 561 | default: |
557 | return -EINVAL; | 562 | deb_info("%s: invalid guard_interval\n", __func__); |
563 | *auto_mode = 1; | ||
558 | } | 564 | } |
559 | 565 | ||
560 | switch (params->hierarchy_information) { | 566 | switch (params->hierarchy_information) { |
@@ -572,7 +578,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
572 | buf[0] |= (3 << 4); | 578 | buf[0] |= (3 << 4); |
573 | break; | 579 | break; |
574 | default: | 580 | default: |
575 | return -EINVAL; | 581 | deb_info("%s: invalid hierarchy_information\n", __func__); |
582 | *auto_mode = 1; | ||
576 | }; | 583 | }; |
577 | 584 | ||
578 | switch (params->constellation) { | 585 | switch (params->constellation) { |
@@ -587,7 +594,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
587 | buf[1] |= (2 << 6); | 594 | buf[1] |= (2 << 6); |
588 | break; | 595 | break; |
589 | default: | 596 | default: |
590 | return -EINVAL; | 597 | deb_info("%s: invalid constellation\n", __func__); |
598 | *auto_mode = 1; | ||
591 | } | 599 | } |
592 | 600 | ||
593 | /* Use HP. How and which case we can switch to LP? */ | 601 | /* Use HP. How and which case we can switch to LP? */ |
@@ -611,7 +619,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
611 | buf[2] |= (4 << 0); | 619 | buf[2] |= (4 << 0); |
612 | break; | 620 | break; |
613 | default: | 621 | default: |
614 | return -EINVAL; | 622 | deb_info("%s: invalid code_rate_HP\n", __func__); |
623 | *auto_mode = 1; | ||
615 | } | 624 | } |
616 | 625 | ||
617 | switch (params->code_rate_LP) { | 626 | switch (params->code_rate_LP) { |
@@ -638,7 +647,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
638 | if (params->hierarchy_information == HIERARCHY_AUTO) | 647 | if (params->hierarchy_information == HIERARCHY_AUTO) |
639 | break; | 648 | break; |
640 | default: | 649 | default: |
641 | return -EINVAL; | 650 | deb_info("%s: invalid code_rate_LP\n", __func__); |
651 | *auto_mode = 1; | ||
642 | } | 652 | } |
643 | 653 | ||
644 | switch (params->bandwidth) { | 654 | switch (params->bandwidth) { |
@@ -651,7 +661,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, | |||
651 | buf[1] |= (2 << 2); | 661 | buf[1] |= (2 << 2); |
652 | break; | 662 | break; |
653 | default: | 663 | default: |
654 | return -EINVAL; | 664 | deb_info("%s: invalid bandwidth\n", __func__); |
665 | buf[1] |= (2 << 2); /* cannot auto-detect BW, try 8 MHz */ | ||
655 | } | 666 | } |
656 | 667 | ||
657 | /* program */ | 668 | /* program */ |
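
Editor's note: instead of failing the tune request with -EINVAL, each unrecognized field now logs a debug message and sets *auto_mode so the demodulator searches for the value itself; only the bandwidth, which cannot be auto-detected, falls back to a fixed 8 MHz. A small sketch of the pattern with an illustrative enum, not the driver's own types:

	enum guard { GUARD_1_32, GUARD_1_16, GUARD_AUTO };

	static void set_guard_interval(enum guard gi, unsigned char *reg,
				       int *auto_mode)
	{
		switch (gi) {
		case GUARD_1_32: *reg |= 0 << 2; break;
		case GUARD_1_16: *reg |= 1 << 2; break;
		case GUARD_AUTO:
		default:
			/* was: return -EINVAL; now: note it, let the demod search */
			*auto_mode = 1;
		}
	}
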
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c index fdb4adff3d28..ca6558c394be 100644 --- a/drivers/media/video/bt8xx/bttv-cards.c +++ b/drivers/media/video/bt8xx/bttv-cards.c | |||
@@ -3324,8 +3324,6 @@ void __devinit bttv_init_card1(struct bttv *btv) | |||
3324 | /* initialization part two -- after registering i2c bus */ | 3324 | /* initialization part two -- after registering i2c bus */ |
3325 | void __devinit bttv_init_card2(struct bttv *btv) | 3325 | void __devinit bttv_init_card2(struct bttv *btv) |
3326 | { | 3326 | { |
3327 | int addr=ADDR_UNSET; | ||
3328 | |||
3329 | btv->tuner_type = UNSET; | 3327 | btv->tuner_type = UNSET; |
3330 | 3328 | ||
3331 | if (BTTV_BOARD_UNKNOWN == btv->c.type) { | 3329 | if (BTTV_BOARD_UNKNOWN == btv->c.type) { |
@@ -3470,9 +3468,6 @@ void __devinit bttv_init_card2(struct bttv *btv) | |||
3470 | btv->pll.pll_current = -1; | 3468 | btv->pll.pll_current = -1; |
3471 | 3469 | ||
3472 | /* tuner configuration (from card list / autodetect / insmod option) */ | 3470 | /* tuner configuration (from card list / autodetect / insmod option) */ |
3473 | if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) | ||
3474 | addr = bttv_tvcards[btv->c.type].tuner_addr; | ||
3475 | |||
3476 | if (UNSET != bttv_tvcards[btv->c.type].tuner_type) | 3471 | if (UNSET != bttv_tvcards[btv->c.type].tuner_type) |
3477 | if (UNSET == btv->tuner_type) | 3472 | if (UNSET == btv->tuner_type) |
3478 | btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; | 3473 | btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; |
@@ -3496,40 +3491,6 @@ void __devinit bttv_init_card2(struct bttv *btv) | |||
3496 | if (UNSET == btv->tuner_type) | 3491 | if (UNSET == btv->tuner_type) |
3497 | btv->tuner_type = TUNER_ABSENT; | 3492 | btv->tuner_type = TUNER_ABSENT; |
3498 | 3493 | ||
3499 | if (btv->tuner_type != TUNER_ABSENT) { | ||
3500 | struct tuner_setup tun_setup; | ||
3501 | |||
3502 | /* Load tuner module before issuing tuner config call! */ | ||
3503 | if (bttv_tvcards[btv->c.type].has_radio) | ||
3504 | v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, | ||
3505 | &btv->c.i2c_adap, "tuner", "tuner", | ||
3506 | v4l2_i2c_tuner_addrs(ADDRS_RADIO)); | ||
3507 | v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, | ||
3508 | &btv->c.i2c_adap, "tuner", "tuner", | ||
3509 | v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); | ||
3510 | v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, | ||
3511 | &btv->c.i2c_adap, "tuner", "tuner", | ||
3512 | v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); | ||
3513 | |||
3514 | tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; | ||
3515 | tun_setup.type = btv->tuner_type; | ||
3516 | tun_setup.addr = addr; | ||
3517 | |||
3518 | if (bttv_tvcards[btv->c.type].has_radio) | ||
3519 | tun_setup.mode_mask |= T_RADIO; | ||
3520 | |||
3521 | bttv_call_all(btv, tuner, s_type_addr, &tun_setup); | ||
3522 | } | ||
3523 | |||
3524 | if (btv->tda9887_conf) { | ||
3525 | struct v4l2_priv_tun_config tda9887_cfg; | ||
3526 | |||
3527 | tda9887_cfg.tuner = TUNER_TDA9887; | ||
3528 | tda9887_cfg.priv = &btv->tda9887_conf; | ||
3529 | |||
3530 | bttv_call_all(btv, tuner, s_config, &tda9887_cfg); | ||
3531 | } | ||
3532 | |||
3533 | btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? | 3494 | btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? |
3534 | bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; | 3495 | bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; |
3535 | btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? | 3496 | btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? |
@@ -3540,15 +3501,15 @@ void __devinit bttv_init_card2(struct bttv *btv) | |||
3540 | btv->has_remote = remote[btv->c.nr]; | 3501 | btv->has_remote = remote[btv->c.nr]; |
3541 | 3502 | ||
3542 | if (bttv_tvcards[btv->c.type].has_radio) | 3503 | if (bttv_tvcards[btv->c.type].has_radio) |
3543 | btv->has_radio=1; | 3504 | btv->has_radio = 1; |
3544 | if (bttv_tvcards[btv->c.type].has_remote) | 3505 | if (bttv_tvcards[btv->c.type].has_remote) |
3545 | btv->has_remote=1; | 3506 | btv->has_remote = 1; |
3546 | if (!bttv_tvcards[btv->c.type].no_gpioirq) | 3507 | if (!bttv_tvcards[btv->c.type].no_gpioirq) |
3547 | btv->gpioirq=1; | 3508 | btv->gpioirq = 1; |
3548 | if (bttv_tvcards[btv->c.type].volume_gpio) | 3509 | if (bttv_tvcards[btv->c.type].volume_gpio) |
3549 | btv->volume_gpio=bttv_tvcards[btv->c.type].volume_gpio; | 3510 | btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio; |
3550 | if (bttv_tvcards[btv->c.type].audio_mode_gpio) | 3511 | if (bttv_tvcards[btv->c.type].audio_mode_gpio) |
3551 | btv->audio_mode_gpio=bttv_tvcards[btv->c.type].audio_mode_gpio; | 3512 | btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio; |
3552 | 3513 | ||
3553 | if (btv->tuner_type == TUNER_ABSENT) | 3514 | if (btv->tuner_type == TUNER_ABSENT) |
3554 | return; /* no tuner or related drivers to load */ | 3515 | return; /* no tuner or related drivers to load */ |
@@ -3666,6 +3627,49 @@ no_audio: | |||
3666 | } | 3627 | } |
3667 | 3628 | ||
3668 | 3629 | ||
3630 | /* initialize the tuner */ | ||
3631 | void __devinit bttv_init_tuner(struct bttv *btv) | ||
3632 | { | ||
3633 | int addr = ADDR_UNSET; | ||
3634 | |||
3635 | if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) | ||
3636 | addr = bttv_tvcards[btv->c.type].tuner_addr; | ||
3637 | |||
3638 | if (btv->tuner_type != TUNER_ABSENT) { | ||
3639 | struct tuner_setup tun_setup; | ||
3640 | |||
3641 | /* Load tuner module before issuing tuner config call! */ | ||
3642 | if (bttv_tvcards[btv->c.type].has_radio) | ||
3643 | v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, | ||
3644 | &btv->c.i2c_adap, "tuner", "tuner", | ||
3645 | v4l2_i2c_tuner_addrs(ADDRS_RADIO)); | ||
3646 | v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, | ||
3647 | &btv->c.i2c_adap, "tuner", "tuner", | ||
3648 | v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); | ||
3649 | v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, | ||
3650 | &btv->c.i2c_adap, "tuner", "tuner", | ||
3651 | v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); | ||
3652 | |||
3653 | tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; | ||
3654 | tun_setup.type = btv->tuner_type; | ||
3655 | tun_setup.addr = addr; | ||
3656 | |||
3657 | if (bttv_tvcards[btv->c.type].has_radio) | ||
3658 | tun_setup.mode_mask |= T_RADIO; | ||
3659 | |||
3660 | bttv_call_all(btv, tuner, s_type_addr, &tun_setup); | ||
3661 | } | ||
3662 | |||
3663 | if (btv->tda9887_conf) { | ||
3664 | struct v4l2_priv_tun_config tda9887_cfg; | ||
3665 | |||
3666 | tda9887_cfg.tuner = TUNER_TDA9887; | ||
3667 | tda9887_cfg.priv = &btv->tda9887_conf; | ||
3668 | |||
3669 | bttv_call_all(btv, tuner, s_config, &tda9887_cfg); | ||
3670 | } | ||
3671 | } | ||
3672 | |||
3669 | /* ----------------------------------------------------------------------- */ | 3673 | /* ----------------------------------------------------------------------- */ |
3670 | 3674 | ||
3671 | static void modtec_eeprom(struct bttv *btv) | 3675 | static void modtec_eeprom(struct bttv *btv) |
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index d147d29bb0d3..8cc6dd28d6a7 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c | |||
@@ -4419,6 +4419,7 @@ static int __devinit bttv_probe(struct pci_dev *dev, | |||
4419 | 4419 | ||
4420 | /* some card-specific stuff (needs working i2c) */ | 4420 | /* some card-specific stuff (needs working i2c) */ |
4421 | bttv_init_card2(btv); | 4421 | bttv_init_card2(btv); |
4422 | bttv_init_tuner(btv); | ||
4422 | init_irqreg(btv); | 4423 | init_irqreg(btv); |
4423 | 4424 | ||
4424 | /* register video4linux + input */ | 4425 | /* register video4linux + input */ |
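
Editor's note: splitting the tuner bring-up out of bttv_init_card2() leaves the probe path with three distinct steps once i2c works; a condensed view of the resulting order, taken from the hunk above with error paths omitted:

	/* inside bttv_probe(), after the i2c bus is registered */
	bttv_init_card2(btv);   /* card-specific setup, picks btv->tuner_type */
	bttv_init_tuner(btv);   /* new: load tuner subdevs, send s_type_addr/s_config */
	init_irqreg(btv);
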
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h index 3d36daf206f3..3ec2402c6b4a 100644 --- a/drivers/media/video/bt8xx/bttv.h +++ b/drivers/media/video/bt8xx/bttv.h | |||
@@ -283,6 +283,7 @@ extern struct tvcard bttv_tvcards[]; | |||
283 | extern void bttv_idcard(struct bttv *btv); | 283 | extern void bttv_idcard(struct bttv *btv); |
284 | extern void bttv_init_card1(struct bttv *btv); | 284 | extern void bttv_init_card1(struct bttv *btv); |
285 | extern void bttv_init_card2(struct bttv *btv); | 285 | extern void bttv_init_card2(struct bttv *btv); |
286 | extern void bttv_init_tuner(struct bttv *btv); | ||
286 | 287 | ||
287 | /* card-specific funtions */ | 288 | /* card-specific funtions */ |
288 | extern void tea5757_set_freq(struct bttv *btv, unsigned short freq); | 289 | extern void tea5757_set_freq(struct bttv *btv, unsigned short freq); |
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c index 428f0c45e6b7..e0cf21e0b1bf 100644 --- a/drivers/media/video/cx23885/cx23885-417.c +++ b/drivers/media/video/cx23885/cx23885-417.c | |||
@@ -58,7 +58,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages"); | |||
58 | 58 | ||
59 | #define dprintk(level, fmt, arg...)\ | 59 | #define dprintk(level, fmt, arg...)\ |
60 | do { if (v4l_debug >= level) \ | 60 | do { if (v4l_debug >= level) \ |
61 | printk(KERN_DEBUG "%s: " fmt, dev->name , ## arg);\ | 61 | printk(KERN_DEBUG "%s: " fmt, \ |
62 | (dev) ? dev->name : "cx23885[?]", ## arg); \ | ||
62 | } while (0) | 63 | } while (0) |
63 | 64 | ||
64 | static struct cx23885_tvnorm cx23885_tvnorms[] = { | 65 | static struct cx23885_tvnorm cx23885_tvnorms[] = { |
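
Editor's note: the reworked dprintk() guards against a NULL dev, so messages issued before the device name is filled in (or during teardown) do not dereference a bad pointer. A standalone sketch of the same idea; unlike the driver macro, this one takes the device as an explicit argument so it builds on its own:

	#include <stdio.h>

	struct cx_dev { const char *name; };
	static int v4l_debug = 1;

	/* Fall back to a fixed tag when no device is available yet. */
	#define dprintk(level, dev, fmt, ...) \
		do { if (v4l_debug >= (level)) \
			printf("%s: " fmt, (dev) ? (dev)->name : "cx23885[?]", \
			       ##__VA_ARGS__); \
		} while (0)

	int main(void)
	{
		struct cx_dev d = { "cx23885[0]" };
		dprintk(1, &d, "probing\n");
		dprintk(1, (struct cx_dev *)NULL, "no device yet\n");
		return 0;
	}
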
@@ -1677,6 +1678,7 @@ static struct v4l2_file_operations mpeg_fops = { | |||
1677 | .read = mpeg_read, | 1678 | .read = mpeg_read, |
1678 | .poll = mpeg_poll, | 1679 | .poll = mpeg_poll, |
1679 | .mmap = mpeg_mmap, | 1680 | .mmap = mpeg_mmap, |
1681 | .ioctl = video_ioctl2, | ||
1680 | }; | 1682 | }; |
1681 | 1683 | ||
1682 | static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { | 1684 | static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { |
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index ebd24a25fb85..320f1f60276e 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c | |||
@@ -58,8 +58,6 @@ static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; | |||
58 | module_param_array(card, int, NULL, 0444); | 58 | module_param_array(card, int, NULL, 0444); |
59 | MODULE_PARM_DESC(card, "card type"); | 59 | MODULE_PARM_DESC(card, "card type"); |
60 | 60 | ||
61 | #define MT9V011_VERSION 0x8243 | ||
62 | |||
63 | /* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */ | 61 | /* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */ |
64 | static unsigned long em28xx_devused; | 62 | static unsigned long em28xx_devused; |
65 | 63 | ||
@@ -159,6 +157,20 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { | |||
159 | { -1, -1, -1, -1}, | 157 | { -1, -1, -1, -1}, |
160 | }; | 158 | }; |
161 | 159 | ||
160 | /* Pinnacle Hybrid Pro eb1a:2881 */ | ||
161 | static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { | ||
162 | {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10}, | ||
163 | { -1, -1, -1, -1}, | ||
164 | }; | ||
165 | |||
166 | static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { | ||
167 | {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10}, | ||
168 | {EM2880_R04_GPO, 0x04, 0xff, 100},/* zl10353 reset */ | ||
169 | {EM2880_R04_GPO, 0x0c, 0xff, 1}, | ||
170 | { -1, -1, -1, -1}, | ||
171 | }; | ||
172 | |||
173 | |||
162 | /* Callback for the most boards */ | 174 | /* Callback for the most boards */ |
163 | static struct em28xx_reg_seq default_tuner_gpio[] = { | 175 | static struct em28xx_reg_seq default_tuner_gpio[] = { |
164 | {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, | 176 | {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, |
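
Editor's note: each em28xx_reg_seq row appears to be {register, value, mask, delay in ms}, applied in order until the all -1 terminator; the new Pinnacle digital sequence sets GPIO 4 and then toggles the GPO register that the in-tree comment marks as the zl10353 reset. My reading of one row, with field labels that are mine, not the driver's:

	/* {EM2880_R04_GPO, 0x04, 0xff, 100}
	 *      |            |     |     +-- wait ~100 ms after the write
	 *      |            |     +-------- mask: update all 8 bits
	 *      |            +-------------- value written to the register
	 *      +--------------------------- GPO register on the em2880
	 * A row of { -1, -1, -1, -1 } terminates the sequence.
	 */
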
@@ -205,13 +217,15 @@ static struct em28xx_reg_seq silvercrest_reg_seq[] = { | |||
205 | */ | 217 | */ |
206 | struct em28xx_board em28xx_boards[] = { | 218 | struct em28xx_board em28xx_boards[] = { |
207 | [EM2750_BOARD_UNKNOWN] = { | 219 | [EM2750_BOARD_UNKNOWN] = { |
208 | .name = "Unknown EM2750/EM2751 webcam grabber", | 220 | .name = "EM2710/EM2750/EM2751 webcam grabber", |
209 | .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, | 221 | .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, |
210 | .tuner_type = TUNER_ABSENT, /* This is a webcam */ | 222 | .tuner_type = TUNER_ABSENT, |
223 | .is_webcam = 1, | ||
211 | .input = { { | 224 | .input = { { |
212 | .type = EM28XX_VMUX_COMPOSITE1, | 225 | .type = EM28XX_VMUX_COMPOSITE1, |
213 | .vmux = 0, | 226 | .vmux = 0, |
214 | .amux = EM28XX_AMUX_VIDEO, | 227 | .amux = EM28XX_AMUX_VIDEO, |
228 | .gpio = silvercrest_reg_seq, | ||
215 | } }, | 229 | } }, |
216 | }, | 230 | }, |
217 | [EM2800_BOARD_UNKNOWN] = { | 231 | [EM2800_BOARD_UNKNOWN] = { |
@@ -233,13 +247,15 @@ struct em28xx_board em28xx_boards[] = { | |||
233 | [EM2820_BOARD_UNKNOWN] = { | 247 | [EM2820_BOARD_UNKNOWN] = { |
234 | .name = "Unknown EM2750/28xx video grabber", | 248 | .name = "Unknown EM2750/28xx video grabber", |
235 | .tuner_type = TUNER_ABSENT, | 249 | .tuner_type = TUNER_ABSENT, |
250 | .is_webcam = 1, /* To enable sensor probe */ | ||
236 | }, | 251 | }, |
237 | [EM2750_BOARD_DLCW_130] = { | 252 | [EM2750_BOARD_DLCW_130] = { |
238 | /* Beijing Huaqi Information Digital Technology Co., Ltd */ | 253 | /* Beijing Huaqi Information Digital Technology Co., Ltd */ |
239 | .name = "Huaqi DLCW-130", | 254 | .name = "Huaqi DLCW-130", |
240 | .valid = EM28XX_BOARD_NOT_VALIDATED, | 255 | .valid = EM28XX_BOARD_NOT_VALIDATED, |
241 | .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, | 256 | .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, |
242 | .tuner_type = TUNER_ABSENT, /* This is a webcam */ | 257 | .tuner_type = TUNER_ABSENT, |
258 | .is_webcam = 1, | ||
243 | .input = { { | 259 | .input = { { |
244 | .type = EM28XX_VMUX_COMPOSITE1, | 260 | .type = EM28XX_VMUX_COMPOSITE1, |
245 | .vmux = 0, | 261 | .vmux = 0, |
@@ -440,7 +456,8 @@ struct em28xx_board em28xx_boards[] = { | |||
440 | [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { | 456 | [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { |
441 | .name = "Videology 20K14XUSB USB2.0", | 457 | .name = "Videology 20K14XUSB USB2.0", |
442 | .valid = EM28XX_BOARD_NOT_VALIDATED, | 458 | .valid = EM28XX_BOARD_NOT_VALIDATED, |
443 | .tuner_type = TUNER_ABSENT, /* This is a webcam */ | 459 | .tuner_type = TUNER_ABSENT, |
460 | .is_webcam = 1, | ||
444 | .input = { { | 461 | .input = { { |
445 | .type = EM28XX_VMUX_COMPOSITE1, | 462 | .type = EM28XX_VMUX_COMPOSITE1, |
446 | .vmux = 0, | 463 | .vmux = 0, |
@@ -450,8 +467,7 @@ struct em28xx_board em28xx_boards[] = { | |||
450 | [EM2820_BOARD_SILVERCREST_WEBCAM] = { | 467 | [EM2820_BOARD_SILVERCREST_WEBCAM] = { |
451 | .name = "Silvercrest Webcam 1.3mpix", | 468 | .name = "Silvercrest Webcam 1.3mpix", |
452 | .tuner_type = TUNER_ABSENT, | 469 | .tuner_type = TUNER_ABSENT, |
453 | .is_27xx = 1, | 470 | .is_webcam = 1, |
454 | .decoder = EM28XX_MT9V011, | ||
455 | .input = { { | 471 | .input = { { |
456 | .type = EM28XX_VMUX_COMPOSITE1, | 472 | .type = EM28XX_VMUX_COMPOSITE1, |
457 | .vmux = 0, | 473 | .vmux = 0, |
@@ -500,7 +516,8 @@ struct em28xx_board em28xx_boards[] = { | |||
500 | /* Beijing Huaqi Information Digital Technology Co., Ltd */ | 516 | /* Beijing Huaqi Information Digital Technology Co., Ltd */ |
501 | .name = "NetGMBH Cam", | 517 | .name = "NetGMBH Cam", |
502 | .valid = EM28XX_BOARD_NOT_VALIDATED, | 518 | .valid = EM28XX_BOARD_NOT_VALIDATED, |
503 | .tuner_type = TUNER_ABSENT, /* This is a webcam */ | 519 | .tuner_type = TUNER_ABSENT, |
520 | .is_webcam = 1, | ||
504 | .input = { { | 521 | .input = { { |
505 | .type = EM28XX_VMUX_COMPOSITE1, | 522 | .type = EM28XX_VMUX_COMPOSITE1, |
506 | .vmux = 0, | 523 | .vmux = 0, |
@@ -1250,25 +1267,26 @@ struct em28xx_board em28xx_boards[] = { | |||
1250 | }, | 1267 | }, |
1251 | [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { | 1268 | [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { |
1252 | .name = "Pinnacle Hybrid Pro", | 1269 | .name = "Pinnacle Hybrid Pro", |
1253 | .valid = EM28XX_BOARD_NOT_VALIDATED, | ||
1254 | .tuner_type = TUNER_XC2028, | 1270 | .tuner_type = TUNER_XC2028, |
1255 | .tuner_gpio = default_tuner_gpio, | 1271 | .tuner_gpio = default_tuner_gpio, |
1256 | .decoder = EM28XX_TVP5150, | 1272 | .decoder = EM28XX_TVP5150, |
1273 | .has_dvb = 1, | ||
1274 | .dvb_gpio = pinnacle_hybrid_pro_digital, | ||
1257 | .input = { { | 1275 | .input = { { |
1258 | .type = EM28XX_VMUX_TELEVISION, | 1276 | .type = EM28XX_VMUX_TELEVISION, |
1259 | .vmux = TVP5150_COMPOSITE0, | 1277 | .vmux = TVP5150_COMPOSITE0, |
1260 | .amux = EM28XX_AMUX_VIDEO, | 1278 | .amux = EM28XX_AMUX_VIDEO, |
1261 | .gpio = default_analog, | 1279 | .gpio = pinnacle_hybrid_pro_analog, |
1262 | }, { | 1280 | }, { |
1263 | .type = EM28XX_VMUX_COMPOSITE1, | 1281 | .type = EM28XX_VMUX_COMPOSITE1, |
1264 | .vmux = TVP5150_COMPOSITE1, | 1282 | .vmux = TVP5150_COMPOSITE1, |
1265 | .amux = EM28XX_AMUX_LINE_IN, | 1283 | .amux = EM28XX_AMUX_LINE_IN, |
1266 | .gpio = default_analog, | 1284 | .gpio = pinnacle_hybrid_pro_analog, |
1267 | }, { | 1285 | }, { |
1268 | .type = EM28XX_VMUX_SVIDEO, | 1286 | .type = EM28XX_VMUX_SVIDEO, |
1269 | .vmux = TVP5150_SVIDEO, | 1287 | .vmux = TVP5150_SVIDEO, |
1270 | .amux = EM28XX_AMUX_LINE_IN, | 1288 | .amux = EM28XX_AMUX_LINE_IN, |
1271 | .gpio = default_analog, | 1289 | .gpio = pinnacle_hybrid_pro_analog, |
1272 | } }, | 1290 | } }, |
1273 | }, | 1291 | }, |
1274 | [EM2882_BOARD_PINNACLE_HYBRID_PRO] = { | 1292 | [EM2882_BOARD_PINNACLE_HYBRID_PRO] = { |
@@ -1638,6 +1656,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = { | |||
1638 | {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, | 1656 | {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, |
1639 | {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, | 1657 | {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, |
1640 | {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, | 1658 | {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, |
1659 | {0xb8846b20, EM2881_BOARD_PINNACLE_HYBRID_PRO, TUNER_XC2028}, | ||
1641 | }; | 1660 | }; |
1642 | 1661 | ||
1643 | /* I2C devicelist hash table for devices with generic USB IDs */ | 1662 | /* I2C devicelist hash table for devices with generic USB IDs */ |
@@ -1704,6 +1723,32 @@ static inline void em28xx_set_model(struct em28xx *dev) | |||
1704 | EM28XX_I2C_FREQ_100_KHZ; | 1723 | EM28XX_I2C_FREQ_100_KHZ; |
1705 | } | 1724 | } |
1706 | 1725 | ||
1726 | /* FIXME: Should be replaced by a proper mt9m001 driver */ | ||
1727 | static int em28xx_initialize_mt9m001(struct em28xx *dev) | ||
1728 | { | ||
1729 | int i; | ||
1730 | unsigned char regs[][3] = { | ||
1731 | { 0x0d, 0x00, 0x01, }, | ||
1732 | { 0x0d, 0x00, 0x00, }, | ||
1733 | { 0x04, 0x05, 0x00, }, /* hres = 1280 */ | ||
1734 | { 0x03, 0x04, 0x00, }, /* vres = 1024 */ | ||
1735 | { 0x20, 0x11, 0x00, }, | ||
1736 | { 0x06, 0x00, 0x10, }, | ||
1737 | { 0x2b, 0x00, 0x24, }, | ||
1738 | { 0x2e, 0x00, 0x24, }, | ||
1739 | { 0x35, 0x00, 0x24, }, | ||
1740 | { 0x2d, 0x00, 0x20, }, | ||
1741 | { 0x2c, 0x00, 0x20, }, | ||
1742 | { 0x09, 0x0a, 0xd4, }, | ||
1743 | { 0x35, 0x00, 0x57, }, | ||
1744 | }; | ||
1745 | |||
1746 | for (i = 0; i < ARRAY_SIZE(regs); i++) | ||
1747 | i2c_master_send(&dev->i2c_client, ®s[i][0], 3); | ||
1748 | |||
1749 | return 0; | ||
1750 | } | ||
1751 | |||
1707 | /* HINT method: webcam I2C chips | 1752 | /* HINT method: webcam I2C chips |
1708 | * | 1753 | * |
1709 | * This method work for webcams with Micron sensors | 1754 | * This method work for webcams with Micron sensors |
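
Editor's note: the three-byte rows in em28xx_initialize_mt9m001() read as {register, value MSB, value LSB}: 0x04 05 00 programs 0x0500 = 1280 and 0x03 04 00 programs 0x0400 = 1024, which matches the hres/vres comments. A tiny standalone check of that reading (an inference from the table, not taken from a datasheet):

	#include <stdio.h>

	int main(void)
	{
		unsigned char row_h[3] = { 0x04, 0x05, 0x00 };  /* hres row */
		unsigned char row_v[3] = { 0x03, 0x04, 0x00 };  /* vres row */

		/* a big-endian 16-bit value follows the register address */
		printf("hres = %d\n", (row_h[1] << 8) | row_h[2]);  /* 1280 */
		printf("vres = %d\n", (row_v[1] << 8) | row_v[2]);  /* 1024 */
		return 0;
	}
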
@@ -1716,9 +1761,6 @@ static int em28xx_hint_sensor(struct em28xx *dev) | |||
1716 | __be16 version_be; | 1761 | __be16 version_be; |
1717 | u16 version; | 1762 | u16 version; |
1718 | 1763 | ||
1719 | if (dev->model != EM2820_BOARD_UNKNOWN) | ||
1720 | return 0; | ||
1721 | |||
1722 | dev->i2c_client.addr = 0xba >> 1; | 1764 | dev->i2c_client.addr = 0xba >> 1; |
1723 | cmd = 0; | 1765 | cmd = 0; |
1724 | i2c_master_send(&dev->i2c_client, &cmd, 1); | 1766 | i2c_master_send(&dev->i2c_client, &cmd, 1); |
@@ -1729,16 +1771,38 @@ static int em28xx_hint_sensor(struct em28xx *dev) | |||
1729 | version = be16_to_cpu(version_be); | 1771 | version = be16_to_cpu(version_be); |
1730 | 1772 | ||
1731 | switch (version) { | 1773 | switch (version) { |
1732 | case MT9V011_VERSION: | 1774 | case 0x8243: /* mt9v011 640x480 1.3 Mpix sensor */ |
1733 | dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; | 1775 | dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; |
1734 | sensor_name = "mt9v011"; | 1776 | sensor_name = "mt9v011"; |
1777 | dev->em28xx_sensor = EM28XX_MT9V011; | ||
1778 | dev->sensor_xres = 640; | ||
1779 | dev->sensor_yres = 480; | ||
1780 | dev->sensor_xtal = 6300000; | ||
1781 | |||
1782 | /* probably means GRGB 16 bit bayer */ | ||
1783 | dev->vinmode = 0x0d; | ||
1784 | dev->vinctl = 0x00; | ||
1785 | |||
1786 | break; | ||
1787 | case 0x8431: | ||
1788 | dev->model = EM2750_BOARD_UNKNOWN; | ||
1789 | sensor_name = "mt9m001"; | ||
1790 | dev->em28xx_sensor = EM28XX_MT9M001; | ||
1791 | em28xx_initialize_mt9m001(dev); | ||
1792 | dev->sensor_xres = 1280; | ||
1793 | dev->sensor_yres = 1024; | ||
1794 | |||
1795 | /* probably means BGGR 16 bit bayer */ | ||
1796 | dev->vinmode = 0x0c; | ||
1797 | dev->vinctl = 0x00; | ||
1798 | |||
1735 | break; | 1799 | break; |
1736 | default: | 1800 | default: |
1737 | printk("Unknown Sensor 0x%04x\n", be16_to_cpu(version)); | 1801 | printk("Unknown Micron Sensor 0x%04x\n", be16_to_cpu(version)); |
1738 | return -EINVAL; | 1802 | return -EINVAL; |
1739 | } | 1803 | } |
1740 | 1804 | ||
1741 | em28xx_errdev("Sensor is %s, assuming that webcam is %s\n", | 1805 | em28xx_errdev("Sensor is %s, using model %s entry.\n", |
1742 | sensor_name, em28xx_boards[dev->model].name); | 1806 | sensor_name, em28xx_boards[dev->model].name); |
1743 | 1807 | ||
1744 | return 0; | 1808 | return 0; |
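
Editor's note: the hint routine writes register 0 (the Micron chip-version register), reads the two-byte version back as big-endian (the recv call sits between the two hunks), and switches on the ID: 0x8243 selects the mt9v011 settings, 0x8431 the mt9m001 ones. A standalone sketch of just the endianness and dispatch, with the i2c traffic elided:

	#include <stdio.h>
	#include <stdint.h>

	/* Pretend these two bytes just came back from the sensor */
	static uint16_t parse_version(const unsigned char buf[2])
	{
		return (uint16_t)((buf[0] << 8) | buf[1]);   /* be16_to_cpu */
	}

	int main(void)
	{
		unsigned char raw[2] = { 0x82, 0x43 };

		switch (parse_version(raw)) {
		case 0x8243: puts("mt9v011 (640x480)");   break;
		case 0x8431: puts("mt9m001 (1280x1024)"); break;
		default:     puts("unknown Micron sensor");
		}
		return 0;
	}
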
@@ -1772,10 +1836,7 @@ void em28xx_pre_card_setup(struct em28xx *dev) | |||
1772 | em28xx_info("chip ID is em2750\n"); | 1836 | em28xx_info("chip ID is em2750\n"); |
1773 | break; | 1837 | break; |
1774 | case CHIP_ID_EM2820: | 1838 | case CHIP_ID_EM2820: |
1775 | if (dev->board.is_27xx) | 1839 | em28xx_info("chip ID is em2710 or em2820\n"); |
1776 | em28xx_info("chip is em2710\n"); | ||
1777 | else | ||
1778 | em28xx_info("chip ID is em2820\n"); | ||
1779 | break; | 1840 | break; |
1780 | case CHIP_ID_EM2840: | 1841 | case CHIP_ID_EM2840: |
1781 | em28xx_info("chip ID is em2840\n"); | 1842 | em28xx_info("chip ID is em2840\n"); |
@@ -1929,6 +1990,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) | |||
1929 | ctl->demod = XC3028_FE_ZARLINK456; | 1990 | ctl->demod = XC3028_FE_ZARLINK456; |
1930 | break; | 1991 | break; |
1931 | case EM2880_BOARD_TERRATEC_HYBRID_XS: | 1992 | case EM2880_BOARD_TERRATEC_HYBRID_XS: |
1993 | case EM2881_BOARD_PINNACLE_HYBRID_PRO: | ||
1932 | ctl->demod = XC3028_FE_ZARLINK456; | 1994 | ctl->demod = XC3028_FE_ZARLINK456; |
1933 | break; | 1995 | break; |
1934 | case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: | 1996 | case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: |
@@ -2225,6 +2287,7 @@ void em28xx_card_setup(struct em28xx *dev) | |||
2225 | em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, | 2287 | em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, |
2226 | so make the call now so the analog GPIOs are set properly | 2288 | so make the call now so the analog GPIOs are set properly |
2227 | before probing the i2c bus. */ | 2289 | before probing the i2c bus. */ |
2290 | em28xx_gpio_set(dev, dev->board.tuner_gpio); | ||
2228 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); | 2291 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); |
2229 | break; | 2292 | break; |
2230 | case EM2820_BOARD_SILVERCREST_WEBCAM: | 2293 | case EM2820_BOARD_SILVERCREST_WEBCAM: |
@@ -2262,9 +2325,14 @@ void em28xx_card_setup(struct em28xx *dev) | |||
2262 | v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, | 2325 | v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, |
2263 | "tvp5150", "tvp5150", tvp5150_addrs); | 2326 | "tvp5150", "tvp5150", tvp5150_addrs); |
2264 | 2327 | ||
2265 | if (dev->board.decoder == EM28XX_MT9V011) | 2328 | if (dev->em28xx_sensor == EM28XX_MT9V011) { |
2266 | v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, | 2329 | struct v4l2_subdev *sd; |
2267 | "mt9v011", "mt9v011", mt9v011_addrs); | 2330 | |
2331 | sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, | ||
2332 | &dev->i2c_adap, "mt9v011", "mt9v011", mt9v011_addrs); | ||
2333 | v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); | ||
2334 | } | ||
2335 | |||
2268 | 2336 | ||
2269 | if (dev->board.adecoder == EM28XX_TVAUDIO) | 2337 | if (dev->board.adecoder == EM28XX_TVAUDIO) |
2270 | v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, | 2338 | v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, |
@@ -2410,7 +2478,19 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, | |||
2410 | return errCode; | 2478 | return errCode; |
2411 | } | 2479 | } |
2412 | 2480 | ||
2413 | em28xx_hint_sensor(dev); | 2481 | /* |
2482 | * Default format, used for tvp5150 or saa711x output formats | ||
2483 | */ | ||
2484 | dev->vinmode = 0x10; | ||
2485 | dev->vinctl = 0x11; | ||
2486 | |||
2487 | /* | ||
2488 | * If the device can be a webcam, probe for a sensor. | ||
2489 | * If no sensor is found, then it isn't a webcam. | ||
2490 | */ | ||
2491 | if (dev->board.is_webcam) | ||
2492 | if (em28xx_hint_sensor(dev) < 0) | ||
2493 | dev->board.is_webcam = 0; | ||
2414 | 2494 | ||
2415 | /* Do board specific init and eeprom reading */ | 2495 | /* Do board specific init and eeprom reading */ |
2416 | em28xx_card_setup(dev); | 2496 | em28xx_card_setup(dev); |
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c index 079ab4d563a6..5b78e199abd1 100644 --- a/drivers/media/video/em28xx/em28xx-core.c +++ b/drivers/media/video/em28xx/em28xx-core.c | |||
@@ -648,28 +648,17 @@ int em28xx_capture_start(struct em28xx *dev, int start) | |||
648 | int em28xx_set_outfmt(struct em28xx *dev) | 648 | int em28xx_set_outfmt(struct em28xx *dev) |
649 | { | 649 | { |
650 | int ret; | 650 | int ret; |
651 | int vinmode, vinctl, outfmt; | ||
652 | |||
653 | outfmt = dev->format->reg; | ||
654 | |||
655 | if (dev->board.is_27xx) { | ||
656 | vinmode = 0x0d; | ||
657 | vinctl = 0x00; | ||
658 | } else { | ||
659 | vinmode = 0x10; | ||
660 | vinctl = 0x11; | ||
661 | } | ||
662 | 651 | ||
663 | ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT, | 652 | ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT, |
664 | outfmt | 0x20, 0xff); | 653 | dev->format->reg | 0x20, 0xff); |
665 | if (ret < 0) | 654 | if (ret < 0) |
666 | return ret; | 655 | return ret; |
667 | 656 | ||
668 | ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, vinmode); | 657 | ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode); |
669 | if (ret < 0) | 658 | if (ret < 0) |
670 | return ret; | 659 | return ret; |
671 | 660 | ||
672 | return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctl); | 661 | return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, dev->vinctl); |
673 | } | 662 | } |
674 | 663 | ||
675 | static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, | 664 | static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, |
@@ -707,10 +696,7 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v) | |||
707 | u8 mode; | 696 | u8 mode; |
708 | /* the em2800 scaler only supports scaling down to 50% */ | 697 | /* the em2800 scaler only supports scaling down to 50% */ |
709 | 698 | ||
710 | if (dev->board.is_27xx) { | 699 | if (dev->board.is_em2800) { |
711 | /* FIXME: Don't use the scaler yet */ | ||
712 | mode = 0; | ||
713 | } else if (dev->board.is_em2800) { | ||
714 | mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); | 700 | mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); |
715 | } else { | 701 | } else { |
716 | u8 buf[2]; | 702 | u8 buf[2]; |
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index 3da97c32b8fa..cf0ac7f2a30d 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include "lgdt330x.h" | 31 | #include "lgdt330x.h" |
32 | #include "zl10353.h" | 32 | #include "zl10353.h" |
33 | #include "s5h1409.h" | 33 | #include "s5h1409.h" |
34 | #include "mt352.h" | ||
35 | #include "mt352_priv.h" /* FIXME */ | ||
34 | 36 | ||
35 | MODULE_DESCRIPTION("driver for em28xx based DVB cards"); | 37 | MODULE_DESCRIPTION("driver for em28xx based DVB cards"); |
36 | MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); | 38 | MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); |
@@ -243,7 +245,7 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = { | |||
243 | .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK | 245 | .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK |
244 | }; | 246 | }; |
245 | 247 | ||
246 | static struct zl10353_config em28xx_terratec_xs_zl10353_xc3028 = { | 248 | static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { |
247 | .demod_address = (0x1e >> 1), | 249 | .demod_address = (0x1e >> 1), |
248 | .no_tuner = 1, | 250 | .no_tuner = 1, |
249 | .disable_i2c_gate_ctrl = 1, | 251 | .disable_i2c_gate_ctrl = 1, |
@@ -258,6 +260,41 @@ static struct drx397xD_config em28xx_drx397xD_with_xc3028 = { | |||
258 | }; | 260 | }; |
259 | #endif | 261 | #endif |
260 | 262 | ||
263 | static int mt352_terratec_xs_init(struct dvb_frontend *fe) | ||
264 | { | ||
265 | /* Values extracted from a USB trace of the Terratec Windows driver */ | ||
266 | static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x2c }; | ||
267 | static u8 reset[] = { RESET, 0x80 }; | ||
268 | static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; | ||
269 | static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0xa0 }; | ||
270 | static u8 input_freq_cfg[] = { INPUT_FREQ_1, 0x31, 0xb8 }; | ||
271 | static u8 rs_err_cfg[] = { RS_ERR_PER_1, 0x00, 0x4d }; | ||
272 | static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; | ||
273 | static u8 trl_nom_cfg[] = { TRL_NOMINAL_RATE_1, 0x64, 0x00 }; | ||
274 | static u8 tps_given_cfg[] = { TPS_GIVEN_1, 0x40, 0x80, 0x50 }; | ||
275 | static u8 tuner_go[] = { TUNER_GO, 0x01}; | ||
276 | |||
277 | mt352_write(fe, clock_config, sizeof(clock_config)); | ||
278 | udelay(200); | ||
279 | mt352_write(fe, reset, sizeof(reset)); | ||
280 | mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); | ||
281 | mt352_write(fe, agc_cfg, sizeof(agc_cfg)); | ||
282 | mt352_write(fe, input_freq_cfg, sizeof(input_freq_cfg)); | ||
283 | mt352_write(fe, rs_err_cfg, sizeof(rs_err_cfg)); | ||
284 | mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); | ||
285 | mt352_write(fe, trl_nom_cfg, sizeof(trl_nom_cfg)); | ||
286 | mt352_write(fe, tps_given_cfg, sizeof(tps_given_cfg)); | ||
287 | mt352_write(fe, tuner_go, sizeof(tuner_go)); | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static struct mt352_config terratec_xs_mt352_cfg = { | ||
292 | .demod_address = (0x1e >> 1), | ||
293 | .no_tuner = 1, | ||
294 | .if2 = 45600, | ||
295 | .demod_init = mt352_terratec_xs_init, | ||
296 | }; | ||
297 | |||
261 | /* ------------------------------------------------------------------ */ | 298 | /* ------------------------------------------------------------------ */ |
262 | 299 | ||
263 | static int attach_xc3028(u8 addr, struct em28xx *dev) | 300 | static int attach_xc3028(u8 addr, struct em28xx *dev) |
@@ -440,7 +477,6 @@ static int dvb_init(struct em28xx *dev) | |||
440 | goto out_free; | 477 | goto out_free; |
441 | } | 478 | } |
442 | break; | 479 | break; |
443 | case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: | ||
444 | case EM2880_BOARD_KWORLD_DVB_310U: | 480 | case EM2880_BOARD_KWORLD_DVB_310U: |
445 | case EM2880_BOARD_EMPIRE_DUAL_TV: | 481 | case EM2880_BOARD_EMPIRE_DUAL_TV: |
446 | dvb->frontend = dvb_attach(zl10353_attach, | 482 | dvb->frontend = dvb_attach(zl10353_attach, |
@@ -451,20 +487,28 @@ static int dvb_init(struct em28xx *dev) | |||
451 | goto out_free; | 487 | goto out_free; |
452 | } | 488 | } |
453 | break; | 489 | break; |
490 | case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: | ||
491 | dvb->frontend = dvb_attach(zl10353_attach, | ||
492 | &em28xx_zl10353_xc3028_no_i2c_gate, | ||
493 | &dev->i2c_adap); | ||
494 | if (attach_xc3028(0x61, dev) < 0) { | ||
495 | result = -EINVAL; | ||
496 | goto out_free; | ||
497 | } | ||
498 | break; | ||
454 | case EM2880_BOARD_TERRATEC_HYBRID_XS: | 499 | case EM2880_BOARD_TERRATEC_HYBRID_XS: |
500 | case EM2881_BOARD_PINNACLE_HYBRID_PRO: | ||
455 | dvb->frontend = dvb_attach(zl10353_attach, | 501 | dvb->frontend = dvb_attach(zl10353_attach, |
456 | &em28xx_terratec_xs_zl10353_xc3028, | 502 | &em28xx_zl10353_xc3028_no_i2c_gate, |
457 | &dev->i2c_adap); | 503 | &dev->i2c_adap); |
458 | if (dvb->frontend == NULL) { | 504 | if (dvb->frontend == NULL) { |
459 | /* This board could have either a zl10353 or a mt352. | 505 | /* This board could have either a zl10353 or a mt352. |
460 | If the chip id isn't for zl10353, try mt352 */ | 506 | If the chip id isn't for zl10353, try mt352 */ |
461 | 507 | dvb->frontend = dvb_attach(mt352_attach, | |
462 | /* FIXME: make support for mt352 work */ | 508 | &terratec_xs_mt352_cfg, |
463 | printk(KERN_ERR "version of this board with mt352 not " | 509 | &dev->i2c_adap); |
464 | "currently supported\n"); | ||
465 | result = -EINVAL; | ||
466 | goto out_free; | ||
467 | } | 510 | } |
511 | |||
468 | if (attach_xc3028(0x61, dev) < 0) { | 512 | if (attach_xc3028(0x61, dev) < 0) { |
469 | result = -EINVAL; | 513 | result = -EINVAL; |
470 | goto out_free; | 514 | goto out_free; |
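
Editor's note: for boards that shipped with either demodulator, the attach path now tries the zl10353 first and retries with the mt352 only when that probe fails, using register values captured from a USB trace of the vendor driver. The shape of the fallback, condensed from the hunk above with the result = -EINVAL handling omitted:

	dvb->frontend = dvb_attach(zl10353_attach,
				   &em28xx_zl10353_xc3028_no_i2c_gate,
				   &dev->i2c_adap);
	if (dvb->frontend == NULL)              /* not a zl10353, try mt352 */
		dvb->frontend = dvb_attach(mt352_attach,
					   &terratec_xs_mt352_cfg,
					   &dev->i2c_adap);
	if (attach_xc3028(0x61, dev) < 0)       /* tuner is common to both */
		goto out_free;
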
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index 14316c912179..ff37b4c15f44 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c | |||
@@ -657,8 +657,8 @@ static void get_scale(struct em28xx *dev, | |||
657 | unsigned int width, unsigned int height, | 657 | unsigned int width, unsigned int height, |
658 | unsigned int *hscale, unsigned int *vscale) | 658 | unsigned int *hscale, unsigned int *vscale) |
659 | { | 659 | { |
660 | unsigned int maxw = norm_maxw(dev); | 660 | unsigned int maxw = norm_maxw(dev); |
661 | unsigned int maxh = norm_maxh(dev); | 661 | unsigned int maxh = norm_maxh(dev); |
662 | 662 | ||
663 | *hscale = (((unsigned long)maxw) << 12) / width - 4096L; | 663 | *hscale = (((unsigned long)maxw) << 12) / width - 4096L; |
664 | if (*hscale >= 0x4000) | 664 | if (*hscale >= 0x4000) |
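
Editor's note: the scaler ratio is held in 1/4096 steps with an implicit 1.0 subtracted, hscale = (maxw << 12) / width - 4096, and the if() above suggests values are clamped below 0x4000 (the assignment is outside this hunk). For maxw = 720 that gives 0 at full width and 0x1000 for a 2:1 downscale. A quick arithmetic check:

	#include <stdio.h>

	int main(void)
	{
		unsigned int maxw = 720;
		unsigned int widths[] = { 720, 360, 180 };

		for (int i = 0; i < 3; i++) {
			unsigned long h =
				(((unsigned long)maxw) << 12) / widths[i] - 4096UL;
			if (h >= 0x4000)
				h = 0x3fff;     /* clamp, as the driver implies */
			printf("width %u -> hscale 0x%04lx\n", widths[i], h);
		}
		return 0;
	}
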
@@ -726,11 +726,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
726 | return -EINVAL; | 726 | return -EINVAL; |
727 | } | 727 | } |
728 | 728 | ||
729 | if (dev->board.is_27xx) { | 729 | if (dev->board.is_em2800) { |
730 | /* FIXME: This is the only supported fmt */ | ||
731 | width = 640; | ||
732 | height = 480; | ||
733 | } else if (dev->board.is_em2800) { | ||
734 | /* the em2800 can only scale down to 50% */ | 730 | /* the em2800 can only scale down to 50% */ |
735 | height = height > (3 * maxh / 4) ? maxh : maxh / 2; | 731 | height = height > (3 * maxh / 4) ? maxh : maxh / 2; |
736 | width = width > (3 * maxw / 4) ? maxw : maxw / 2; | 732 | width = width > (3 * maxw / 4) ? maxw : maxw / 2; |
@@ -767,12 +763,6 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc, | |||
767 | { | 763 | { |
768 | struct em28xx_fmt *fmt; | 764 | struct em28xx_fmt *fmt; |
769 | 765 | ||
770 | /* FIXME: This is the only supported fmt */ | ||
771 | if (dev->board.is_27xx) { | ||
772 | width = 640; | ||
773 | height = 480; | ||
774 | } | ||
775 | |||
776 | fmt = format_by_fourcc(fourcc); | 766 | fmt = format_by_fourcc(fourcc); |
777 | if (!fmt) | 767 | if (!fmt) |
778 | return -EINVAL; | 768 | return -EINVAL; |
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h index d90fef463764..45bd513f62dc 100644 --- a/drivers/media/video/em28xx/em28xx.h +++ b/drivers/media/video/em28xx/em28xx.h | |||
@@ -358,10 +358,15 @@ struct em28xx_input { | |||
358 | #define INPUT(nr) (&em28xx_boards[dev->model].input[nr]) | 358 | #define INPUT(nr) (&em28xx_boards[dev->model].input[nr]) |
359 | 359 | ||
360 | enum em28xx_decoder { | 360 | enum em28xx_decoder { |
361 | EM28XX_NODECODER, | 361 | EM28XX_NODECODER = 0, |
362 | EM28XX_TVP5150, | 362 | EM28XX_TVP5150, |
363 | EM28XX_SAA711X, | 363 | EM28XX_SAA711X, |
364 | }; | ||
365 | |||
366 | enum em28xx_sensor { | ||
367 | EM28XX_NOSENSOR = 0, | ||
364 | EM28XX_MT9V011, | 368 | EM28XX_MT9V011, |
369 | EM28XX_MT9M001, | ||
365 | }; | 370 | }; |
366 | 371 | ||
367 | enum em28xx_adecoder { | 372 | enum em28xx_adecoder { |
@@ -390,7 +395,7 @@ struct em28xx_board { | |||
390 | unsigned int max_range_640_480:1; | 395 | unsigned int max_range_640_480:1; |
391 | unsigned int has_dvb:1; | 396 | unsigned int has_dvb:1; |
392 | unsigned int has_snapshot_button:1; | 397 | unsigned int has_snapshot_button:1; |
393 | unsigned int is_27xx:1; | 398 | unsigned int is_webcam:1; |
394 | unsigned int valid:1; | 399 | unsigned int valid:1; |
395 | 400 | ||
396 | unsigned char xclk, i2c_speed; | 401 | unsigned char xclk, i2c_speed; |
@@ -474,6 +479,14 @@ struct em28xx { | |||
474 | struct v4l2_device v4l2_dev; | 479 | struct v4l2_device v4l2_dev; |
475 | struct em28xx_board board; | 480 | struct em28xx_board board; |
476 | 481 | ||
482 | /* Webcam specific fields */ | ||
483 | enum em28xx_sensor em28xx_sensor; | ||
484 | int sensor_xres, sensor_yres; | ||
485 | int sensor_xtal; | ||
486 | |||
487 | /* Vinmode/Vinctl used at the driver */ | ||
488 | int vinmode, vinctl; | ||
489 | |||
477 | unsigned int stream_on:1; /* Locks streams */ | 490 | unsigned int stream_on:1; /* Locks streams */ |
478 | unsigned int has_audio_class:1; | 491 | unsigned int has_audio_class:1; |
479 | unsigned int has_alsa_audio:1; | 492 | unsigned int has_alsa_audio:1; |
@@ -754,17 +767,23 @@ static inline int em28xx_gamma_set(struct em28xx *dev, s32 val) | |||
754 | /*FIXME: maxw should be dependent of alt mode */ | 767 | /*FIXME: maxw should be dependent of alt mode */ |
755 | static inline unsigned int norm_maxw(struct em28xx *dev) | 768 | static inline unsigned int norm_maxw(struct em28xx *dev) |
756 | { | 769 | { |
770 | if (dev->board.is_webcam) | ||
771 | return dev->sensor_xres; | ||
772 | |||
757 | if (dev->board.max_range_640_480) | 773 | if (dev->board.max_range_640_480) |
758 | return 640; | 774 | return 640; |
759 | else | 775 | |
760 | return 720; | 776 | return 720; |
761 | } | 777 | } |
762 | 778 | ||
763 | static inline unsigned int norm_maxh(struct em28xx *dev) | 779 | static inline unsigned int norm_maxh(struct em28xx *dev) |
764 | { | 780 | { |
781 | if (dev->board.is_webcam) | ||
782 | return dev->sensor_yres; | ||
783 | |||
765 | if (dev->board.max_range_640_480) | 784 | if (dev->board.max_range_640_480) |
766 | return 480; | 785 | return 480; |
767 | else | 786 | |
768 | return (dev->norm & V4L2_STD_625_50) ? 576 : 480; | 787 | return (dev->norm & V4L2_STD_625_50) ? 576 : 480; |
769 | } | 788 | } |
770 | #endif | 789 | #endif |
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig index 578dc4ffc965..34f46f2bc040 100644 --- a/drivers/media/video/gspca/Kconfig +++ b/drivers/media/video/gspca/Kconfig | |||
@@ -102,6 +102,22 @@ config USB_GSPCA_PAC7311 | |||
102 | To compile this driver as a module, choose M here: the | 102 | To compile this driver as a module, choose M here: the |
103 | module will be called gspca_pac7311. | 103 | module will be called gspca_pac7311. |
104 | 104 | ||
105 | config USB_GSPCA_SN9C20X | ||
106 | tristate "SN9C20X USB Camera Driver" | ||
107 | depends on VIDEO_V4L2 && USB_GSPCA | ||
108 | help | ||
109 | Say Y here if you want support for cameras based on the | ||
110 | sn9c20x chips (SN9C201 and SN9C202). | ||
111 | |||
112 | To compile this driver as a module, choose M here: the | ||
113 | module will be called gspca_sn9c20x. | ||
114 | |||
115 | config USB_GSPCA_SN9C20X_EVDEV | ||
116 | bool "Enable evdev support" | ||
117 | depends on USB_GSPCA_SN9C20X | ||
118 | ---help--- | ||
119 | Say Y here in order to enable evdev support for sn9c20x webcam button. | ||
120 | |||
105 | config USB_GSPCA_SONIXB | 121 | config USB_GSPCA_SONIXB |
106 | tristate "SONIX Bayer USB Camera Driver" | 122 | tristate "SONIX Bayer USB Camera Driver" |
107 | depends on VIDEO_V4L2 && USB_GSPCA | 123 | depends on VIDEO_V4L2 && USB_GSPCA |
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile index 8a6643e8eb96..f6d3b86e9ad5 100644 --- a/drivers/media/video/gspca/Makefile +++ b/drivers/media/video/gspca/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o | |||
8 | obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o | 8 | obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o |
9 | obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o | 9 | obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o |
10 | obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o | 10 | obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o |
11 | obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o | ||
11 | obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o | 12 | obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o |
12 | obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o | 13 | obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o |
13 | obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o | 14 | obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o |
@@ -35,6 +36,7 @@ gspca_ov519-objs := ov519.o | |||
35 | gspca_ov534-objs := ov534.o | 36 | gspca_ov534-objs := ov534.o |
36 | gspca_pac207-objs := pac207.o | 37 | gspca_pac207-objs := pac207.o |
37 | gspca_pac7311-objs := pac7311.o | 38 | gspca_pac7311-objs := pac7311.o |
39 | gspca_sn9c20x-objs := sn9c20x.o | ||
38 | gspca_sonixb-objs := sonixb.o | 40 | gspca_sonixb-objs := sonixb.o |
39 | gspca_sonixj-objs := sonixj.o | 41 | gspca_sonixj-objs := sonixj.o |
40 | gspca_spca500-objs := spca500.o | 42 | gspca_spca500-objs := spca500.o |
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c index 219cfa6fb877..8d48ea1742c2 100644 --- a/drivers/media/video/gspca/conex.c +++ b/drivers/media/video/gspca/conex.c | |||
@@ -846,6 +846,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
846 | 846 | ||
847 | /* create the JPEG header */ | 847 | /* create the JPEG header */ |
848 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 848 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
849 | if (!sd->jpeg_hdr) | ||
850 | return -ENOMEM; | ||
849 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 851 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
850 | 0x22); /* JPEG 411 */ | 852 | 0x22); /* JPEG 411 */ |
851 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 853 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 1e89600986c8..b8561dfb6c8c 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c | |||
@@ -727,6 +727,74 @@ static int gspca_get_mode(struct gspca_dev *gspca_dev, | |||
727 | return -EINVAL; | 727 | return -EINVAL; |
728 | } | 728 | } |
729 | 729 | ||
730 | #ifdef CONFIG_VIDEO_ADV_DEBUG | ||
731 | static int vidioc_g_register(struct file *file, void *priv, | ||
732 | struct v4l2_dbg_register *reg) | ||
733 | { | ||
734 | int ret; | ||
735 | struct gspca_dev *gspca_dev = priv; | ||
736 | |||
737 | if (!gspca_dev->sd_desc->get_chip_ident) | ||
738 | return -EINVAL; | ||
739 | |||
740 | if (!gspca_dev->sd_desc->get_register) | ||
741 | return -EINVAL; | ||
742 | |||
743 | if (mutex_lock_interruptible(&gspca_dev->usb_lock)) | ||
744 | return -ERESTARTSYS; | ||
745 | if (gspca_dev->present) | ||
746 | ret = gspca_dev->sd_desc->get_register(gspca_dev, reg); | ||
747 | else | ||
748 | ret = -ENODEV; | ||
749 | mutex_unlock(&gspca_dev->usb_lock); | ||
750 | |||
751 | return ret; | ||
752 | } | ||
753 | |||
754 | static int vidioc_s_register(struct file *file, void *priv, | ||
755 | struct v4l2_dbg_register *reg) | ||
756 | { | ||
757 | int ret; | ||
758 | struct gspca_dev *gspca_dev = priv; | ||
759 | |||
760 | if (!gspca_dev->sd_desc->get_chip_ident) | ||
761 | return -EINVAL; | ||
762 | |||
763 | if (!gspca_dev->sd_desc->set_register) | ||
764 | return -EINVAL; | ||
765 | |||
766 | if (mutex_lock_interruptible(&gspca_dev->usb_lock)) | ||
767 | return -ERESTARTSYS; | ||
768 | if (gspca_dev->present) | ||
769 | ret = gspca_dev->sd_desc->set_register(gspca_dev, reg); | ||
770 | else | ||
771 | ret = -ENODEV; | ||
772 | mutex_unlock(&gspca_dev->usb_lock); | ||
773 | |||
774 | return ret; | ||
775 | } | ||
776 | #endif | ||
777 | |||
778 | static int vidioc_g_chip_ident(struct file *file, void *priv, | ||
779 | struct v4l2_dbg_chip_ident *chip) | ||
780 | { | ||
781 | int ret; | ||
782 | struct gspca_dev *gspca_dev = priv; | ||
783 | |||
784 | if (!gspca_dev->sd_desc->get_chip_ident) | ||
785 | return -EINVAL; | ||
786 | |||
787 | if (mutex_lock_interruptible(&gspca_dev->usb_lock)) | ||
788 | return -ERESTARTSYS; | ||
789 | if (gspca_dev->present) | ||
790 | ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip); | ||
791 | else | ||
792 | ret = -ENODEV; | ||
793 | mutex_unlock(&gspca_dev->usb_lock); | ||
794 | |||
795 | return ret; | ||
796 | } | ||
797 | |||
730 | static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, | 798 | static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, |
731 | struct v4l2_fmtdesc *fmtdesc) | 799 | struct v4l2_fmtdesc *fmtdesc) |
732 | { | 800 | { |
@@ -1883,6 +1951,11 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = { | |||
1883 | .vidioc_s_parm = vidioc_s_parm, | 1951 | .vidioc_s_parm = vidioc_s_parm, |
1884 | .vidioc_s_std = vidioc_s_std, | 1952 | .vidioc_s_std = vidioc_s_std, |
1885 | .vidioc_enum_framesizes = vidioc_enum_framesizes, | 1953 | .vidioc_enum_framesizes = vidioc_enum_framesizes, |
1954 | #ifdef CONFIG_VIDEO_ADV_DEBUG | ||
1955 | .vidioc_g_register = vidioc_g_register, | ||
1956 | .vidioc_s_register = vidioc_s_register, | ||
1957 | #endif | ||
1958 | .vidioc_g_chip_ident = vidioc_g_chip_ident, | ||
1886 | #ifdef CONFIG_VIDEO_V4L1_COMPAT | 1959 | #ifdef CONFIG_VIDEO_V4L1_COMPAT |
1887 | .vidiocgmbuf = vidiocgmbuf, | 1960 | .vidiocgmbuf = vidiocgmbuf, |
1888 | #endif | 1961 | #endif |
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h index bd1faff88644..46c4effdfcd5 100644 --- a/drivers/media/video/gspca/gspca.h +++ b/drivers/media/video/gspca/gspca.h | |||
@@ -69,6 +69,10 @@ typedef void (*cam_v_op) (struct gspca_dev *); | |||
69 | typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *); | 69 | typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *); |
70 | typedef int (*cam_jpg_op) (struct gspca_dev *, | 70 | typedef int (*cam_jpg_op) (struct gspca_dev *, |
71 | struct v4l2_jpegcompression *); | 71 | struct v4l2_jpegcompression *); |
72 | typedef int (*cam_reg_op) (struct gspca_dev *, | ||
73 | struct v4l2_dbg_register *); | ||
74 | typedef int (*cam_ident_op) (struct gspca_dev *, | ||
75 | struct v4l2_dbg_chip_ident *); | ||
72 | typedef int (*cam_streamparm_op) (struct gspca_dev *, | 76 | typedef int (*cam_streamparm_op) (struct gspca_dev *, |
73 | struct v4l2_streamparm *); | 77 | struct v4l2_streamparm *); |
74 | typedef int (*cam_qmnu_op) (struct gspca_dev *, | 78 | typedef int (*cam_qmnu_op) (struct gspca_dev *, |
@@ -105,6 +109,11 @@ struct sd_desc { | |||
105 | cam_qmnu_op querymenu; | 109 | cam_qmnu_op querymenu; |
106 | cam_streamparm_op get_streamparm; | 110 | cam_streamparm_op get_streamparm; |
107 | cam_streamparm_op set_streamparm; | 111 | cam_streamparm_op set_streamparm; |
112 | #ifdef CONFIG_VIDEO_ADV_DEBUG | ||
113 | cam_reg_op set_register; | ||
114 | cam_reg_op get_register; | ||
115 | #endif | ||
116 | cam_ident_op get_chip_ident; | ||
108 | }; | 117 | }; |
109 | 118 | ||
110 | /* packet types when moving from iso buf to frame buf */ | 119 | /* packet types when moving from iso buf to frame buf */ |
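Note: gspca.c only honours VIDIOC_DBG_G_REGISTER / VIDIOC_DBG_S_REGISTER when the subdriver provides both the register callback and get_chip_ident (see the checks above). A sketch of how a subdriver might wire the new sd_desc hooks; the handler names and bodies here are hypothetical, only the field names and typedefs come from the patch:

	#ifdef CONFIG_VIDEO_ADV_DEBUG
	static int sd_dbg_g_register(struct gspca_dev *gspca_dev,
				     struct v4l2_dbg_register *reg)
	{
		/* read the bridge or sensor register selected by reg->reg
		 * and store it in reg->val */
		return 0;
	}

	static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
				     struct v4l2_dbg_register *reg)
	{
		/* write reg->val to the register selected by reg->reg */
		return 0;
	}
	#endif

	static int sd_chip_ident(struct gspca_dev *gspca_dev,
				 struct v4l2_dbg_chip_ident *chip)
	{
		/* report the detected sensor in chip->ident / chip->revision */
		return 0;
	}

	static const struct sd_desc sd_desc = {
		/* ... existing callbacks ... */
	#ifdef CONFIG_VIDEO_ADV_DEBUG
		.get_register = sd_dbg_g_register,
		.set_register = sd_dbg_s_register,
	#endif
		.get_chip_ident = sd_chip_ident,
	};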
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c index 191bcd718979..0163903d1c0f 100644 --- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c +++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c | |||
@@ -476,9 +476,6 @@ static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val) | |||
476 | err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); | 476 | err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); |
477 | if (err < 0) | 477 | if (err < 0) |
478 | return err; | 478 | return err; |
479 | err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); | ||
480 | if (err < 0) | ||
481 | return err; | ||
482 | 479 | ||
483 | err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); | 480 | err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); |
484 | if (err < 0) | 481 | if (err < 0) |
@@ -524,9 +521,6 @@ static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val) | |||
524 | err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); | 521 | err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); |
525 | if (err < 0) | 522 | if (err < 0) |
526 | return err; | 523 | return err; |
527 | err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); | ||
528 | if (err < 0) | ||
529 | return err; | ||
530 | 524 | ||
531 | err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); | 525 | err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); |
532 | if (err < 0) | 526 | if (err < 0) |
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c index 75e8d14e4ac7..de769caf013d 100644 --- a/drivers/media/video/gspca/mars.c +++ b/drivers/media/video/gspca/mars.c | |||
@@ -201,6 +201,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
201 | 201 | ||
202 | /* create the JPEG header */ | 202 | /* create the JPEG header */ |
203 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 203 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
204 | if (!sd->jpeg_hdr) | ||
205 | return -ENOMEM; | ||
204 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 206 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
205 | 0x21); /* JPEG 422 */ | 207 | 0x21); /* JPEG 422 */ |
206 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 208 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c new file mode 100644 index 000000000000..fcfbbd329b4c --- /dev/null +++ b/drivers/media/video/gspca/sn9c20x.c | |||
@@ -0,0 +1,2434 @@ | |||
1 | /* | ||
2 | * Sonix sn9c201 sn9c202 library | ||
3 | * Copyright (C) 2008-2009 microdia project <microdia@googlegroups.com> | ||
4 | * Copyright (C) 2009 Brian Johnson <brijohn@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV | ||
22 | #include <linux/kthread.h> | ||
23 | #include <linux/freezer.h> | ||
24 | #include <linux/usb/input.h> | ||
25 | #include <linux/input.h> | ||
26 | #endif | ||
27 | |||
28 | #include "gspca.h" | ||
29 | #include "jpeg.h" | ||
30 | |||
31 | #include <media/v4l2-chip-ident.h> | ||
32 | |||
33 | MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, " | ||
34 | "microdia project <microdia@googlegroups.com>"); | ||
35 | MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver"); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | |||
38 | #define MODULE_NAME "sn9c20x" | ||
39 | |||
40 | #define MODE_RAW 0x10 | ||
41 | #define MODE_JPEG 0x20 | ||
42 | #define MODE_SXGA 0x80 | ||
43 | |||
44 | #define SENSOR_OV9650 0 | ||
45 | #define SENSOR_OV9655 1 | ||
46 | #define SENSOR_SOI968 2 | ||
47 | #define SENSOR_OV7660 3 | ||
48 | #define SENSOR_OV7670 4 | ||
49 | #define SENSOR_MT9V011 5 | ||
50 | #define SENSOR_MT9V111 6 | ||
51 | #define SENSOR_MT9V112 7 | ||
52 | #define SENSOR_MT9M001 8 | ||
53 | #define SENSOR_MT9M111 9 | ||
54 | #define SENSOR_HV7131R 10 | ||
55 | #define SENSOR_MT9VPRB 20 | ||
56 | |||
57 | /* specific webcam descriptor */ | ||
58 | struct sd { | ||
59 | struct gspca_dev gspca_dev; | ||
60 | |||
61 | #define MIN_AVG_LUM 80 | ||
62 | #define MAX_AVG_LUM 130 | ||
63 | atomic_t avg_lum; | ||
64 | u8 old_step; | ||
65 | u8 older_step; | ||
66 | u8 exposure_step; | ||
67 | |||
68 | u8 brightness; | ||
69 | u8 contrast; | ||
70 | u8 saturation; | ||
71 | s16 hue; | ||
72 | u8 gamma; | ||
73 | u8 red; | ||
74 | u8 blue; | ||
75 | |||
76 | u8 hflip; | ||
77 | u8 vflip; | ||
78 | u8 gain; | ||
79 | u16 exposure; | ||
80 | u8 auto_exposure; | ||
81 | |||
82 | u8 i2c_addr; | ||
83 | u8 sensor; | ||
84 | u8 hstart; | ||
85 | u8 vstart; | ||
86 | |||
87 | u8 *jpeg_hdr; | ||
88 | u8 quality; | ||
89 | |||
90 | #ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV | ||
91 | struct input_dev *input_dev; | ||
92 | u8 input_gpio; | ||
93 | struct task_struct *input_task; | ||
94 | #endif | ||
95 | }; | ||
96 | |||
97 | static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val); | ||
98 | static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val); | ||
99 | static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val); | ||
100 | static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val); | ||
101 | static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val); | ||
102 | static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val); | ||
103 | static int sd_sethue(struct gspca_dev *gspca_dev, s32 val); | ||
104 | static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val); | ||
105 | static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val); | ||
106 | static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val); | ||
107 | static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val); | ||
108 | static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val); | ||
109 | static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val); | ||
110 | static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val); | ||
111 | static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val); | ||
112 | static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val); | ||
113 | static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val); | ||
114 | static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val); | ||
115 | static int sd_setgain(struct gspca_dev *gspca_dev, s32 val); | ||
116 | static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val); | ||
117 | static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val); | ||
118 | static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val); | ||
119 | static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val); | ||
120 | static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val); | ||
121 | |||
122 | static struct ctrl sd_ctrls[] = { | ||
123 | { | ||
124 | #define BRIGHTNESS_IDX 0 | ||
125 | { | ||
126 | .id = V4L2_CID_BRIGHTNESS, | ||
127 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
128 | .name = "Brightness", | ||
129 | .minimum = 0, | ||
130 | .maximum = 0xff, | ||
131 | .step = 1, | ||
132 | #define BRIGHTNESS_DEFAULT 0x7f | ||
133 | .default_value = BRIGHTNESS_DEFAULT, | ||
134 | }, | ||
135 | .set = sd_setbrightness, | ||
136 | .get = sd_getbrightness, | ||
137 | }, | ||
138 | { | ||
139 | #define CONTRAST_IDX 1 | ||
140 | { | ||
141 | .id = V4L2_CID_CONTRAST, | ||
142 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
143 | .name = "Contrast", | ||
144 | .minimum = 0, | ||
145 | .maximum = 0xff, | ||
146 | .step = 1, | ||
147 | #define CONTRAST_DEFAULT 0x7f | ||
148 | .default_value = CONTRAST_DEFAULT, | ||
149 | }, | ||
150 | .set = sd_setcontrast, | ||
151 | .get = sd_getcontrast, | ||
152 | }, | ||
153 | { | ||
154 | #define SATURATION_IDX 2 | ||
155 | { | ||
156 | .id = V4L2_CID_SATURATION, | ||
157 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
158 | .name = "Saturation", | ||
159 | .minimum = 0, | ||
160 | .maximum = 0xff, | ||
161 | .step = 1, | ||
162 | #define SATURATION_DEFAULT 0x7f | ||
163 | .default_value = SATURATION_DEFAULT, | ||
164 | }, | ||
165 | .set = sd_setsaturation, | ||
166 | .get = sd_getsaturation, | ||
167 | }, | ||
168 | { | ||
169 | #define HUE_IDX 3 | ||
170 | { | ||
171 | .id = V4L2_CID_HUE, | ||
172 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
173 | .name = "Hue", | ||
174 | .minimum = -180, | ||
175 | .maximum = 180, | ||
176 | .step = 1, | ||
177 | #define HUE_DEFAULT 0 | ||
178 | .default_value = HUE_DEFAULT, | ||
179 | }, | ||
180 | .set = sd_sethue, | ||
181 | .get = sd_gethue, | ||
182 | }, | ||
183 | { | ||
184 | #define GAMMA_IDX 4 | ||
185 | { | ||
186 | .id = V4L2_CID_GAMMA, | ||
187 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
188 | .name = "Gamma", | ||
189 | .minimum = 0, | ||
190 | .maximum = 0xff, | ||
191 | .step = 1, | ||
192 | #define GAMMA_DEFAULT 0x10 | ||
193 | .default_value = GAMMA_DEFAULT, | ||
194 | }, | ||
195 | .set = sd_setgamma, | ||
196 | .get = sd_getgamma, | ||
197 | }, | ||
198 | { | ||
199 | #define BLUE_IDX 5 | ||
200 | { | ||
201 | .id = V4L2_CID_BLUE_BALANCE, | ||
202 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
203 | .name = "Blue Balance", | ||
204 | .minimum = 0, | ||
205 | .maximum = 0x7f, | ||
206 | .step = 1, | ||
207 | #define BLUE_DEFAULT 0x28 | ||
208 | .default_value = BLUE_DEFAULT, | ||
209 | }, | ||
210 | .set = sd_setbluebalance, | ||
211 | .get = sd_getbluebalance, | ||
212 | }, | ||
213 | { | ||
214 | #define RED_IDX 6 | ||
215 | { | ||
216 | .id = V4L2_CID_RED_BALANCE, | ||
217 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
218 | .name = "Red Balance", | ||
219 | .minimum = 0, | ||
220 | .maximum = 0x7f, | ||
221 | .step = 1, | ||
222 | #define RED_DEFAULT 0x28 | ||
223 | .default_value = RED_DEFAULT, | ||
224 | }, | ||
225 | .set = sd_setredbalance, | ||
226 | .get = sd_getredbalance, | ||
227 | }, | ||
228 | { | ||
229 | #define HFLIP_IDX 7 | ||
230 | { | ||
231 | .id = V4L2_CID_HFLIP, | ||
232 | .type = V4L2_CTRL_TYPE_BOOLEAN, | ||
233 | .name = "Horizontal Flip", | ||
234 | .minimum = 0, | ||
235 | .maximum = 1, | ||
236 | .step = 1, | ||
237 | #define HFLIP_DEFAULT 0 | ||
238 | .default_value = HFLIP_DEFAULT, | ||
239 | }, | ||
240 | .set = sd_sethflip, | ||
241 | .get = sd_gethflip, | ||
242 | }, | ||
243 | { | ||
244 | #define VFLIP_IDX 8 | ||
245 | { | ||
246 | .id = V4L2_CID_VFLIP, | ||
247 | .type = V4L2_CTRL_TYPE_BOOLEAN, | ||
248 | .name = "Vertical Flip", | ||
249 | .minimum = 0, | ||
250 | .maximum = 1, | ||
251 | .step = 1, | ||
252 | #define VFLIP_DEFAULT 0 | ||
253 | .default_value = VFLIP_DEFAULT, | ||
254 | }, | ||
255 | .set = sd_setvflip, | ||
256 | .get = sd_getvflip, | ||
257 | }, | ||
258 | { | ||
259 | #define EXPOSURE_IDX 9 | ||
260 | { | ||
261 | .id = V4L2_CID_EXPOSURE, | ||
262 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
263 | .name = "Exposure", | ||
264 | .minimum = 0, | ||
265 | .maximum = 0x1780, | ||
266 | .step = 1, | ||
267 | #define EXPOSURE_DEFAULT 0x33 | ||
268 | .default_value = EXPOSURE_DEFAULT, | ||
269 | }, | ||
270 | .set = sd_setexposure, | ||
271 | .get = sd_getexposure, | ||
272 | }, | ||
273 | { | ||
274 | #define GAIN_IDX 10 | ||
275 | { | ||
276 | .id = V4L2_CID_GAIN, | ||
277 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
278 | .name = "Gain", | ||
279 | .minimum = 0, | ||
280 | .maximum = 28, | ||
281 | .step = 1, | ||
282 | #define GAIN_DEFAULT 0x00 | ||
283 | .default_value = GAIN_DEFAULT, | ||
284 | }, | ||
285 | .set = sd_setgain, | ||
286 | .get = sd_getgain, | ||
287 | }, | ||
288 | { | ||
289 | #define AUTOGAIN_IDX 11 | ||
290 | { | ||
291 | .id = V4L2_CID_AUTOGAIN, | ||
292 | .type = V4L2_CTRL_TYPE_BOOLEAN, | ||
293 | .name = "Auto Exposure", | ||
294 | .minimum = 0, | ||
295 | .maximum = 1, | ||
296 | .step = 1, | ||
297 | #define AUTO_EXPOSURE_DEFAULT 1 | ||
298 | .default_value = AUTO_EXPOSURE_DEFAULT, | ||
299 | }, | ||
300 | .set = sd_setautoexposure, | ||
301 | .get = sd_getautoexposure, | ||
302 | }, | ||
303 | }; | ||
304 | |||
305 | static const struct v4l2_pix_format vga_mode[] = { | ||
306 | {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
307 | .bytesperline = 240, | ||
308 | .sizeimage = 240 * 120, | ||
309 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
310 | .priv = 0 | MODE_JPEG}, | ||
311 | {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
312 | .bytesperline = 160, | ||
313 | .sizeimage = 160 * 120, | ||
314 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
315 | .priv = 0 | MODE_RAW}, | ||
316 | {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, | ||
317 | .bytesperline = 240, | ||
318 | .sizeimage = 240 * 120, | ||
319 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
320 | .priv = 0}, | ||
321 | {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
322 | .bytesperline = 480, | ||
323 | .sizeimage = 480 * 240 , | ||
324 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
325 | .priv = 1 | MODE_JPEG}, | ||
326 | {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
327 | .bytesperline = 320, | ||
328 | .sizeimage = 320 * 240 , | ||
329 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
330 | .priv = 1 | MODE_RAW}, | ||
331 | {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, | ||
332 | .bytesperline = 480, | ||
333 | .sizeimage = 480 * 240 , | ||
334 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
335 | .priv = 1}, | ||
336 | {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
337 | .bytesperline = 960, | ||
338 | .sizeimage = 960 * 480, | ||
339 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
340 | .priv = 2 | MODE_JPEG}, | ||
341 | {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
342 | .bytesperline = 640, | ||
343 | .sizeimage = 640 * 480, | ||
344 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
345 | .priv = 2 | MODE_RAW}, | ||
346 | {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, | ||
347 | .bytesperline = 960, | ||
348 | .sizeimage = 960 * 480, | ||
349 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
350 | .priv = 2}, | ||
351 | }; | ||
352 | |||
353 | static const struct v4l2_pix_format sxga_mode[] = { | ||
354 | {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
355 | .bytesperline = 240, | ||
356 | .sizeimage = 240 * 120, | ||
357 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
358 | .priv = 0 | MODE_JPEG}, | ||
359 | {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
360 | .bytesperline = 160, | ||
361 | .sizeimage = 160 * 120, | ||
362 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
363 | .priv = 0 | MODE_RAW}, | ||
364 | {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, | ||
365 | .bytesperline = 240, | ||
366 | .sizeimage = 240 * 120, | ||
367 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
368 | .priv = 0}, | ||
369 | {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
370 | .bytesperline = 480, | ||
371 | .sizeimage = 480 * 240 , | ||
372 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
373 | .priv = 1 | MODE_JPEG}, | ||
374 | {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
375 | .bytesperline = 320, | ||
376 | .sizeimage = 320 * 240 , | ||
377 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
378 | .priv = 1 | MODE_RAW}, | ||
379 | {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, | ||
380 | .bytesperline = 480, | ||
381 | .sizeimage = 480 * 240 , | ||
382 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
383 | .priv = 1}, | ||
384 | {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
385 | .bytesperline = 960, | ||
386 | .sizeimage = 960 * 480, | ||
387 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
388 | .priv = 2 | MODE_JPEG}, | ||
389 | {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
390 | .bytesperline = 640, | ||
391 | .sizeimage = 640 * 480, | ||
392 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
393 | .priv = 2 | MODE_RAW}, | ||
394 | {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, | ||
395 | .bytesperline = 960, | ||
396 | .sizeimage = 960 * 480, | ||
397 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
398 | .priv = 2}, | ||
399 | {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | ||
400 | .bytesperline = 1280, | ||
401 | .sizeimage = (1280 * 1024) + 64, | ||
402 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
403 | .priv = 3 | MODE_RAW | MODE_SXGA}, | ||
404 | }; | ||
405 | |||
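Note on the mode tables above: .priv packs a resolution index in the low nibble together with the MODE_* flags defined near the top of the file. A minimal sketch (not from the patch) of the decode:

	/* Illustrative helpers for the .priv packing used in vga_mode/sxga_mode;
	 * e.g. "2 | MODE_JPEG" is the full-size 640x480 JPEG mode. */
	static inline int mode_scale_idx(u8 priv) { return priv & 0x0f; }
	static inline int mode_is_raw(u8 priv)    { return priv & MODE_RAW; }
	static inline int mode_is_jpeg(u8 priv)   { return priv & MODE_JPEG; }
	static inline int mode_is_sxga(u8 priv)   { return priv & MODE_SXGA; }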
406 | static const int hsv_red_x[] = { | ||
407 | 41, 44, 46, 48, 50, 52, 54, 56, | ||
408 | 58, 60, 62, 64, 66, 68, 70, 72, | ||
409 | 74, 76, 78, 80, 81, 83, 85, 87, | ||
410 | 88, 90, 92, 93, 95, 97, 98, 100, | ||
411 | 101, 102, 104, 105, 107, 108, 109, 110, | ||
412 | 112, 113, 114, 115, 116, 117, 118, 119, | ||
413 | 120, 121, 122, 123, 123, 124, 125, 125, | ||
414 | 126, 127, 127, 128, 128, 129, 129, 129, | ||
415 | 130, 130, 130, 130, 131, 131, 131, 131, | ||
416 | 131, 131, 131, 131, 130, 130, 130, 130, | ||
417 | 129, 129, 129, 128, 128, 127, 127, 126, | ||
418 | 125, 125, 124, 123, 122, 122, 121, 120, | ||
419 | 119, 118, 117, 116, 115, 114, 112, 111, | ||
420 | 110, 109, 107, 106, 105, 103, 102, 101, | ||
421 | 99, 98, 96, 94, 93, 91, 90, 88, | ||
422 | 86, 84, 83, 81, 79, 77, 75, 74, | ||
423 | 72, 70, 68, 66, 64, 62, 60, 58, | ||
424 | 56, 54, 52, 49, 47, 45, 43, 41, | ||
425 | 39, 36, 34, 32, 30, 28, 25, 23, | ||
426 | 21, 19, 16, 14, 12, 9, 7, 5, | ||
427 | 3, 0, -1, -3, -6, -8, -10, -12, | ||
428 | -15, -17, -19, -22, -24, -26, -28, -30, | ||
429 | -33, -35, -37, -39, -41, -44, -46, -48, | ||
430 | -50, -52, -54, -56, -58, -60, -62, -64, | ||
431 | -66, -68, -70, -72, -74, -76, -78, -80, | ||
432 | -81, -83, -85, -87, -88, -90, -92, -93, | ||
433 | -95, -97, -98, -100, -101, -102, -104, -105, | ||
434 | -107, -108, -109, -110, -112, -113, -114, -115, | ||
435 | -116, -117, -118, -119, -120, -121, -122, -123, | ||
436 | -123, -124, -125, -125, -126, -127, -127, -128, | ||
437 | -128, -128, -128, -128, -128, -128, -128, -128, | ||
438 | -128, -128, -128, -128, -128, -128, -128, -128, | ||
439 | -128, -128, -128, -128, -128, -128, -128, -128, | ||
440 | -128, -127, -127, -126, -125, -125, -124, -123, | ||
441 | -122, -122, -121, -120, -119, -118, -117, -116, | ||
442 | -115, -114, -112, -111, -110, -109, -107, -106, | ||
443 | -105, -103, -102, -101, -99, -98, -96, -94, | ||
444 | -93, -91, -90, -88, -86, -84, -83, -81, | ||
445 | -79, -77, -75, -74, -72, -70, -68, -66, | ||
446 | -64, -62, -60, -58, -56, -54, -52, -49, | ||
447 | -47, -45, -43, -41, -39, -36, -34, -32, | ||
448 | -30, -28, -25, -23, -21, -19, -16, -14, | ||
449 | -12, -9, -7, -5, -3, 0, 1, 3, | ||
450 | 6, 8, 10, 12, 15, 17, 19, 22, | ||
451 | 24, 26, 28, 30, 33, 35, 37, 39, 41 | ||
452 | }; | ||
453 | |||
454 | static const int hsv_red_y[] = { | ||
455 | 82, 80, 78, 76, 74, 73, 71, 69, | ||
456 | 67, 65, 63, 61, 58, 56, 54, 52, | ||
457 | 50, 48, 46, 44, 41, 39, 37, 35, | ||
458 | 32, 30, 28, 26, 23, 21, 19, 16, | ||
459 | 14, 12, 10, 7, 5, 3, 0, -1, | ||
460 | -3, -6, -8, -10, -13, -15, -17, -19, | ||
461 | -22, -24, -26, -29, -31, -33, -35, -38, | ||
462 | -40, -42, -44, -46, -48, -51, -53, -55, | ||
463 | -57, -59, -61, -63, -65, -67, -69, -71, | ||
464 | -73, -75, -77, -79, -81, -82, -84, -86, | ||
465 | -88, -89, -91, -93, -94, -96, -98, -99, | ||
466 | -101, -102, -104, -105, -106, -108, -109, -110, | ||
467 | -112, -113, -114, -115, -116, -117, -119, -120, | ||
468 | -120, -121, -122, -123, -124, -125, -126, -126, | ||
469 | -127, -128, -128, -128, -128, -128, -128, -128, | ||
470 | -128, -128, -128, -128, -128, -128, -128, -128, | ||
471 | -128, -128, -128, -128, -128, -128, -128, -128, | ||
472 | -128, -128, -128, -128, -128, -128, -128, -128, | ||
473 | -127, -127, -126, -125, -125, -124, -123, -122, | ||
474 | -121, -120, -119, -118, -117, -116, -115, -114, | ||
475 | -113, -111, -110, -109, -107, -106, -105, -103, | ||
476 | -102, -100, -99, -97, -96, -94, -92, -91, | ||
477 | -89, -87, -85, -84, -82, -80, -78, -76, | ||
478 | -74, -73, -71, -69, -67, -65, -63, -61, | ||
479 | -58, -56, -54, -52, -50, -48, -46, -44, | ||
480 | -41, -39, -37, -35, -32, -30, -28, -26, | ||
481 | -23, -21, -19, -16, -14, -12, -10, -7, | ||
482 | -5, -3, 0, 1, 3, 6, 8, 10, | ||
483 | 13, 15, 17, 19, 22, 24, 26, 29, | ||
484 | 31, 33, 35, 38, 40, 42, 44, 46, | ||
485 | 48, 51, 53, 55, 57, 59, 61, 63, | ||
486 | 65, 67, 69, 71, 73, 75, 77, 79, | ||
487 | 81, 82, 84, 86, 88, 89, 91, 93, | ||
488 | 94, 96, 98, 99, 101, 102, 104, 105, | ||
489 | 106, 108, 109, 110, 112, 113, 114, 115, | ||
490 | 116, 117, 119, 120, 120, 121, 122, 123, | ||
491 | 124, 125, 126, 126, 127, 128, 128, 129, | ||
492 | 129, 130, 130, 131, 131, 131, 131, 132, | ||
493 | 132, 132, 132, 132, 132, 132, 132, 132, | ||
494 | 132, 132, 132, 131, 131, 131, 130, 130, | ||
495 | 130, 129, 129, 128, 127, 127, 126, 125, | ||
496 | 125, 124, 123, 122, 121, 120, 119, 118, | ||
497 | 117, 116, 115, 114, 113, 111, 110, 109, | ||
498 | 107, 106, 105, 103, 102, 100, 99, 97, | ||
499 | 96, 94, 92, 91, 89, 87, 85, 84, 82 | ||
500 | }; | ||
501 | |||
502 | static const int hsv_green_x[] = { | ||
503 | -124, -124, -125, -125, -125, -125, -125, -125, | ||
504 | -125, -126, -126, -125, -125, -125, -125, -125, | ||
505 | -125, -124, -124, -124, -123, -123, -122, -122, | ||
506 | -121, -121, -120, -120, -119, -118, -117, -117, | ||
507 | -116, -115, -114, -113, -112, -111, -110, -109, | ||
508 | -108, -107, -105, -104, -103, -102, -100, -99, | ||
509 | -98, -96, -95, -93, -92, -91, -89, -87, | ||
510 | -86, -84, -83, -81, -79, -77, -76, -74, | ||
511 | -72, -70, -69, -67, -65, -63, -61, -59, | ||
512 | -57, -55, -53, -51, -49, -47, -45, -43, | ||
513 | -41, -39, -37, -35, -33, -30, -28, -26, | ||
514 | -24, -22, -20, -18, -15, -13, -11, -9, | ||
515 | -7, -4, -2, 0, 1, 3, 6, 8, | ||
516 | 10, 12, 14, 17, 19, 21, 23, 25, | ||
517 | 27, 29, 32, 34, 36, 38, 40, 42, | ||
518 | 44, 46, 48, 50, 52, 54, 56, 58, | ||
519 | 60, 62, 64, 66, 68, 70, 71, 73, | ||
520 | 75, 77, 78, 80, 82, 83, 85, 87, | ||
521 | 88, 90, 91, 93, 94, 96, 97, 98, | ||
522 | 100, 101, 102, 104, 105, 106, 107, 108, | ||
523 | 109, 111, 112, 113, 113, 114, 115, 116, | ||
524 | 117, 118, 118, 119, 120, 120, 121, 122, | ||
525 | 122, 123, 123, 124, 124, 124, 125, 125, | ||
526 | 125, 125, 125, 125, 125, 126, 126, 125, | ||
527 | 125, 125, 125, 125, 125, 124, 124, 124, | ||
528 | 123, 123, 122, 122, 121, 121, 120, 120, | ||
529 | 119, 118, 117, 117, 116, 115, 114, 113, | ||
530 | 112, 111, 110, 109, 108, 107, 105, 104, | ||
531 | 103, 102, 100, 99, 98, 96, 95, 93, | ||
532 | 92, 91, 89, 87, 86, 84, 83, 81, | ||
533 | 79, 77, 76, 74, 72, 70, 69, 67, | ||
534 | 65, 63, 61, 59, 57, 55, 53, 51, | ||
535 | 49, 47, 45, 43, 41, 39, 37, 35, | ||
536 | 33, 30, 28, 26, 24, 22, 20, 18, | ||
537 | 15, 13, 11, 9, 7, 4, 2, 0, | ||
538 | -1, -3, -6, -8, -10, -12, -14, -17, | ||
539 | -19, -21, -23, -25, -27, -29, -32, -34, | ||
540 | -36, -38, -40, -42, -44, -46, -48, -50, | ||
541 | -52, -54, -56, -58, -60, -62, -64, -66, | ||
542 | -68, -70, -71, -73, -75, -77, -78, -80, | ||
543 | -82, -83, -85, -87, -88, -90, -91, -93, | ||
544 | -94, -96, -97, -98, -100, -101, -102, -104, | ||
545 | -105, -106, -107, -108, -109, -111, -112, -113, | ||
546 | -113, -114, -115, -116, -117, -118, -118, -119, | ||
547 | -120, -120, -121, -122, -122, -123, -123, -124, -124 | ||
548 | }; | ||
549 | |||
550 | static const int hsv_green_y[] = { | ||
551 | -100, -99, -98, -97, -95, -94, -93, -91, | ||
552 | -90, -89, -87, -86, -84, -83, -81, -80, | ||
553 | -78, -76, -75, -73, -71, -70, -68, -66, | ||
554 | -64, -63, -61, -59, -57, -55, -53, -51, | ||
555 | -49, -48, -46, -44, -42, -40, -38, -36, | ||
556 | -34, -32, -30, -27, -25, -23, -21, -19, | ||
557 | -17, -15, -13, -11, -9, -7, -4, -2, | ||
558 | 0, 1, 3, 5, 7, 9, 11, 14, | ||
559 | 16, 18, 20, 22, 24, 26, 28, 30, | ||
560 | 32, 34, 36, 38, 40, 42, 44, 46, | ||
561 | 48, 50, 52, 54, 56, 58, 59, 61, | ||
562 | 63, 65, 67, 68, 70, 72, 74, 75, | ||
563 | 77, 78, 80, 82, 83, 85, 86, 88, | ||
564 | 89, 90, 92, 93, 95, 96, 97, 98, | ||
565 | 100, 101, 102, 103, 104, 105, 106, 107, | ||
566 | 108, 109, 110, 111, 112, 112, 113, 114, | ||
567 | 115, 115, 116, 116, 117, 117, 118, 118, | ||
568 | 119, 119, 119, 120, 120, 120, 120, 120, | ||
569 | 121, 121, 121, 121, 121, 121, 120, 120, | ||
570 | 120, 120, 120, 119, 119, 119, 118, 118, | ||
571 | 117, 117, 116, 116, 115, 114, 114, 113, | ||
572 | 112, 111, 111, 110, 109, 108, 107, 106, | ||
573 | 105, 104, 103, 102, 100, 99, 98, 97, | ||
574 | 95, 94, 93, 91, 90, 89, 87, 86, | ||
575 | 84, 83, 81, 80, 78, 76, 75, 73, | ||
576 | 71, 70, 68, 66, 64, 63, 61, 59, | ||
577 | 57, 55, 53, 51, 49, 48, 46, 44, | ||
578 | 42, 40, 38, 36, 34, 32, 30, 27, | ||
579 | 25, 23, 21, 19, 17, 15, 13, 11, | ||
580 | 9, 7, 4, 2, 0, -1, -3, -5, | ||
581 | -7, -9, -11, -14, -16, -18, -20, -22, | ||
582 | -24, -26, -28, -30, -32, -34, -36, -38, | ||
583 | -40, -42, -44, -46, -48, -50, -52, -54, | ||
584 | -56, -58, -59, -61, -63, -65, -67, -68, | ||
585 | -70, -72, -74, -75, -77, -78, -80, -82, | ||
586 | -83, -85, -86, -88, -89, -90, -92, -93, | ||
587 | -95, -96, -97, -98, -100, -101, -102, -103, | ||
588 | -104, -105, -106, -107, -108, -109, -110, -111, | ||
589 | -112, -112, -113, -114, -115, -115, -116, -116, | ||
590 | -117, -117, -118, -118, -119, -119, -119, -120, | ||
591 | -120, -120, -120, -120, -121, -121, -121, -121, | ||
592 | -121, -121, -120, -120, -120, -120, -120, -119, | ||
593 | -119, -119, -118, -118, -117, -117, -116, -116, | ||
594 | -115, -114, -114, -113, -112, -111, -111, -110, | ||
595 | -109, -108, -107, -106, -105, -104, -103, -102, -100 | ||
596 | }; | ||
597 | |||
598 | static const int hsv_blue_x[] = { | ||
599 | 112, 113, 114, 114, 115, 116, 117, 117, | ||
600 | 118, 118, 119, 119, 120, 120, 120, 121, | ||
601 | 121, 121, 122, 122, 122, 122, 122, 122, | ||
602 | 122, 122, 122, 122, 122, 122, 121, 121, | ||
603 | 121, 120, 120, 120, 119, 119, 118, 118, | ||
604 | 117, 116, 116, 115, 114, 113, 113, 112, | ||
605 | 111, 110, 109, 108, 107, 106, 105, 104, | ||
606 | 103, 102, 100, 99, 98, 97, 95, 94, | ||
607 | 93, 91, 90, 88, 87, 85, 84, 82, | ||
608 | 80, 79, 77, 76, 74, 72, 70, 69, | ||
609 | 67, 65, 63, 61, 60, 58, 56, 54, | ||
610 | 52, 50, 48, 46, 44, 42, 40, 38, | ||
611 | 36, 34, 32, 30, 28, 26, 24, 22, | ||
612 | 19, 17, 15, 13, 11, 9, 7, 5, | ||
613 | 2, 0, -1, -3, -5, -7, -9, -12, | ||
614 | -14, -16, -18, -20, -22, -24, -26, -28, | ||
615 | -31, -33, -35, -37, -39, -41, -43, -45, | ||
616 | -47, -49, -51, -53, -54, -56, -58, -60, | ||
617 | -62, -64, -66, -67, -69, -71, -73, -74, | ||
618 | -76, -78, -79, -81, -83, -84, -86, -87, | ||
619 | -89, -90, -92, -93, -94, -96, -97, -98, | ||
620 | -99, -101, -102, -103, -104, -105, -106, -107, | ||
621 | -108, -109, -110, -111, -112, -113, -114, -114, | ||
622 | -115, -116, -117, -117, -118, -118, -119, -119, | ||
623 | -120, -120, -120, -121, -121, -121, -122, -122, | ||
624 | -122, -122, -122, -122, -122, -122, -122, -122, | ||
625 | -122, -122, -121, -121, -121, -120, -120, -120, | ||
626 | -119, -119, -118, -118, -117, -116, -116, -115, | ||
627 | -114, -113, -113, -112, -111, -110, -109, -108, | ||
628 | -107, -106, -105, -104, -103, -102, -100, -99, | ||
629 | -98, -97, -95, -94, -93, -91, -90, -88, | ||
630 | -87, -85, -84, -82, -80, -79, -77, -76, | ||
631 | -74, -72, -70, -69, -67, -65, -63, -61, | ||
632 | -60, -58, -56, -54, -52, -50, -48, -46, | ||
633 | -44, -42, -40, -38, -36, -34, -32, -30, | ||
634 | -28, -26, -24, -22, -19, -17, -15, -13, | ||
635 | -11, -9, -7, -5, -2, 0, 1, 3, | ||
636 | 5, 7, 9, 12, 14, 16, 18, 20, | ||
637 | 22, 24, 26, 28, 31, 33, 35, 37, | ||
638 | 39, 41, 43, 45, 47, 49, 51, 53, | ||
639 | 54, 56, 58, 60, 62, 64, 66, 67, | ||
640 | 69, 71, 73, 74, 76, 78, 79, 81, | ||
641 | 83, 84, 86, 87, 89, 90, 92, 93, | ||
642 | 94, 96, 97, 98, 99, 101, 102, 103, | ||
643 | 104, 105, 106, 107, 108, 109, 110, 111, 112 | ||
644 | }; | ||
645 | |||
646 | static const int hsv_blue_y[] = { | ||
647 | -11, -13, -15, -17, -19, -21, -23, -25, | ||
648 | -27, -29, -31, -33, -35, -37, -39, -41, | ||
649 | -43, -45, -46, -48, -50, -52, -54, -55, | ||
650 | -57, -59, -61, -62, -64, -66, -67, -69, | ||
651 | -71, -72, -74, -75, -77, -78, -80, -81, | ||
652 | -83, -84, -86, -87, -88, -90, -91, -92, | ||
653 | -93, -95, -96, -97, -98, -99, -100, -101, | ||
654 | -102, -103, -104, -105, -106, -106, -107, -108, | ||
655 | -109, -109, -110, -111, -111, -112, -112, -113, | ||
656 | -113, -114, -114, -114, -115, -115, -115, -115, | ||
657 | -116, -116, -116, -116, -116, -116, -116, -116, | ||
658 | -116, -115, -115, -115, -115, -114, -114, -114, | ||
659 | -113, -113, -112, -112, -111, -111, -110, -110, | ||
660 | -109, -108, -108, -107, -106, -105, -104, -103, | ||
661 | -102, -101, -100, -99, -98, -97, -96, -95, | ||
662 | -94, -93, -91, -90, -89, -88, -86, -85, | ||
663 | -84, -82, -81, -79, -78, -76, -75, -73, | ||
664 | -71, -70, -68, -67, -65, -63, -62, -60, | ||
665 | -58, -56, -55, -53, -51, -49, -47, -45, | ||
666 | -44, -42, -40, -38, -36, -34, -32, -30, | ||
667 | -28, -26, -24, -22, -20, -18, -16, -14, | ||
668 | -12, -10, -8, -6, -4, -2, 0, 1, | ||
669 | 3, 5, 7, 9, 11, 13, 15, 17, | ||
670 | 19, 21, 23, 25, 27, 29, 31, 33, | ||
671 | 35, 37, 39, 41, 43, 45, 46, 48, | ||
672 | 50, 52, 54, 55, 57, 59, 61, 62, | ||
673 | 64, 66, 67, 69, 71, 72, 74, 75, | ||
674 | 77, 78, 80, 81, 83, 84, 86, 87, | ||
675 | 88, 90, 91, 92, 93, 95, 96, 97, | ||
676 | 98, 99, 100, 101, 102, 103, 104, 105, | ||
677 | 106, 106, 107, 108, 109, 109, 110, 111, | ||
678 | 111, 112, 112, 113, 113, 114, 114, 114, | ||
679 | 115, 115, 115, 115, 116, 116, 116, 116, | ||
680 | 116, 116, 116, 116, 116, 115, 115, 115, | ||
681 | 115, 114, 114, 114, 113, 113, 112, 112, | ||
682 | 111, 111, 110, 110, 109, 108, 108, 107, | ||
683 | 106, 105, 104, 103, 102, 101, 100, 99, | ||
684 | 98, 97, 96, 95, 94, 93, 91, 90, | ||
685 | 89, 88, 86, 85, 84, 82, 81, 79, | ||
686 | 78, 76, 75, 73, 71, 70, 68, 67, | ||
687 | 65, 63, 62, 60, 58, 56, 55, 53, | ||
688 | 51, 49, 47, 45, 44, 42, 40, 38, | ||
689 | 36, 34, 32, 30, 28, 26, 24, 22, | ||
690 | 20, 18, 16, 14, 12, 10, 8, 6, | ||
691 | 4, 2, 0, -1, -3, -5, -7, -9, -11 | ||
692 | }; | ||
693 | |||
694 | static u16 i2c_ident[] = { | ||
695 | V4L2_IDENT_OV9650, | ||
696 | V4L2_IDENT_OV9655, | ||
697 | V4L2_IDENT_SOI968, | ||
698 | V4L2_IDENT_OV7660, | ||
699 | V4L2_IDENT_OV7670, | ||
700 | V4L2_IDENT_MT9V011, | ||
701 | V4L2_IDENT_MT9V111, | ||
702 | V4L2_IDENT_MT9V112, | ||
703 | V4L2_IDENT_MT9M001C12ST, | ||
704 | V4L2_IDENT_MT9M111, | ||
705 | V4L2_IDENT_HV7131R, | ||
706 | }; | ||
707 | |||
708 | static u16 bridge_init[][2] = { | ||
709 | {0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c}, | ||
710 | {0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40}, | ||
711 | {0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10}, | ||
712 | {0x106b, 0x08}, {0x1188, 0x87}, {0x11a1, 0x00}, | ||
713 | {0x11a2, 0x00}, {0x11a3, 0x6a}, {0x11a4, 0x50}, | ||
714 | {0x11ab, 0x00}, {0x11ac, 0x00}, {0x11ad, 0x50}, | ||
715 | {0x11ae, 0x3c}, {0x118a, 0x04}, {0x0395, 0x04}, | ||
716 | {0x11b8, 0x3a}, {0x118b, 0x0e}, {0x10f7, 0x05}, | ||
717 | {0x10f8, 0x14}, {0x10fa, 0xff}, {0x10f9, 0x00}, | ||
718 | {0x11ba, 0x0a}, {0x11a5, 0x2d}, {0x11a6, 0x2d}, | ||
719 | {0x11a7, 0x3a}, {0x11a8, 0x05}, {0x11a9, 0x04}, | ||
720 | {0x11aa, 0x3f}, {0x11af, 0x28}, {0x11b0, 0xd8}, | ||
721 | {0x11b1, 0x14}, {0x11b2, 0xec}, {0x11b3, 0x32}, | ||
722 | {0x11b4, 0xdd}, {0x11b5, 0x32}, {0x11b6, 0xdd}, | ||
723 | {0x10e0, 0x2c}, {0x11bc, 0x40}, {0x11bd, 0x01}, | ||
724 | {0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f}, | ||
725 | {0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f}, | ||
726 | {0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01}, | ||
727 | {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80} | ||
728 | }; | ||
729 | |||
730 | /* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */ | ||
731 | static u8 ov_gain[] = { | ||
732 | 0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */, | ||
733 | 0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */, | ||
734 | 0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */, | ||
735 | 0x30 /* 4x */, 0x31 /* 4.25x */, 0x32 /* 4.5x */, 0x33 /* 4.75x */, | ||
736 | 0x34 /* 5x */, 0x35 /* 5.25x */, 0x36 /* 5.5x */, 0x37 /* 5.75x */, | ||
737 | 0x38 /* 6x */, 0x39 /* 6.25x */, 0x3a /* 6.5x */, 0x3b /* 6.75x */, | ||
738 | 0x3c /* 7x */, 0x3d /* 7.25x */, 0x3e /* 7.5x */, 0x3f /* 7.75x */, | ||
739 | 0x70 /* 8x */ | ||
740 | }; | ||
741 | |||
742 | /* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */ | ||
743 | static u16 micron1_gain[] = { | ||
744 | /* 1x 1.25x 1.5x 1.75x */ | ||
745 | 0x0020, 0x0028, 0x0030, 0x0038, | ||
746 | /* 2x 2.25x 2.5x 2.75x */ | ||
747 | 0x00a0, 0x00a4, 0x00a8, 0x00ac, | ||
748 | /* 3x 3.25x 3.5x 3.75x */ | ||
749 | 0x00b0, 0x00b4, 0x00b8, 0x00bc, | ||
750 | /* 4x 4.25x 4.5x 4.75x */ | ||
751 | 0x00c0, 0x00c4, 0x00c8, 0x00cc, | ||
752 | /* 5x 5.25x 5.5x 5.75x */ | ||
753 | 0x00d0, 0x00d4, 0x00d8, 0x00dc, | ||
754 | /* 6x 6.25x 6.5x 6.75x */ | ||
755 | 0x00e0, 0x00e4, 0x00e8, 0x00ec, | ||
756 | /* 7x 7.25x 7.5x 7.75x */ | ||
757 | 0x00f0, 0x00f4, 0x00f8, 0x00fc, | ||
758 | /* 8x */ | ||
759 | 0x01c0 | ||
760 | }; | ||
761 | |||
762 | /* mt9m001 sensor uses a different gain formula than other micron sensors */ | ||
763 | /* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */ | ||
764 | static u16 micron2_gain[] = { | ||
765 | /* 1x 1.25x 1.5x 1.75x */ | ||
766 | 0x0008, 0x000a, 0x000c, 0x000e, | ||
767 | /* 2x 2.25x 2.5x 2.75x */ | ||
768 | 0x0010, 0x0012, 0x0014, 0x0016, | ||
769 | /* 3x 3.25x 3.5x 3.75x */ | ||
770 | 0x0018, 0x001a, 0x001c, 0x001e, | ||
771 | /* 4x 4.25x 4.5x 4.75x */ | ||
772 | 0x0020, 0x0051, 0x0052, 0x0053, | ||
773 | /* 5x 5.25x 5.5x 5.75x */ | ||
774 | 0x0054, 0x0055, 0x0056, 0x0057, | ||
775 | /* 6x 6.25x 6.5x 6.75x */ | ||
776 | 0x0058, 0x0059, 0x005a, 0x005b, | ||
777 | /* 7x 7.25x 7.5x 7.75x */ | ||
778 | 0x005c, 0x005d, 0x005e, 0x005f, | ||
779 | /* 8x */ | ||
780 | 0x0060 | ||
781 | }; | ||
782 | |||
783 | /* Gain = .5 + bit[7:0] / 16 */ | ||
784 | static u8 hv7131r_gain[] = { | ||
785 | 0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */, | ||
786 | 0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */, | ||
787 | 0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x */, 0x34 /* 3.75x */, | ||
788 | 0x38 /* 4x */, 0x3c /* 4.25x */, 0x40 /* 4.5x */, 0x44 /* 4.75x */, | ||
789 | 0x48 /* 5x */, 0x4c /* 5.25x */, 0x50 /* 5.5x */, 0x54 /* 5.75x */, | ||
790 | 0x58 /* 6x */, 0x5c /* 6.25x */, 0x60 /* 6.5x */, 0x64 /* 6.75x */, | ||
791 | 0x68 /* 7x */, 0x6c /* 7.25x */, 0x70 /* 7.5x */, 0x74 /* 7.75x */, | ||
792 | 0x78 /* 8x */ | ||
793 | }; | ||
794 | |||
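As a quick check of the gain encodings documented above: in ov_gain, 0x12 has bit[4] set and bit[3:0] = 2, giving (2/16 + 1) * 2 = 2.25x as listed; in hv7131r_gain, 0x78 = 120 gives 0.5 + 120/16 = 8x. A small illustrative decoder for the OV layout (a host-side sketch, not part of the driver):

	/* Decode an ov_gain byte into a multiplier, per the formula above;
	 * e.g. 0x12 -> 2.25, 0x30 -> 4.0, 0x70 -> 8.0 */
	static double ov_gain_to_mult(u8 g)
	{
		double m = (g & 0x0f) / 16.0 + 1.0;

		if (g & 0x10)
			m *= 2;
		if (g & 0x20)
			m *= 2;
		if (g & 0x40)
			m *= 2;
		return m;
	}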
795 | static u8 soi968_init[][2] = { | ||
796 | {0x12, 0x80}, {0x0c, 0x00}, {0x0f, 0x1f}, | ||
797 | {0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00}, | ||
798 | {0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c}, | ||
799 | {0x37, 0x04}, {0x45, 0x04}, {0x47, 0xff}, | ||
800 | {0x3e, 0x00}, {0x3f, 0x00}, {0x3b, 0x20}, | ||
801 | {0x3a, 0x96}, {0x3d, 0x0a}, {0x14, 0x8e}, | ||
802 | {0x13, 0x8a}, {0x12, 0x40}, {0x17, 0x13}, | ||
803 | {0x18, 0x63}, {0x19, 0x01}, {0x1a, 0x79}, | ||
804 | {0x32, 0x24}, {0x03, 0x00}, {0x11, 0x40}, | ||
805 | {0x2a, 0x10}, {0x2b, 0xe0}, {0x10, 0x32}, | ||
806 | {0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80}, | ||
807 | }; | ||
808 | |||
809 | static u8 ov7660_init[][2] = { | ||
810 | {0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3}, | ||
811 | {0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40}, | ||
812 | {0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a}, | ||
813 | {0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43}, | ||
814 | {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0xf6}, | ||
815 | {0x2e, 0x0b}, {0x01, 0x78}, {0x02, 0x50}, | ||
816 | }; | ||
817 | |||
818 | static u8 ov7670_init[][2] = { | ||
819 | {0x12, 0x80}, {0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01}, | ||
820 | {0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00}, | ||
821 | {0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0}, | ||
822 | {0xa2, 0x02}, {0x13, 0xe0}, {0x00, 0x00}, {0x10, 0x00}, | ||
823 | {0x0d, 0x40}, {0x14, 0x28}, {0xa5, 0x05}, {0xab, 0x07}, | ||
824 | {0x24, 0x95}, {0x25, 0x33}, {0x26, 0xe3}, {0x9f, 0x75}, | ||
825 | {0xa0, 0x65}, {0xa1, 0x0b}, {0xa6, 0xd8}, {0xa7, 0xd8}, | ||
826 | {0xa8, 0xf0}, {0xa9, 0x90}, {0xaa, 0x94}, {0x13, 0xe5}, | ||
827 | {0x0e, 0x61}, {0x0f, 0x4b}, {0x16, 0x02}, {0x1e, 0x27}, | ||
828 | {0x21, 0x02}, {0x22, 0x91}, {0x29, 0x07}, {0x33, 0x0b}, | ||
829 | {0x35, 0x0b}, {0x37, 0x1d}, {0x38, 0x71}, {0x39, 0x2a}, | ||
830 | {0x3c, 0x78}, {0x4d, 0x40}, {0x4e, 0x20}, {0x69, 0x00}, | ||
831 | {0x74, 0x19}, {0x8d, 0x4f}, {0x8e, 0x00}, {0x8f, 0x00}, | ||
832 | {0x90, 0x00}, {0x91, 0x00}, {0x96, 0x00}, {0x9a, 0x80}, | ||
833 | {0xb0, 0x84}, {0xb1, 0x0c}, {0xb2, 0x0e}, {0xb3, 0x82}, | ||
834 | {0xb8, 0x0a}, {0x43, 0x0a}, {0x44, 0xf0}, {0x45, 0x20}, | ||
835 | {0x46, 0x7d}, {0x47, 0x29}, {0x48, 0x4a}, {0x59, 0x8c}, | ||
836 | {0x5a, 0xa5}, {0x5b, 0xde}, {0x5c, 0x96}, {0x5d, 0x66}, | ||
837 | {0x5e, 0x10}, {0x6c, 0x0a}, {0x6d, 0x55}, {0x6e, 0x11}, | ||
838 | {0x6f, 0x9e}, {0x6a, 0x40}, {0x01, 0x40}, {0x02, 0x40}, | ||
839 | {0x13, 0xe7}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x02}, | ||
840 | {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x55, 0x0a}, | ||
841 | {0x56, 0x55}, {0x57, 0x80}, {0x58, 0x9e}, {0x41, 0x08}, | ||
842 | {0x3f, 0x02}, {0x75, 0x03}, {0x76, 0x63}, {0x4c, 0x04}, | ||
843 | {0x77, 0x06}, {0x3d, 0x02}, {0x4b, 0x09}, {0xc9, 0x30}, | ||
844 | {0x41, 0x08}, {0x56, 0x48}, {0x34, 0x11}, {0xa4, 0x88}, | ||
845 | {0x96, 0x00}, {0x97, 0x30}, {0x98, 0x20}, {0x99, 0x30}, | ||
846 | {0x9a, 0x84}, {0x9b, 0x29}, {0x9c, 0x03}, {0x9d, 0x99}, | ||
847 | {0x9e, 0x7f}, {0x78, 0x04}, {0x79, 0x01}, {0xc8, 0xf0}, | ||
848 | {0x79, 0x0f}, {0xc8, 0x00}, {0x79, 0x10}, {0xc8, 0x7e}, | ||
849 | {0x79, 0x0a}, {0xc8, 0x80}, {0x79, 0x0b}, {0xc8, 0x01}, | ||
850 | {0x79, 0x0c}, {0xc8, 0x0f}, {0x79, 0x0d}, {0xc8, 0x20}, | ||
851 | {0x79, 0x09}, {0xc8, 0x80}, {0x79, 0x02}, {0xc8, 0xc0}, | ||
852 | {0x79, 0x03}, {0xc8, 0x40}, {0x79, 0x05}, {0xc8, 0x30}, | ||
853 | {0x79, 0x26}, {0x62, 0x20}, {0x63, 0x00}, {0x64, 0x06}, | ||
854 | {0x65, 0x00}, {0x66, 0x05}, {0x94, 0x05}, {0x95, 0x0a}, | ||
855 | {0x17, 0x13}, {0x18, 0x01}, {0x19, 0x02}, {0x1a, 0x7a}, | ||
856 | {0x46, 0x59}, {0x47, 0x30}, {0x58, 0x9a}, {0x59, 0x84}, | ||
857 | {0x5a, 0x91}, {0x5b, 0x57}, {0x5c, 0x75}, {0x5d, 0x6d}, | ||
858 | {0x5e, 0x13}, {0x64, 0x07}, {0x94, 0x07}, {0x95, 0x0d}, | ||
859 | {0xa6, 0xdf}, {0xa7, 0xdf}, {0x48, 0x4d}, {0x51, 0x00}, | ||
860 | {0x6b, 0x0a}, {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, | ||
861 | {0x92, 0x00}, {0x93, 0x00}, {0x55, 0x0a}, {0x56, 0x60}, | ||
862 | {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, | ||
863 | {0x53, 0x56}, {0x54, 0x73}, {0x58, 0x9a}, {0x4f, 0x6e}, | ||
864 | {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, {0x53, 0x56}, | ||
865 | {0x54, 0x73}, {0x58, 0x9a}, {0x3f, 0x01}, {0x7b, 0x03}, | ||
866 | {0x7c, 0x09}, {0x7d, 0x16}, {0x7e, 0x38}, {0x7f, 0x47}, | ||
867 | {0x80, 0x53}, {0x81, 0x5e}, {0x82, 0x6a}, {0x83, 0x74}, | ||
868 | {0x84, 0x80}, {0x85, 0x8c}, {0x86, 0x9b}, {0x87, 0xb2}, | ||
869 | {0x88, 0xcc}, {0x89, 0xe5}, {0x7a, 0x24}, {0x3b, 0x00}, | ||
870 | {0x9f, 0x76}, {0xa0, 0x65}, {0x13, 0xe2}, {0x6b, 0x0a}, | ||
871 | {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, {0x92, 0x00}, | ||
872 | {0x93, 0x00}, | ||
873 | }; | ||
874 | |||
875 | static u8 ov9650_init[][2] = { | ||
876 | {0x12, 0x80}, {0x00, 0x00}, {0x01, 0x78}, | ||
877 | {0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03}, | ||
878 | {0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00}, | ||
879 | {0x09, 0x01}, {0x0c, 0x00}, {0x0d, 0x00}, | ||
880 | {0x0e, 0xa0}, {0x0f, 0x52}, {0x10, 0x7c}, | ||
881 | {0x11, 0x80}, {0x12, 0x45}, {0x13, 0xc2}, | ||
882 | {0x14, 0x2e}, {0x15, 0x00}, {0x16, 0x07}, | ||
883 | {0x17, 0x24}, {0x18, 0xc5}, {0x19, 0x00}, | ||
884 | {0x1a, 0x3c}, {0x1b, 0x00}, {0x1e, 0x04}, | ||
885 | {0x1f, 0x00}, {0x24, 0x78}, {0x25, 0x68}, | ||
886 | {0x26, 0xd4}, {0x27, 0x80}, {0x28, 0x80}, | ||
887 | {0x29, 0x30}, {0x2a, 0x00}, {0x2b, 0x00}, | ||
888 | {0x2c, 0x80}, {0x2d, 0x00}, {0x2e, 0x00}, | ||
889 | {0x2f, 0x00}, {0x30, 0x08}, {0x31, 0x30}, | ||
890 | {0x32, 0x84}, {0x33, 0xe2}, {0x34, 0xbf}, | ||
891 | {0x35, 0x81}, {0x36, 0xf9}, {0x37, 0x00}, | ||
892 | {0x38, 0x93}, {0x39, 0x50}, {0x3a, 0x01}, | ||
893 | {0x3b, 0x01}, {0x3c, 0x73}, {0x3d, 0x19}, | ||
894 | {0x3e, 0x0b}, {0x3f, 0x80}, {0x40, 0xc1}, | ||
895 | {0x41, 0x00}, {0x42, 0x08}, {0x67, 0x80}, | ||
896 | {0x68, 0x80}, {0x69, 0x40}, {0x6a, 0x00}, | ||
897 | {0x6b, 0x0a}, {0x8b, 0x06}, {0x8c, 0x20}, | ||
898 | {0x8d, 0x00}, {0x8e, 0x00}, {0x8f, 0xdf}, | ||
899 | {0x92, 0x00}, {0x93, 0x00}, {0x94, 0x88}, | ||
900 | {0x95, 0x88}, {0x96, 0x04}, {0xa1, 0x00}, | ||
901 | {0xa5, 0x80}, {0xa8, 0x80}, {0xa9, 0xb8}, | ||
902 | {0xaa, 0x92}, {0xab, 0x0a}, | ||
903 | }; | ||
904 | |||
905 | static u8 ov9655_init[][2] = { | ||
906 | {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61}, | ||
907 | {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24}, | ||
908 | {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08}, | ||
909 | {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf}, | ||
910 | {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12}, | ||
911 | {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, | ||
912 | {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, | ||
913 | {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, | ||
914 | {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, | ||
915 | {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04}, | ||
916 | {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02}, | ||
917 | {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c}, | ||
918 | {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60}, | ||
919 | {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04}, | ||
920 | {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80}, | ||
921 | {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf}, | ||
922 | {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b}, | ||
923 | {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01}, | ||
924 | {0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0}, | ||
925 | {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61}, | ||
926 | {0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a}, | ||
927 | {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01}, | ||
928 | {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10}, | ||
929 | {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04}, | ||
930 | {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00}, | ||
931 | {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80}, | ||
932 | {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d}, | ||
933 | {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14}, | ||
934 | {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf}, | ||
935 | {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00}, | ||
936 | {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00}, | ||
937 | {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03}, | ||
938 | {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03}, | ||
939 | {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13}, | ||
940 | }; | ||
941 | |||
942 | static u16 mt9v112_init[][2] = { | ||
943 | {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020}, | ||
944 | {0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b}, | ||
945 | {0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001}, | ||
946 | {0x05, 0x0000}, {0x06, 0x340c}, {0x3b, 0x042a}, | ||
947 | {0x3c, 0x0400}, {0xf0, 0x0002}, {0x2e, 0x0c58}, | ||
948 | {0x5b, 0x0001}, {0xc8, 0x9f0b}, {0xf0, 0x0001}, | ||
949 | {0x9b, 0x5300}, {0xf0, 0x0000}, {0x2b, 0x0020}, | ||
950 | {0x2c, 0x002a}, {0x2d, 0x0032}, {0x2e, 0x0020}, | ||
951 | {0x09, 0x01dc}, {0x01, 0x000c}, {0x02, 0x0020}, | ||
952 | {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, | ||
953 | {0x05, 0x0098}, {0x20, 0x0703}, {0x09, 0x01f2}, | ||
954 | {0x2b, 0x00a0}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, | ||
955 | {0x2e, 0x00a0}, {0x01, 0x000c}, {0x02, 0x0020}, | ||
956 | {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, | ||
957 | {0x05, 0x0098}, {0x09, 0x01c1}, {0x2b, 0x00ae}, | ||
958 | {0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae}, | ||
959 | }; | ||
960 | |||
961 | static u16 mt9v111_init[][2] = { | ||
962 | {0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000}, | ||
963 | {0x01, 0x0001}, {0x02, 0x0016}, {0x03, 0x01e1}, | ||
964 | {0x04, 0x0281}, {0x05, 0x0004}, {0x07, 0x3002}, | ||
965 | {0x21, 0x0000}, {0x25, 0x4024}, {0x26, 0xff03}, | ||
966 | {0x27, 0xff10}, {0x2b, 0x7828}, {0x2c, 0xb43c}, | ||
967 | {0x2d, 0xf0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064}, | ||
968 | {0x67, 0x4010}, {0x06, 0x301e}, {0x08, 0x0480}, | ||
969 | {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e6}, | ||
970 | {0x04, 0x0286}, {0x05, 0x0004}, {0x06, 0x0000}, | ||
971 | {0x07, 0x3002}, {0x08, 0x0008}, {0x0c, 0x0000}, | ||
972 | {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000}, | ||
973 | {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x00b0}, | ||
974 | {0x13, 0x007c}, {0x14, 0x0000}, {0x15, 0x0000}, | ||
975 | {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000}, | ||
976 | {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000}, | ||
977 | {0x1c, 0x0000}, {0x1d, 0x0000}, {0x30, 0x0000}, | ||
978 | {0x30, 0x0005}, {0x31, 0x0000}, {0x02, 0x0016}, | ||
979 | {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0004}, | ||
980 | {0x06, 0x0000}, {0x07, 0x3002}, {0x06, 0x002d}, | ||
981 | {0x05, 0x0004}, {0x09, 0x0064}, {0x2b, 0x00a0}, | ||
982 | {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0}, | ||
983 | {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281}, | ||
984 | {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002}, | ||
985 | {0x0e, 0x0008}, {0x06, 0x002d}, {0x05, 0x0004}, | ||
986 | }; | ||
987 | |||
988 | static u16 mt9v011_init[][2] = { | ||
989 | {0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000}, | ||
990 | {0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1}, | ||
991 | {0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006}, | ||
992 | {0x0d, 0x0002}, {0x0a, 0x0000}, {0x0b, 0x0000}, | ||
993 | {0x0c, 0x0000}, {0x0d, 0x0000}, {0x0e, 0x0000}, | ||
994 | {0x0f, 0x0000}, {0x10, 0x0000}, {0x11, 0x0000}, | ||
995 | {0x12, 0x0000}, {0x13, 0x0000}, {0x14, 0x0000}, | ||
996 | {0x15, 0x0000}, {0x16, 0x0000}, {0x17, 0x0000}, | ||
997 | {0x18, 0x0000}, {0x19, 0x0000}, {0x1a, 0x0000}, | ||
998 | {0x1b, 0x0000}, {0x1c, 0x0000}, {0x1d, 0x0000}, | ||
999 | {0x32, 0x0000}, {0x20, 0x1101}, {0x21, 0x0000}, | ||
1000 | {0x22, 0x0000}, {0x23, 0x0000}, {0x24, 0x0000}, | ||
1001 | {0x25, 0x0000}, {0x26, 0x0000}, {0x27, 0x0024}, | ||
1002 | {0x2f, 0xf7b0}, {0x30, 0x0005}, {0x31, 0x0000}, | ||
1003 | {0x32, 0x0000}, {0x33, 0x0000}, {0x34, 0x0100}, | ||
1004 | {0x3d, 0x068f}, {0x40, 0x01e0}, {0x41, 0x00d1}, | ||
1005 | {0x44, 0x0082}, {0x5a, 0x0000}, {0x5b, 0x0000}, | ||
1006 | {0x5c, 0x0000}, {0x5d, 0x0000}, {0x5e, 0x0000}, | ||
1007 | {0x5f, 0xa31d}, {0x62, 0x0611}, {0x0a, 0x0000}, | ||
1008 | {0x06, 0x0029}, {0x05, 0x0009}, {0x20, 0x1101}, | ||
1009 | {0x20, 0x1101}, {0x09, 0x0064}, {0x07, 0x0003}, | ||
1010 | {0x2b, 0x0033}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, | ||
1011 | {0x2e, 0x0033}, {0x07, 0x0002}, {0x06, 0x0000}, | ||
1012 | {0x06, 0x0029}, {0x05, 0x0009}, | ||
1013 | }; | ||
1014 | |||
1015 | static u16 mt9m001_init[][2] = { | ||
1016 | {0x0d, 0x0001}, {0x0d, 0x0000}, {0x01, 0x000e}, | ||
1017 | {0x02, 0x0014}, {0x03, 0x03c1}, {0x04, 0x0501}, | ||
1018 | {0x05, 0x0083}, {0x06, 0x0006}, {0x0d, 0x0002}, | ||
1019 | {0x0a, 0x0000}, {0x0c, 0x0000}, {0x11, 0x0000}, | ||
1020 | {0x1e, 0x8000}, {0x5f, 0x8904}, {0x60, 0x0000}, | ||
1021 | {0x61, 0x0000}, {0x62, 0x0498}, {0x63, 0x0000}, | ||
1022 | {0x64, 0x0000}, {0x20, 0x111d}, {0x06, 0x00f2}, | ||
1023 | {0x05, 0x0013}, {0x09, 0x10f2}, {0x07, 0x0003}, | ||
1024 | {0x2b, 0x002a}, {0x2d, 0x002a}, {0x2c, 0x002a}, | ||
1025 | {0x2e, 0x0029}, {0x07, 0x0002}, | ||
1026 | }; | ||
1027 | |||
1028 | static u16 mt9m111_init[][2] = { | ||
1029 | {0xf0, 0x0000}, {0x0d, 0x0008}, {0x0d, 0x0009}, | ||
1030 | {0x0d, 0x0008}, {0xf0, 0x0001}, {0x3a, 0x4300}, | ||
1031 | {0x9b, 0x4300}, {0xa1, 0x0280}, {0xa4, 0x0200}, | ||
1032 | {0x06, 0x308e}, {0xf0, 0x0000}, | ||
1033 | }; | ||
1034 | |||
1035 | static u8 hv7131r_init[][2] = { | ||
1036 | {0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08}, | ||
1037 | {0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0}, | ||
1038 | {0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08}, | ||
1039 | {0x01, 0x08}, {0x01, 0x08}, {0x25, 0x07}, | ||
1040 | {0x26, 0xc3}, {0x27, 0x50}, {0x30, 0x62}, | ||
1041 | {0x31, 0x10}, {0x32, 0x06}, {0x33, 0x10}, | ||
1042 | {0x20, 0x00}, {0x21, 0xd0}, {0x22, 0x00}, | ||
1043 | {0x23, 0x09}, {0x01, 0x08}, | ||
1044 | }; | ||
1045 | |||
1046 | int reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) | ||
1047 | { | ||
1048 | struct usb_device *dev = gspca_dev->dev; | ||
1049 | int result; | ||
1050 | result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), | ||
1051 | 0x00, | ||
1052 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, | ||
1053 | reg, | ||
1054 | 0x00, | ||
1055 | gspca_dev->usb_buf, | ||
1056 | length, | ||
1057 | 500); | ||
1058 | if (unlikely(result < 0 || result != length)) { | ||
1059 | err("Read register failed 0x%02X", reg); | ||
1060 | return -EIO; | ||
1061 | } | ||
1062 | return 0; | ||
1063 | } | ||
1064 | |||
1065 | int reg_w(struct gspca_dev *gspca_dev, u16 reg, const u8 *buffer, int length) | ||
1066 | { | ||
1067 | struct usb_device *dev = gspca_dev->dev; | ||
1068 | int result; | ||
1069 | memcpy(gspca_dev->usb_buf, buffer, length); | ||
1070 | result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | ||
1071 | 0x08, | ||
1072 | USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, | ||
1073 | reg, | ||
1074 | 0x00, | ||
1075 | gspca_dev->usb_buf, | ||
1076 | length, | ||
1077 | 500); | ||
1078 | if (unlikely(result < 0 || result != length)) { | ||
1079 | err("Write register failed index 0x%02X", reg); | ||
1080 | return -EIO; | ||
1081 | } | ||
1082 | return 0; | ||
1083 | } | ||
1084 | |||
1085 | int reg_w1(struct gspca_dev *gspca_dev, u16 reg, const u8 value) | ||
1086 | { | ||
1087 | u8 data[1] = {value}; | ||
1088 | return reg_w(gspca_dev, reg, data, 1); | ||
1089 | } | ||
1090 | |||
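/*
 * Sensor (i2c) access goes through an 8-byte command block written to
 * bridge register 0x10c0: byte 0 carries flags and the transfer length,
 * byte 1 the sensor i2c address, byte 2 the sensor register, bytes 3..6
 * the data, and byte 7 appears to start the transfer.  i2c_w() then
 * polls 0x10c0, treating bit 0x04 as completion and bit 0x08 as error.
 */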
1091 | int i2c_w(struct gspca_dev *gspca_dev, const u8 *buffer) | ||
1092 | { | ||
1093 | int i; | ||
1094 | reg_w(gspca_dev, 0x10c0, buffer, 8); | ||
1095 | for (i = 0; i < 5; i++) { | ||
1096 | reg_r(gspca_dev, 0x10c0, 1); | ||
1097 | if (gspca_dev->usb_buf[0] & 0x04) { | ||
1098 | if (gspca_dev->usb_buf[0] & 0x08) | ||
1099 | return -1; | ||
1100 | return 0; | ||
1101 | } | ||
1102 | msleep(1); | ||
1103 | } | ||
1104 | return -1; | ||
1105 | } | ||
1106 | |||
1107 | int i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) | ||
1108 | { | ||
1109 | struct sd *sd = (struct sd *) gspca_dev; | ||
1110 | |||
1111 | u8 row[8]; | ||
1112 | |||
1113 | /* | ||
1114 | * from the point of view of the bridge, the length | ||
1115 | * includes the address | ||
1116 | */ | ||
1117 | row[0] = 0x81 | (2 << 4); | ||
1118 | row[1] = sd->i2c_addr; | ||
1119 | row[2] = reg; | ||
1120 | row[3] = val; | ||
1121 | row[4] = 0x00; | ||
1122 | row[5] = 0x00; | ||
1123 | row[6] = 0x00; | ||
1124 | row[7] = 0x10; | ||
1125 | |||
1126 | return i2c_w(gspca_dev, row); | ||
1127 | } | ||
1128 | |||
1129 | int i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val) | ||
1130 | { | ||
1131 | struct sd *sd = (struct sd *) gspca_dev; | ||
1132 | u8 row[8]; | ||
1133 | |||
1134 | /* | ||
1135 | * from the point of view of the bridge, the length | ||
1136 | * includes the address | ||
1137 | */ | ||
1138 | row[0] = 0x81 | (3 << 4); | ||
1139 | row[1] = sd->i2c_addr; | ||
1140 | row[2] = reg; | ||
1141 | row[3] = (val >> 8) & 0xff; | ||
1142 | row[4] = val & 0xff; | ||
1143 | row[5] = 0x00; | ||
1144 | row[6] = 0x00; | ||
1145 | row[7] = 0x10; | ||
1146 | |||
1147 | return i2c_w(gspca_dev, row); | ||
1148 | } | ||
1149 | |||
1150 | int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val) | ||
1151 | { | ||
1152 | struct sd *sd = (struct sd *) gspca_dev; | ||
1153 | u8 row[8]; | ||
1154 | |||
1155 | row[0] = 0x81 | 0x10; | ||
1156 | row[1] = sd->i2c_addr; | ||
1157 | row[2] = reg; | ||
1158 | row[3] = 0; | ||
1159 | row[4] = 0; | ||
1160 | row[5] = 0; | ||
1161 | row[6] = 0; | ||
1162 | row[7] = 0x10; | ||
1163 | reg_w(gspca_dev, 0x10c0, row, 8); | ||
1164 | msleep(1); | ||
1165 | row[0] = 0x81 | (2 << 4) | 0x02; | ||
1166 | row[2] = 0; | ||
1167 | reg_w(gspca_dev, 0x10c0, row, 8); | ||
1168 | msleep(1); | ||
1169 | reg_r(gspca_dev, 0x10c2, 5); | ||
1170 | *val = gspca_dev->usb_buf[3]; | ||
1171 | return 0; | ||
1172 | } | ||
1173 | |||
1174 | int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val) | ||
1175 | { | ||
1176 | struct sd *sd = (struct sd *) gspca_dev; | ||
1177 | u8 row[8]; | ||
1178 | |||
1179 | row[0] = 0x81 | 0x10; | ||
1180 | row[1] = sd->i2c_addr; | ||
1181 | row[2] = reg; | ||
1182 | row[3] = 0; | ||
1183 | row[4] = 0; | ||
1184 | row[5] = 0; | ||
1185 | row[6] = 0; | ||
1186 | row[7] = 0x10; | ||
1187 | reg_w(gspca_dev, 0x10c0, row, 8); | ||
1188 | msleep(1); | ||
1189 | row[0] = 0x81 | (3 << 4) | 0x02; | ||
1190 | row[2] = 0; | ||
1191 | reg_w(gspca_dev, 0x10c0, row, 8); | ||
1192 | msleep(1); | ||
1193 | reg_r(gspca_dev, 0x10c2, 5); | ||
1194 | *val = (gspca_dev->usb_buf[2] << 8) | gspca_dev->usb_buf[3]; | ||
1195 | return 0; | ||
1196 | } | ||
1197 | |||
1198 | static int ov9650_init_sensor(struct gspca_dev *gspca_dev) | ||
1199 | { | ||
1200 | int i; | ||
1201 | struct sd *sd = (struct sd *) gspca_dev; | ||
1202 | |||
1203 | for (i = 0; i < ARRAY_SIZE(ov9650_init); i++) { | ||
1204 | if (i2c_w1(gspca_dev, ov9650_init[i][0], | ||
1205 | ov9650_init[i][1]) < 0) { | ||
1206 | err("OV9650 sensor initialization failed"); | ||
1207 | return -ENODEV; | ||
1208 | } | ||
1209 | } | ||
1210 | sd->hstart = 1; | ||
1211 | sd->vstart = 7; | ||
1212 | return 0; | ||
1213 | } | ||
1214 | |||
1215 | static int ov9655_init_sensor(struct gspca_dev *gspca_dev) | ||
1216 | { | ||
1217 | int i; | ||
1218 | struct sd *sd = (struct sd *) gspca_dev; | ||
1219 | |||
1220 | for (i = 0; i < ARRAY_SIZE(ov9655_init); i++) { | ||
1221 | if (i2c_w1(gspca_dev, ov9655_init[i][0], | ||
1222 | ov9655_init[i][1]) < 0) { | ||
1223 | err("OV9655 sensor initialization failed"); | ||
1224 | return -ENODEV; | ||
1225 | } | ||
1226 | } | ||
1227 | /* disable hflip and vflip */ | ||
1228 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); | ||
1229 | sd->hstart = 0; | ||
1230 | sd->vstart = 7; | ||
1231 | return 0; | ||
1232 | } | ||
1233 | |||
1234 | static int soi968_init_sensor(struct gspca_dev *gspca_dev) | ||
1235 | { | ||
1236 | int i; | ||
1237 | struct sd *sd = (struct sd *) gspca_dev; | ||
1238 | |||
1239 | for (i = 0; i < ARRAY_SIZE(soi968_init); i++) { | ||
1240 | if (i2c_w1(gspca_dev, soi968_init[i][0], | ||
1241 | soi968_init[i][1]) < 0) { | ||
1242 | err("SOI968 sensor initialization failed"); | ||
1243 | return -ENODEV; | ||
1244 | } | ||
1245 | } | ||
1246 | /* disable hflip and vflip */ | ||
1247 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); | ||
1248 | sd->hstart = 60; | ||
1249 | sd->vstart = 11; | ||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | static int ov7660_init_sensor(struct gspca_dev *gspca_dev) | ||
1254 | { | ||
1255 | int i; | ||
1256 | struct sd *sd = (struct sd *) gspca_dev; | ||
1257 | |||
1258 | for (i = 0; i < ARRAY_SIZE(ov7660_init); i++) { | ||
1259 | if (i2c_w1(gspca_dev, ov7660_init[i][0], | ||
1260 | ov7660_init[i][1]) < 0) { | ||
1261 | err("OV7660 sensor initialization failed"); | ||
1262 | return -ENODEV; | ||
1263 | } | ||
1264 | } | ||
1265 | /* disable hflip and vflip */ | ||
1266 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); | ||
1267 | sd->hstart = 1; | ||
1268 | sd->vstart = 1; | ||
1269 | return 0; | ||
1270 | } | ||
1271 | |||
1272 | static int ov7670_init_sensor(struct gspca_dev *gspca_dev) | ||
1273 | { | ||
1274 | int i; | ||
1275 | struct sd *sd = (struct sd *) gspca_dev; | ||
1276 | |||
1277 | for (i = 0; i < ARRAY_SIZE(ov7670_init); i++) { | ||
1278 | if (i2c_w1(gspca_dev, ov7670_init[i][0], | ||
1279 | ov7670_init[i][1]) < 0) { | ||
1280 | err("OV7670 sensor initialization failed"); | ||
1281 | return -ENODEV; | ||
1282 | } | ||
1283 | } | ||
1284 | /* disable hflip and vflip */ | ||
1285 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); | ||
1286 | sd->hstart = 0; | ||
1287 | sd->vstart = 1; | ||
1288 | return 0; | ||
1289 | } | ||
1290 | |||
1291 | static int mt9v_init_sensor(struct gspca_dev *gspca_dev) | ||
1292 | { | ||
1293 | struct sd *sd = (struct sd *) gspca_dev; | ||
1294 | int i; | ||
1295 | u16 value; | ||
1296 | int ret; | ||
1297 | |||
1298 | sd->i2c_addr = 0x5d; | ||
1299 | ret = i2c_r2(gspca_dev, 0xff, &value); | ||
1300 | if ((ret == 0) && (value == 0x8243)) { | ||
1301 | for (i = 0; i < ARRAY_SIZE(mt9v011_init); i++) { | ||
1302 | if (i2c_w2(gspca_dev, mt9v011_init[i][0], | ||
1303 | mt9v011_init[i][1]) < 0) { | ||
1304 | err("MT9V011 sensor initialization failed"); | ||
1305 | return -ENODEV; | ||
1306 | } | ||
1307 | } | ||
1308 | sd->hstart = 2; | ||
1309 | sd->vstart = 2; | ||
1310 | sd->sensor = SENSOR_MT9V011; | ||
1311 | info("MT9V011 sensor detected"); | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | sd->i2c_addr = 0x5c; | ||
1316 | i2c_w2(gspca_dev, 0x01, 0x0004); | ||
1317 | ret = i2c_r2(gspca_dev, 0xff, &value); | ||
1318 | if ((ret == 0) && (value == 0x823a)) { | ||
1319 | for (i = 0; i < ARRAY_SIZE(mt9v111_init); i++) { | ||
1320 | if (i2c_w2(gspca_dev, mt9v111_init[i][0], | ||
1321 | mt9v111_init[i][1]) < 0) { | ||
1322 | err("MT9V111 sensor initialization failed"); | ||
1323 | return -ENODEV; | ||
1324 | } | ||
1325 | } | ||
1326 | sd->hstart = 2; | ||
1327 | sd->vstart = 2; | ||
1328 | sd->sensor = SENSOR_MT9V111; | ||
1329 | info("MT9V111 sensor detected"); | ||
1330 | return 0; | ||
1331 | } | ||
1332 | |||
1333 | sd->i2c_addr = 0x5d; | ||
1334 | ret = i2c_w2(gspca_dev, 0xf0, 0x0000); | ||
1335 | if (ret < 0) { | ||
1336 | sd->i2c_addr = 0x48; | ||
1337 | i2c_w2(gspca_dev, 0xf0, 0x0000); | ||
1338 | } | ||
1339 | ret = i2c_r2(gspca_dev, 0x00, &value); | ||
1340 | if ((ret == 0) && (value == 0x1229)) { | ||
1341 | for (i = 0; i < ARRAY_SIZE(mt9v112_init); i++) { | ||
1342 | if (i2c_w2(gspca_dev, mt9v112_init[i][0], | ||
1343 | mt9v112_init[i][1]) < 0) { | ||
1344 | err("MT9V112 sensor initialization failed"); | ||
1345 | return -ENODEV; | ||
1346 | } | ||
1347 | } | ||
1348 | sd->hstart = 6; | ||
1349 | sd->vstart = 2; | ||
1350 | sd->sensor = SENSOR_MT9V112; | ||
1351 | info("MT9V112 sensor detected"); | ||
1352 | return 0; | ||
1353 | } | ||
1354 | |||
1355 | return -ENODEV; | ||
1356 | } | ||
1357 | |||
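/*
 * The MT9V probe tries the Micron variants in turn: chip version 0x8243
 * at i2c address 0x5d is an MT9V011, 0x823a at 0x5c an MT9V111, and
 * 0x1229 (at 0x5d, or 0x48 as fallback) an MT9V112.
 */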
1358 | static int mt9m111_init_sensor(struct gspca_dev *gspca_dev) | ||
1359 | { | ||
1360 | struct sd *sd = (struct sd *) gspca_dev; | ||
1361 | int i; | ||
1362 | for (i = 0; i < ARRAY_SIZE(mt9m111_init); i++) { | ||
1363 | if (i2c_w2(gspca_dev, mt9m111_init[i][0], | ||
1364 | mt9m111_init[i][1]) < 0) { | ||
1365 | err("MT9M111 sensor initialization failed"); | ||
1366 | return -ENODEV; | ||
1367 | } | ||
1368 | } | ||
1369 | sd->hstart = 0; | ||
1370 | sd->vstart = 2; | ||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | static int mt9m001_init_sensor(struct gspca_dev *gspca_dev) | ||
1375 | { | ||
1376 | struct sd *sd = (struct sd *) gspca_dev; | ||
1377 | int i; | ||
1378 | for (i = 0; i < ARRAY_SIZE(mt9m001_init); i++) { | ||
1379 | if (i2c_w2(gspca_dev, mt9m001_init[i][0], | ||
1380 | mt9m001_init[i][1]) < 0) { | ||
1381 | err("MT9M001 sensor initialization failed"); | ||
1382 | return -ENODEV; | ||
1383 | } | ||
1384 | } | ||
1385 | /* disable hflip and vflip */ | ||
1386 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); | ||
1387 | sd->hstart = 2; | ||
1388 | sd->vstart = 2; | ||
1389 | return 0; | ||
1390 | } | ||
1391 | |||
1392 | static int hv7131r_init_sensor(struct gspca_dev *gspca_dev) | ||
1393 | { | ||
1394 | int i; | ||
1395 | struct sd *sd = (struct sd *) gspca_dev; | ||
1396 | |||
1397 | for (i = 0; i < ARRAY_SIZE(hv7131r_init); i++) { | ||
1398 | if (i2c_w1(gspca_dev, hv7131r_init[i][0], | ||
1399 | hv7131r_init[i][1]) < 0) { | ||
1400 | err("HV7131R sensor initialization failed"); | ||
1401 | return -ENODEV; | ||
1402 | } | ||
1403 | } | ||
1404 | sd->hstart = 0; | ||
1405 | sd->vstart = 1; | ||
1406 | return 0; | ||
1407 | } | ||
1408 | |||
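/*
 * Optional evdev support: a kernel thread polls bridge register 0x1005
 * every 100 ms and reports the camera button GPIO as KEY_CAMERA.
 */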
1409 | #ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV | ||
1410 | static int input_kthread(void *data) | ||
1411 | { | ||
1412 | struct gspca_dev *gspca_dev = (struct gspca_dev *)data; | ||
1413 | struct sd *sd = (struct sd *) gspca_dev; | ||
1414 | |||
1415 | DECLARE_WAIT_QUEUE_HEAD(wait); | ||
1416 | set_freezable(); | ||
1417 | for (;;) { | ||
1418 | if (kthread_should_stop()) | ||
1419 | break; | ||
1420 | |||
1421 | if (reg_r(gspca_dev, 0x1005, 1) < 0) | ||
1422 | continue; | ||
1423 | |||
1424 | input_report_key(sd->input_dev, | ||
1425 | KEY_CAMERA, | ||
1426 | gspca_dev->usb_buf[0] & sd->input_gpio); | ||
1427 | input_sync(sd->input_dev); | ||
1428 | |||
1429 | wait_event_freezable_timeout(wait, | ||
1430 | kthread_should_stop(), | ||
1431 | msecs_to_jiffies(100)); | ||
1432 | } | ||
1433 | return 0; | ||
1434 | } | ||
1435 | |||
1436 | |||
1437 | static int sn9c20x_input_init(struct gspca_dev *gspca_dev) | ||
1438 | { | ||
1439 | struct sd *sd = (struct sd *) gspca_dev; | ||
1440 | if (sd->input_gpio == 0) | ||
1441 | return 0; | ||
1442 | |||
1443 | sd->input_dev = input_allocate_device(); | ||
1444 | if (!sd->input_dev) | ||
1445 | return -ENOMEM; | ||
1446 | |||
1447 | sd->input_dev->name = "SN9C20X Webcam"; | ||
1448 | |||
1449 | sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s", | ||
1450 | gspca_dev->dev->bus->bus_name, | ||
1451 | gspca_dev->dev->devpath); | ||
1452 | |||
1453 | if (!sd->input_dev->phys) | ||
1454 | return -ENOMEM; | ||
1455 | |||
1456 | usb_to_input_id(gspca_dev->dev, &sd->input_dev->id); | ||
1457 | sd->input_dev->dev.parent = &gspca_dev->dev->dev; | ||
1458 | |||
1459 | set_bit(EV_KEY, sd->input_dev->evbit); | ||
1460 | set_bit(KEY_CAMERA, sd->input_dev->keybit); | ||
1461 | |||
1462 | if (input_register_device(sd->input_dev)) | ||
1463 | return -EINVAL; | ||
1464 | |||
1465 | sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%d", | ||
1466 | gspca_dev->vdev.minor); | ||
1467 | |||
1468 | if (IS_ERR(sd->input_task)) | ||
1469 | return -EINVAL; | ||
1470 | |||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev) | ||
1475 | { | ||
1476 | struct sd *sd = (struct sd *) gspca_dev; | ||
1477 | if (sd->input_task != NULL && !IS_ERR(sd->input_task)) | ||
1478 | kthread_stop(sd->input_task); | ||
1479 | |||
1480 | if (sd->input_dev != NULL) { | ||
1481 | input_unregister_device(sd->input_dev); | ||
1482 | kfree(sd->input_dev->phys); | ||
1483 | input_free_device(sd->input_dev); | ||
1484 | sd->input_dev = NULL; | ||
1485 | } | ||
1486 | } | ||
1487 | #endif | ||
1488 | |||
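/*
 * Build the 21-byte colour matrix from the current brightness, contrast,
 * saturation and hue settings (via the hsv_* tables) and write it as one
 * block at bridge register 0x10e1.
 */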
1489 | static int set_cmatrix(struct gspca_dev *gspca_dev) | ||
1490 | { | ||
1491 | struct sd *sd = (struct sd *) gspca_dev; | ||
1492 | s32 hue_coord, hue_index = 180 + sd->hue; | ||
1493 | u8 cmatrix[21]; | ||
1494 | memset(cmatrix, 0, 21); | ||
1495 | |||
1496 | cmatrix[2] = (sd->contrast * 0x25 / 0x100) + 0x26; | ||
1497 | cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25; | ||
1498 | cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25; | ||
1499 | cmatrix[18] = sd->brightness - 0x80; | ||
1500 | |||
1501 | hue_coord = (hsv_red_x[hue_index] * sd->saturation) >> 8; | ||
1502 | cmatrix[6] = (unsigned char)(hue_coord & 0xff); | ||
1503 | cmatrix[7] = (unsigned char)((hue_coord >> 8) & 0x0f); | ||
1504 | |||
1505 | hue_coord = (hsv_red_y[hue_index] * sd->saturation) >> 8; | ||
1506 | cmatrix[8] = (unsigned char)(hue_coord & 0xff); | ||
1507 | cmatrix[9] = (unsigned char)((hue_coord >> 8) & 0x0f); | ||
1508 | |||
1509 | hue_coord = (hsv_green_x[hue_index] * sd->saturation) >> 8; | ||
1510 | cmatrix[10] = (unsigned char)(hue_coord & 0xff); | ||
1511 | cmatrix[11] = (unsigned char)((hue_coord >> 8) & 0x0f); | ||
1512 | |||
1513 | hue_coord = (hsv_green_y[hue_index] * sd->saturation) >> 8; | ||
1514 | cmatrix[12] = (unsigned char)(hue_coord & 0xff); | ||
1515 | cmatrix[13] = (unsigned char)((hue_coord >> 8) & 0x0f); | ||
1516 | |||
1517 | hue_coord = (hsv_blue_x[hue_index] * sd->saturation) >> 8; | ||
1518 | cmatrix[14] = (unsigned char)(hue_coord & 0xff); | ||
1519 | cmatrix[15] = (unsigned char)((hue_coord >> 8) & 0x0f); | ||
1520 | |||
1521 | hue_coord = (hsv_blue_y[hue_index] * sd->saturation) >> 8; | ||
1522 | cmatrix[16] = (unsigned char)(hue_coord & 0xff); | ||
1523 | cmatrix[17] = (unsigned char)((hue_coord >> 8) & 0x0f); | ||
1524 | |||
1525 | return reg_w(gspca_dev, 0x10e1, cmatrix, 21); | ||
1526 | } | ||
1527 | |||
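/*
 * Interpolate a 17-entry gamma curve between fixed low and high endpoints
 * according to the gamma control and load it at register 0x1190.
 */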
1528 | static int set_gamma(struct gspca_dev *gspca_dev) | ||
1529 | { | ||
1530 | struct sd *sd = (struct sd *) gspca_dev; | ||
1531 | u8 gamma[17]; | ||
1532 | u8 gval = sd->gamma * 0xb8 / 0x100; | ||
1533 | |||
1534 | |||
1535 | gamma[0] = 0x0a; | ||
1536 | gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8); | ||
1537 | gamma[2] = 0x25 + (gval * (0xee - 0x25) / 0xb8); | ||
1538 | gamma[3] = 0x37 + (gval * (0xfa - 0x37) / 0xb8); | ||
1539 | gamma[4] = 0x45 + (gval * (0xfc - 0x45) / 0xb8); | ||
1540 | gamma[5] = 0x55 + (gval * (0xfb - 0x55) / 0xb8); | ||
1541 | gamma[6] = 0x65 + (gval * (0xfc - 0x65) / 0xb8); | ||
1542 | gamma[7] = 0x74 + (gval * (0xfd - 0x74) / 0xb8); | ||
1543 | gamma[8] = 0x83 + (gval * (0xfe - 0x83) / 0xb8); | ||
1544 | gamma[9] = 0x92 + (gval * (0xfc - 0x92) / 0xb8); | ||
1545 | gamma[10] = 0xa1 + (gval * (0xfc - 0xa1) / 0xb8); | ||
1546 | gamma[11] = 0xb0 + (gval * (0xfc - 0xb0) / 0xb8); | ||
1547 | gamma[12] = 0xbf + (gval * (0xfb - 0xbf) / 0xb8); | ||
1548 | gamma[13] = 0xce + (gval * (0xfb - 0xce) / 0xb8); | ||
1549 | gamma[14] = 0xdf + (gval * (0xfd - 0xdf) / 0xb8); | ||
1550 | gamma[15] = 0xea + (gval * (0xf9 - 0xea) / 0xb8); | ||
1551 | gamma[16] = 0xf5; | ||
1552 | |||
1553 | return reg_w(gspca_dev, 0x1190, gamma, 17); | ||
1554 | } | ||
1555 | |||
1556 | static int set_redblue(struct gspca_dev *gspca_dev) | ||
1557 | { | ||
1558 | struct sd *sd = (struct sd *) gspca_dev; | ||
1559 | reg_w1(gspca_dev, 0x118c, sd->red); | ||
1560 | reg_w1(gspca_dev, 0x118f, sd->blue); | ||
1561 | return 0; | ||
1562 | } | ||
1563 | |||
1564 | static int set_hvflip(struct gspca_dev *gspca_dev) | ||
1565 | { | ||
1566 | u8 value, tslb; | ||
1567 | u16 value2; | ||
1568 | struct sd *sd = (struct sd *) gspca_dev; | ||
1569 | switch (sd->sensor) { | ||
1570 | case SENSOR_OV9650: | ||
1571 | i2c_r1(gspca_dev, 0x1e, &value); | ||
1572 | value &= ~0x30; | ||
1573 | tslb = 0x01; | ||
1574 | if (sd->hflip) | ||
1575 | value |= 0x20; | ||
1576 | if (sd->vflip) { | ||
1577 | value |= 0x10; | ||
1578 | tslb = 0x49; | ||
1579 | } | ||
1580 | i2c_w1(gspca_dev, 0x1e, value); | ||
1581 | i2c_w1(gspca_dev, 0x3a, tslb); | ||
1582 | break; | ||
1583 | case SENSOR_MT9V111: | ||
1584 | case SENSOR_MT9V011: | ||
1585 | i2c_r2(gspca_dev, 0x20, &value2); | ||
1586 | value2 &= ~0xc0a0; | ||
1587 | if (sd->hflip) | ||
1588 | value2 |= 0x8080; | ||
1589 | if (sd->vflip) | ||
1590 | value2 |= 0x4020; | ||
1591 | i2c_w2(gspca_dev, 0x20, value2); | ||
1592 | break; | ||
1593 | case SENSOR_MT9M111: | ||
1594 | case SENSOR_MT9V112: | ||
1595 | i2c_r2(gspca_dev, 0x20, &value2); | ||
1596 | value2 &= ~0x0003; | ||
1597 | if (sd->hflip) | ||
1598 | value2 |= 0x0002; | ||
1599 | if (sd->vflip) | ||
1600 | value2 |= 0x0001; | ||
1601 | i2c_w2(gspca_dev, 0x20, value2); | ||
1602 | break; | ||
1603 | case SENSOR_HV7131R: | ||
1604 | i2c_r1(gspca_dev, 0x01, &value); | ||
1605 | value &= ~0x03; | ||
1606 | if (sd->vflip) | ||
1607 | value |= 0x01; | ||
1608 | if (sd->hflip) | ||
1609 | value |= 0x02; | ||
1610 | i2c_w1(gspca_dev, 0x01, value); | ||
1611 | break; | ||
1612 | } | ||
1613 | return 0; | ||
1614 | } | ||
1615 | |||
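/*
 * Exposure and gain are programmed in the sensor itself: the i2c command
 * block gets a sensor-specific register, length and byte order before it
 * is handed to i2c_w().
 */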
1616 | static int set_exposure(struct gspca_dev *gspca_dev) | ||
1617 | { | ||
1618 | struct sd *sd = (struct sd *) gspca_dev; | ||
1619 | u8 exp[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e}; | ||
1620 | switch (sd->sensor) { | ||
1621 | case SENSOR_OV7660: | ||
1622 | case SENSOR_OV7670: | ||
1623 | case SENSOR_SOI968: | ||
1624 | case SENSOR_OV9655: | ||
1625 | case SENSOR_OV9650: | ||
1626 | exp[0] |= (3 << 4); | ||
1627 | exp[2] = 0x2d; | ||
1628 | exp[3] = sd->exposure & 0xff; | ||
1629 | exp[4] = sd->exposure >> 8; | ||
1630 | break; | ||
1631 | case SENSOR_MT9M001: | ||
1632 | case SENSOR_MT9M111: | ||
1633 | case SENSOR_MT9V112: | ||
1634 | case SENSOR_MT9V111: | ||
1635 | case SENSOR_MT9V011: | ||
1636 | exp[0] |= (3 << 4); | ||
1637 | exp[2] = 0x09; | ||
1638 | exp[3] = sd->exposure >> 8; | ||
1639 | exp[4] = sd->exposure & 0xff; | ||
1640 | break; | ||
1641 | case SENSOR_HV7131R: | ||
1642 | exp[0] |= (4 << 4); | ||
1643 | exp[2] = 0x25; | ||
1644 | exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16; | ||
1645 | exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8; | ||
1646 | exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff; | ||
1647 | break; | ||
1648 | } | ||
1649 | i2c_w(gspca_dev, exp); | ||
1650 | return 0; | ||
1651 | } | ||
1652 | |||
1653 | static int set_gain(struct gspca_dev *gspca_dev) | ||
1654 | { | ||
1655 | struct sd *sd = (struct sd *) gspca_dev; | ||
1656 | u8 gain[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d}; | ||
1657 | switch (sd->sensor) { | ||
1658 | case SENSOR_OV7660: | ||
1659 | case SENSOR_OV7670: | ||
1660 | case SENSOR_SOI968: | ||
1661 | case SENSOR_OV9655: | ||
1662 | case SENSOR_OV9650: | ||
1663 | gain[0] |= (2 << 4); | ||
1664 | gain[3] = ov_gain[sd->gain]; | ||
1665 | break; | ||
1666 | case SENSOR_MT9V011: | ||
1667 | case SENSOR_MT9V111: | ||
1668 | gain[0] |= (3 << 4); | ||
1669 | gain[2] = 0x35; | ||
1670 | gain[3] = micron1_gain[sd->gain] >> 8; | ||
1671 | gain[4] = micron1_gain[sd->gain] & 0xff; | ||
1672 | break; | ||
1673 | case SENSOR_MT9V112: | ||
1674 | case SENSOR_MT9M111: | ||
1675 | gain[0] |= (3 << 4); | ||
1676 | gain[2] = 0x2f; | ||
1677 | gain[3] = micron1_gain[sd->gain] >> 8; | ||
1678 | gain[4] = micron1_gain[sd->gain] & 0xff; | ||
1679 | break; | ||
1680 | case SENSOR_MT9M001: | ||
1681 | gain[0] |= (3 << 4); | ||
1682 | gain[2] = 0x2f; | ||
1683 | gain[3] = micron2_gain[sd->gain] >> 8; | ||
1684 | gain[4] = micron2_gain[sd->gain] & 0xff; | ||
1685 | break; | ||
1686 | case SENSOR_HV7131R: | ||
1687 | gain[0] |= (2 << 4); | ||
1688 | gain[2] = 0x30; | ||
1689 | gain[3] = hv7131r_gain[sd->gain]; | ||
1690 | break; | ||
1691 | } | ||
1692 | i2c_w(gspca_dev, gain); | ||
1693 | return 0; | ||
1694 | } | ||
1695 | |||
1696 | static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val) | ||
1697 | { | ||
1698 | struct sd *sd = (struct sd *) gspca_dev; | ||
1699 | |||
1700 | sd->brightness = val; | ||
1701 | if (gspca_dev->streaming) | ||
1702 | return set_cmatrix(gspca_dev); | ||
1703 | return 0; | ||
1704 | } | ||
1705 | |||
1706 | static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val) | ||
1707 | { | ||
1708 | struct sd *sd = (struct sd *) gspca_dev; | ||
1709 | *val = sd->brightness; | ||
1710 | return 0; | ||
1711 | } | ||
1712 | |||
1713 | |||
1714 | static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val) | ||
1715 | { | ||
1716 | struct sd *sd = (struct sd *) gspca_dev; | ||
1717 | |||
1718 | sd->contrast = val; | ||
1719 | if (gspca_dev->streaming) | ||
1720 | return set_cmatrix(gspca_dev); | ||
1721 | return 0; | ||
1722 | } | ||
1723 | |||
1724 | static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val) | ||
1725 | { | ||
1726 | struct sd *sd = (struct sd *) gspca_dev; | ||
1727 | *val = sd->contrast; | ||
1728 | return 0; | ||
1729 | } | ||
1730 | |||
1731 | static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val) | ||
1732 | { | ||
1733 | struct sd *sd = (struct sd *) gspca_dev; | ||
1734 | |||
1735 | sd->saturation = val; | ||
1736 | if (gspca_dev->streaming) | ||
1737 | return set_cmatrix(gspca_dev); | ||
1738 | return 0; | ||
1739 | } | ||
1740 | |||
1741 | static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val) | ||
1742 | { | ||
1743 | struct sd *sd = (struct sd *) gspca_dev; | ||
1744 | *val = sd->saturation; | ||
1745 | return 0; | ||
1746 | } | ||
1747 | |||
1748 | static int sd_sethue(struct gspca_dev *gspca_dev, s32 val) | ||
1749 | { | ||
1750 | struct sd *sd = (struct sd *) gspca_dev; | ||
1751 | |||
1752 | sd->hue = val; | ||
1753 | if (gspca_dev->streaming) | ||
1754 | return set_cmatrix(gspca_dev); | ||
1755 | return 0; | ||
1756 | } | ||
1757 | |||
1758 | static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val) | ||
1759 | { | ||
1760 | struct sd *sd = (struct sd *) gspca_dev; | ||
1761 | *val = sd->hue; | ||
1762 | return 0; | ||
1763 | } | ||
1764 | |||
1765 | static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val) | ||
1766 | { | ||
1767 | struct sd *sd = (struct sd *) gspca_dev; | ||
1768 | |||
1769 | sd->gamma = val; | ||
1770 | if (gspca_dev->streaming) | ||
1771 | return set_gamma(gspca_dev); | ||
1772 | return 0; | ||
1773 | } | ||
1774 | |||
1775 | static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val) | ||
1776 | { | ||
1777 | struct sd *sd = (struct sd *) gspca_dev; | ||
1778 | *val = sd->gamma; | ||
1779 | return 0; | ||
1780 | } | ||
1781 | |||
1782 | static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val) | ||
1783 | { | ||
1784 | struct sd *sd = (struct sd *) gspca_dev; | ||
1785 | |||
1786 | sd->red = val; | ||
1787 | if (gspca_dev->streaming) | ||
1788 | return set_redblue(gspca_dev); | ||
1789 | return 0; | ||
1790 | } | ||
1791 | |||
1792 | static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val) | ||
1793 | { | ||
1794 | struct sd *sd = (struct sd *) gspca_dev; | ||
1795 | *val = sd->red; | ||
1796 | return 0; | ||
1797 | } | ||
1798 | |||
1799 | static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val) | ||
1800 | { | ||
1801 | struct sd *sd = (struct sd *) gspca_dev; | ||
1802 | |||
1803 | sd->blue = val; | ||
1804 | if (gspca_dev->streaming) | ||
1805 | return set_redblue(gspca_dev); | ||
1806 | return 0; | ||
1807 | } | ||
1808 | |||
1809 | static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val) | ||
1810 | { | ||
1811 | struct sd *sd = (struct sd *) gspca_dev; | ||
1812 | *val = sd->blue; | ||
1813 | return 0; | ||
1814 | } | ||
1815 | |||
1816 | static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val) | ||
1817 | { | ||
1818 | struct sd *sd = (struct sd *) gspca_dev; | ||
1819 | |||
1820 | sd->hflip = val; | ||
1821 | if (gspca_dev->streaming) | ||
1822 | return set_hvflip(gspca_dev); | ||
1823 | return 0; | ||
1824 | } | ||
1825 | |||
1826 | static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val) | ||
1827 | { | ||
1828 | struct sd *sd = (struct sd *) gspca_dev; | ||
1829 | *val = sd->hflip; | ||
1830 | return 0; | ||
1831 | } | ||
1832 | |||
1833 | static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val) | ||
1834 | { | ||
1835 | struct sd *sd = (struct sd *) gspca_dev; | ||
1836 | |||
1837 | sd->vflip = val; | ||
1838 | if (gspca_dev->streaming) | ||
1839 | return set_hvflip(gspca_dev); | ||
1840 | return 0; | ||
1841 | } | ||
1842 | |||
1843 | static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val) | ||
1844 | { | ||
1845 | struct sd *sd = (struct sd *) gspca_dev; | ||
1846 | *val = sd->vflip; | ||
1847 | return 0; | ||
1848 | } | ||
1849 | |||
1850 | static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val) | ||
1851 | { | ||
1852 | struct sd *sd = (struct sd *) gspca_dev; | ||
1853 | |||
1854 | sd->exposure = val; | ||
1855 | if (gspca_dev->streaming) | ||
1856 | return set_exposure(gspca_dev); | ||
1857 | return 0; | ||
1858 | } | ||
1859 | |||
1860 | static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val) | ||
1861 | { | ||
1862 | struct sd *sd = (struct sd *) gspca_dev; | ||
1863 | *val = sd->exposure; | ||
1864 | return 0; | ||
1865 | } | ||
1866 | |||
1867 | static int sd_setgain(struct gspca_dev *gspca_dev, s32 val) | ||
1868 | { | ||
1869 | struct sd *sd = (struct sd *) gspca_dev; | ||
1870 | |||
1871 | sd->gain = val; | ||
1872 | if (gspca_dev->streaming) | ||
1873 | return set_gain(gspca_dev); | ||
1874 | return 0; | ||
1875 | } | ||
1876 | |||
1877 | static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val) | ||
1878 | { | ||
1879 | struct sd *sd = (struct sd *) gspca_dev; | ||
1880 | *val = sd->gain; | ||
1881 | return 0; | ||
1882 | } | ||
1883 | |||
1884 | static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val) | ||
1885 | { | ||
1886 | struct sd *sd = (struct sd *) gspca_dev; | ||
1887 | sd->auto_exposure = val; | ||
1888 | return 0; | ||
1889 | } | ||
1890 | |||
1891 | static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val) | ||
1892 | { | ||
1893 | struct sd *sd = (struct sd *) gspca_dev; | ||
1894 | *val = sd->auto_exposure; | ||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | #ifdef CONFIG_VIDEO_ADV_DEBUG | ||
1899 | static int sd_dbg_g_register(struct gspca_dev *gspca_dev, | ||
1900 | struct v4l2_dbg_register *reg) | ||
1901 | { | ||
1902 | struct sd *sd = (struct sd *) gspca_dev; | ||
1903 | switch (reg->match.type) { | ||
1904 | case V4L2_CHIP_MATCH_HOST: | ||
1905 | if (reg->match.addr != 0) | ||
1906 | return -EINVAL; | ||
1907 | if (reg->reg < 0x1000 || reg->reg > 0x11ff) | ||
1908 | return -EINVAL; | ||
1909 | if (reg_r(gspca_dev, reg->reg, 1) < 0) | ||
1910 | return -EINVAL; | ||
1911 | reg->val = gspca_dev->usb_buf[0]; | ||
1912 | return 0; | ||
1913 | case V4L2_CHIP_MATCH_I2C_ADDR: | ||
1914 | if (reg->match.addr != sd->i2c_addr) | ||
1915 | return -EINVAL; | ||
1916 | if (sd->sensor >= SENSOR_MT9V011 && | ||
1917 | sd->sensor <= SENSOR_MT9M111) { | ||
1918 | if (i2c_r2(gspca_dev, reg->reg, (u16 *)®->val) < 0) | ||
1919 | return -EINVAL; | ||
1920 | } else { | ||
1921 | if (i2c_r1(gspca_dev, reg->reg, (u8 *)®->val) < 0) | ||
1922 | return -EINVAL; | ||
1923 | } | ||
1924 | return 0; | ||
1925 | } | ||
1926 | return -EINVAL; | ||
1927 | } | ||
1928 | |||
1929 | static int sd_dbg_s_register(struct gspca_dev *gspca_dev, | ||
1930 | struct v4l2_dbg_register *reg) | ||
1931 | { | ||
1932 | struct sd *sd = (struct sd *) gspca_dev; | ||
1933 | switch (reg->match.type) { | ||
1934 | case V4L2_CHIP_MATCH_HOST: | ||
1935 | if (reg->match.addr != 0) | ||
1936 | return -EINVAL; | ||
1937 | if (reg->reg < 0x1000 || reg->reg > 0x11ff) | ||
1938 | return -EINVAL; | ||
1939 | if (reg_w1(gspca_dev, reg->reg, reg->val) < 0) | ||
1940 | return -EINVAL; | ||
1941 | return 0; | ||
1942 | case V4L2_CHIP_MATCH_I2C_ADDR: | ||
1943 | if (reg->match.addr != sd->i2c_addr) | ||
1944 | return -EINVAL; | ||
1945 | if (sd->sensor >= SENSOR_MT9V011 && | ||
1946 | sd->sensor <= SENSOR_MT9M111) { | ||
1947 | if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0) | ||
1948 | return -EINVAL; | ||
1949 | } else { | ||
1950 | if (i2c_w1(gspca_dev, reg->reg, reg->val) < 0) | ||
1951 | return -EINVAL; | ||
1952 | } | ||
1953 | return 0; | ||
1954 | } | ||
1955 | return -EINVAL; | ||
1956 | } | ||
1957 | #endif | ||
1958 | |||
1959 | static int sd_chip_ident(struct gspca_dev *gspca_dev, | ||
1960 | struct v4l2_dbg_chip_ident *chip) | ||
1961 | { | ||
1962 | struct sd *sd = (struct sd *) gspca_dev; | ||
1963 | |||
1964 | switch (chip->match.type) { | ||
1965 | case V4L2_CHIP_MATCH_HOST: | ||
1966 | if (chip->match.addr != 0) | ||
1967 | return -EINVAL; | ||
1968 | chip->revision = 0; | ||
1969 | chip->ident = V4L2_IDENT_SN9C20X; | ||
1970 | return 0; | ||
1971 | case V4L2_CHIP_MATCH_I2C_ADDR: | ||
1972 | if (chip->match.addr != sd->i2c_addr) | ||
1973 | return -EINVAL; | ||
1974 | chip->revision = 0; | ||
1975 | chip->ident = i2c_ident[sd->sensor]; | ||
1976 | return 0; | ||
1977 | } | ||
1978 | return -EINVAL; | ||
1979 | } | ||
1980 | |||
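/*
 * id->driver_info packs the button GPIO mask, sensor type and sensor i2c
 * address, as laid out by the SN9C20X() macro used in the device table.
 */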
1981 | static int sd_config(struct gspca_dev *gspca_dev, | ||
1982 | const struct usb_device_id *id) | ||
1983 | { | ||
1984 | struct sd *sd = (struct sd *) gspca_dev; | ||
1985 | struct cam *cam; | ||
1986 | |||
1987 | cam = &gspca_dev->cam; | ||
1988 | |||
1989 | sd->sensor = (id->driver_info >> 8) & 0xff; | ||
1990 | sd->i2c_addr = id->driver_info & 0xff; | ||
1991 | |||
1992 | switch (sd->sensor) { | ||
1993 | case SENSOR_OV9650: | ||
1994 | cam->cam_mode = sxga_mode; | ||
1995 | cam->nmodes = ARRAY_SIZE(sxga_mode); | ||
1996 | break; | ||
1997 | default: | ||
1998 | cam->cam_mode = vga_mode; | ||
1999 | cam->nmodes = ARRAY_SIZE(vga_mode); | ||
2000 | } | ||
2001 | |||
2002 | sd->old_step = 0; | ||
2003 | sd->older_step = 0; | ||
2004 | sd->exposure_step = 16; | ||
2005 | |||
2006 | sd->brightness = BRIGHTNESS_DEFAULT; | ||
2007 | sd->contrast = CONTRAST_DEFAULT; | ||
2008 | sd->saturation = SATURATION_DEFAULT; | ||
2009 | sd->hue = HUE_DEFAULT; | ||
2010 | sd->gamma = GAMMA_DEFAULT; | ||
2011 | sd->red = RED_DEFAULT; | ||
2012 | sd->blue = BLUE_DEFAULT; | ||
2013 | |||
2014 | sd->hflip = HFLIP_DEFAULT; | ||
2015 | sd->vflip = VFLIP_DEFAULT; | ||
2016 | sd->exposure = EXPOSURE_DEFAULT; | ||
2017 | sd->gain = GAIN_DEFAULT; | ||
2018 | sd->auto_exposure = AUTO_EXPOSURE_DEFAULT; | ||
2019 | |||
2020 | sd->quality = 95; | ||
2021 | |||
2022 | #ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV | ||
2023 | sd->input_gpio = (id->driver_info >> 16) & 0xff; | ||
2024 | if (sn9c20x_input_init(gspca_dev) < 0) | ||
2025 | return -ENODEV; | ||
2026 | #endif | ||
2027 | return 0; | ||
2028 | } | ||
2029 | |||
2030 | static int sd_init(struct gspca_dev *gspca_dev) | ||
2031 | { | ||
2032 | struct sd *sd = (struct sd *) gspca_dev; | ||
2033 | int i; | ||
2034 | u8 value; | ||
2035 | u8 i2c_init[9] = | ||
2036 | {0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}; | ||
2037 | |||
2038 | for (i = 0; i < ARRAY_SIZE(bridge_init); i++) { | ||
2039 | value = bridge_init[i][1]; | ||
2040 | if (reg_w(gspca_dev, bridge_init[i][0], &value, 1) < 0) { | ||
2041 | err("Device initialization failed"); | ||
2042 | return -ENODEV; | ||
2043 | } | ||
2044 | } | ||
2045 | |||
2046 | if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) { | ||
2047 | err("Device initialization failed"); | ||
2048 | return -ENODEV; | ||
2049 | } | ||
2050 | |||
2051 | switch (sd->sensor) { | ||
2052 | case SENSOR_OV9650: | ||
2053 | if (ov9650_init_sensor(gspca_dev) < 0) | ||
2054 | return -ENODEV; | ||
2055 | info("OV9650 sensor detected"); | ||
2056 | break; | ||
2057 | case SENSOR_OV9655: | ||
2058 | if (ov9655_init_sensor(gspca_dev) < 0) | ||
2059 | return -ENODEV; | ||
2060 | info("OV9655 sensor detected"); | ||
2061 | break; | ||
2062 | case SENSOR_SOI968: | ||
2063 | if (soi968_init_sensor(gspca_dev) < 0) | ||
2064 | return -ENODEV; | ||
2065 | info("SOI968 sensor detected"); | ||
2066 | break; | ||
2067 | case SENSOR_OV7660: | ||
2068 | if (ov7660_init_sensor(gspca_dev) < 0) | ||
2069 | return -ENODEV; | ||
2070 | info("OV7660 sensor detected"); | ||
2071 | break; | ||
2072 | case SENSOR_OV7670: | ||
2073 | if (ov7670_init_sensor(gspca_dev) < 0) | ||
2074 | return -ENODEV; | ||
2075 | info("OV7670 sensor detected"); | ||
2076 | break; | ||
2077 | case SENSOR_MT9VPRB: | ||
2078 | if (mt9v_init_sensor(gspca_dev) < 0) | ||
2079 | return -ENODEV; | ||
2080 | break; | ||
2081 | case SENSOR_MT9M111: | ||
2082 | if (mt9m111_init_sensor(gspca_dev) < 0) | ||
2083 | return -ENODEV; | ||
2084 | info("MT9M111 sensor detected"); | ||
2085 | break; | ||
2086 | case SENSOR_MT9M001: | ||
2087 | if (mt9m001_init_sensor(gspca_dev) < 0) | ||
2088 | return -ENODEV; | ||
2089 | info("MT9M001 sensor detected"); | ||
2090 | break; | ||
2091 | case SENSOR_HV7131R: | ||
2092 | if (hv7131r_init_sensor(gspca_dev) < 0) | ||
2093 | return -ENODEV; | ||
2094 | info("HV7131R sensor detected"); | ||
2095 | break; | ||
2096 | default: | ||
2097 | info("Unsupported sensor"); | ||
2098 | return -ENODEV; | ||
2099 | } | ||
2100 | |||
2101 | return 0; | ||
2102 | } | ||
2103 | |||
2104 | static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode) | ||
2105 | { | ||
2106 | struct sd *sd = (struct sd *) gspca_dev; | ||
2107 | u8 value; | ||
2108 | switch (sd->sensor) { | ||
2109 | case SENSOR_OV9650: | ||
2110 | if (mode & MODE_SXGA) { | ||
2111 | i2c_w1(gspca_dev, 0x17, 0x1b); | ||
2112 | i2c_w1(gspca_dev, 0x18, 0xbc); | ||
2113 | i2c_w1(gspca_dev, 0x19, 0x01); | ||
2114 | i2c_w1(gspca_dev, 0x1a, 0x82); | ||
2115 | i2c_r1(gspca_dev, 0x12, &value); | ||
2116 | i2c_w1(gspca_dev, 0x12, value & 0x07); | ||
2117 | } else { | ||
2118 | i2c_w1(gspca_dev, 0x17, 0x24); | ||
2119 | i2c_w1(gspca_dev, 0x18, 0xc5); | ||
2120 | i2c_w1(gspca_dev, 0x19, 0x00); | ||
2121 | i2c_w1(gspca_dev, 0x1a, 0x3c); | ||
2122 | i2c_r1(gspca_dev, 0x12, &value); | ||
2123 | i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40); | ||
2124 | } | ||
2125 | break; | ||
2126 | } | ||
2127 | } | ||
2128 | |||
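/*
 * HW_WIN() builds the 6-byte capture window (start offsets plus scaled
 * width/height) written to register 0x1180; CLR_WIN() builds the 5-byte
 * window descriptor written to 0x10fb.
 */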
2129 | #define HW_WIN(mode, hstart, vstart) \ | ||
2130 | ((const u8 []){hstart & 0xff, hstart >> 8, \ | ||
2131 | vstart & 0xff, vstart >> 8, \ | ||
2132 | (mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \ | ||
2133 | (mode & MODE_SXGA ? 1024 >> 3 : 480 >> 3)}) | ||
2134 | |||
2135 | #define CLR_WIN(width, height) \ | ||
2136 | ((const u8 [])\ | ||
2137 | {0, width >> 2, 0, height >> 1,\ | ||
2138 | ((width >> 10) & 0x01) | ((height >> 8) & 0x6)}) | ||
2139 | |||
2140 | static int sd_start(struct gspca_dev *gspca_dev) | ||
2141 | { | ||
2142 | struct sd *sd = (struct sd *) gspca_dev; | ||
2143 | int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; | ||
2144 | int width = gspca_dev->width; | ||
2145 | int height = gspca_dev->height; | ||
2146 | u8 fmt, scale = 0; | ||
2147 | |||
2148 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | ||
2149 | if (sd->jpeg_hdr == NULL) | ||
2150 | return -ENOMEM; | ||
2151 | |||
2152 | jpeg_define(sd->jpeg_hdr, height, width, | ||
2153 | 0x21); | ||
2154 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | ||
2155 | |||
2156 | if (mode & MODE_RAW) | ||
2157 | fmt = 0x2d; | ||
2158 | else if (mode & MODE_JPEG) | ||
2159 | fmt = 0x2c; | ||
2160 | else | ||
2161 | fmt = 0x2f; | ||
2162 | |||
2163 | switch (mode & 0x0f) { | ||
2164 | case 3: | ||
2165 | scale = 0xc0; | ||
2166 | info("Set 1280x1024"); | ||
2167 | break; | ||
2168 | case 2: | ||
2169 | scale = 0x80; | ||
2170 | info("Set 640x480"); | ||
2171 | break; | ||
2172 | case 1: | ||
2173 | scale = 0x90; | ||
2174 | info("Set 320x240"); | ||
2175 | break; | ||
2176 | case 0: | ||
2177 | scale = 0xa0; | ||
2178 | info("Set 160x120"); | ||
2179 | break; | ||
2180 | } | ||
2181 | |||
2182 | configure_sensor_output(gspca_dev, mode); | ||
2183 | reg_w(gspca_dev, 0x1100, sd->jpeg_hdr + JPEG_QT0_OFFSET, 64); | ||
2184 | reg_w(gspca_dev, 0x1140, sd->jpeg_hdr + JPEG_QT1_OFFSET, 64); | ||
2185 | reg_w(gspca_dev, 0x10fb, CLR_WIN(width, height), 5); | ||
2186 | reg_w(gspca_dev, 0x1180, HW_WIN(mode, sd->hstart, sd->vstart), 6); | ||
2187 | reg_w1(gspca_dev, 0x1189, scale); | ||
2188 | reg_w1(gspca_dev, 0x10e0, fmt); | ||
2189 | |||
2190 | set_cmatrix(gspca_dev); | ||
2191 | set_gamma(gspca_dev); | ||
2192 | set_redblue(gspca_dev); | ||
2193 | set_gain(gspca_dev); | ||
2194 | set_exposure(gspca_dev); | ||
2195 | set_hvflip(gspca_dev); | ||
2196 | |||
2197 | reg_r(gspca_dev, 0x1061, 1); | ||
2198 | reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02); | ||
2199 | return 0; | ||
2200 | } | ||
2201 | |||
2202 | static void sd_stopN(struct gspca_dev *gspca_dev) | ||
2203 | { | ||
2204 | reg_r(gspca_dev, 0x1061, 1); | ||
2205 | reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02); | ||
2206 | } | ||
2207 | |||
2208 | static void sd_stop0(struct gspca_dev *gspca_dev) | ||
2209 | { | ||
2210 | struct sd *sd = (struct sd *) gspca_dev; | ||
2211 | kfree(sd->jpeg_hdr); | ||
2212 | } | ||
2213 | |||
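/*
 * Software auto-exposure: compare the average luminance reported in the
 * frame trailer with MIN/MAX_AVG_LUM and step the exposure towards the
 * target.  The step is halved when the direction of the last two
 * adjustments differs and grown otherwise, and the exposure stays within
 * the 0x10..0x1770 range.
 */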
2214 | static void do_autoexposure(struct gspca_dev *gspca_dev) | ||
2215 | { | ||
2216 | struct sd *sd = (struct sd *) gspca_dev; | ||
2217 | int avg_lum, new_exp; | ||
2218 | |||
2219 | if (!sd->auto_exposure) | ||
2220 | return; | ||
2221 | |||
2222 | avg_lum = atomic_read(&sd->avg_lum); | ||
2223 | |||
2224 | /* | ||
2225 | * Some values below are hard-coded, such as the | ||
2226 | * minimal and maximal exposure limits and the | ||
2227 | * exposure step sizes. | ||
2228 | */ | ||
2229 | if (avg_lum < MIN_AVG_LUM) { | ||
2230 | if (sd->exposure > 0x1770) | ||
2231 | return; | ||
2232 | |||
2233 | new_exp = sd->exposure + sd->exposure_step; | ||
2234 | if (new_exp > 0x1770) | ||
2235 | new_exp = 0x1770; | ||
2236 | if (new_exp < 0x10) | ||
2237 | new_exp = 0x10; | ||
2238 | sd->exposure = new_exp; | ||
2239 | set_exposure(gspca_dev); | ||
2240 | |||
2241 | sd->older_step = sd->old_step; | ||
2242 | sd->old_step = 1; | ||
2243 | |||
2244 | if (sd->old_step ^ sd->older_step) | ||
2245 | sd->exposure_step /= 2; | ||
2246 | else | ||
2247 | sd->exposure_step += 2; | ||
2248 | } | ||
2249 | if (avg_lum > MAX_AVG_LUM) { | ||
2250 | if (sd->exposure < 0x10) | ||
2251 | return; | ||
2252 | new_exp = sd->exposure - sd->exposure_step; | ||
2253 | if (new_exp > 0x1770) | ||
2254 | new_exp = 0x1770; | ||
2255 | if (new_exp < 0x10) | ||
2256 | new_exp = 0x10; | ||
2257 | sd->exposure = new_exp; | ||
2258 | set_exposure(gspca_dev); | ||
2259 | sd->older_step = sd->old_step; | ||
2260 | sd->old_step = 0; | ||
2261 | |||
2262 | if (sd->old_step ^ sd->older_step) | ||
2263 | sd->exposure_step /= 2; | ||
2264 | else | ||
2265 | sd->exposure_step += 2; | ||
2266 | } | ||
2267 | } | ||
2268 | |||
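/*
 * A 64-byte packet starting with ff ff 00 c4 c4 96 marks the end of a
 * frame; it carries what appear to be per-window luminance values, which
 * are summed here into avg_lum for the auto-exposure loop.
 */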
2269 | static void sd_pkt_scan(struct gspca_dev *gspca_dev, | ||
2270 | struct gspca_frame *frame, /* target */ | ||
2271 | u8 *data, /* isoc packet */ | ||
2272 | int len) /* iso packet length */ | ||
2273 | { | ||
2274 | struct sd *sd = (struct sd *) gspca_dev; | ||
2275 | int avg_lum; | ||
2276 | static unsigned char frame_header[] = | ||
2277 | {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96}; | ||
2278 | if (len == 64 && memcmp(data, frame_header, 6) == 0) { | ||
2279 | avg_lum = ((data[35] >> 2) & 3) | | ||
2280 | (data[20] << 2) | | ||
2281 | (data[19] << 10); | ||
2282 | avg_lum += ((data[35] >> 4) & 3) | | ||
2283 | (data[22] << 2) | | ||
2284 | (data[21] << 10); | ||
2285 | avg_lum += ((data[35] >> 6) & 3) | | ||
2286 | (data[24] << 2) | | ||
2287 | (data[23] << 10); | ||
2288 | avg_lum += (data[36] & 3) | | ||
2289 | (data[26] << 2) | | ||
2290 | (data[25] << 10); | ||
2291 | avg_lum += ((data[36] >> 2) & 3) | | ||
2292 | (data[28] << 2) | | ||
2293 | (data[27] << 10); | ||
2294 | avg_lum += ((data[36] >> 4) & 3) | | ||
2295 | (data[30] << 2) | | ||
2296 | (data[29] << 10); | ||
2297 | avg_lum += ((data[36] >> 6) & 3) | | ||
2298 | (data[32] << 2) | | ||
2299 | (data[31] << 10); | ||
2300 | avg_lum += ((data[44] >> 4) & 3) | | ||
2301 | (data[34] << 2) | | ||
2302 | (data[33] << 10); | ||
2303 | avg_lum >>= 9; | ||
2304 | atomic_set(&sd->avg_lum, avg_lum); | ||
2305 | gspca_frame_add(gspca_dev, LAST_PACKET, | ||
2306 | frame, data, len); | ||
2307 | return; | ||
2308 | } | ||
2309 | if (gspca_dev->last_packet_type == LAST_PACKET) { | ||
2310 | if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv | ||
2311 | & MODE_JPEG) { | ||
2312 | gspca_frame_add(gspca_dev, FIRST_PACKET, frame, | ||
2313 | sd->jpeg_hdr, JPEG_HDR_SZ); | ||
2314 | gspca_frame_add(gspca_dev, INTER_PACKET, frame, | ||
2315 | data, len); | ||
2316 | } else { | ||
2317 | gspca_frame_add(gspca_dev, FIRST_PACKET, frame, | ||
2318 | data, len); | ||
2319 | } | ||
2320 | } else { | ||
2321 | gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); | ||
2322 | } | ||
2323 | } | ||
2324 | |||
2325 | /* sub-driver description */ | ||
2326 | static const struct sd_desc sd_desc = { | ||
2327 | .name = MODULE_NAME, | ||
2328 | .ctrls = sd_ctrls, | ||
2329 | .nctrls = ARRAY_SIZE(sd_ctrls), | ||
2330 | .config = sd_config, | ||
2331 | .init = sd_init, | ||
2332 | .start = sd_start, | ||
2333 | .stopN = sd_stopN, | ||
2334 | .stop0 = sd_stop0, | ||
2335 | .pkt_scan = sd_pkt_scan, | ||
2336 | .dq_callback = do_autoexposure, | ||
2337 | #ifdef CONFIG_VIDEO_ADV_DEBUG | ||
2338 | .set_register = sd_dbg_s_register, | ||
2339 | .get_register = sd_dbg_g_register, | ||
2340 | #endif | ||
2341 | .get_chip_ident = sd_chip_ident, | ||
2342 | }; | ||
2343 | |||
2344 | #define SN9C20X(sensor, i2c_addr, button_mask) \ | ||
2345 | .driver_info = (button_mask << 16) \ | ||
2346 | | (SENSOR_ ## sensor << 8) \ | ||
2347 | | (i2c_addr) | ||
2348 | |||
2349 | static const __devinitdata struct usb_device_id device_table[] = { | ||
2350 | {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, | ||
2351 | {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, | ||
2352 | {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, | ||
2353 | {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)}, | ||
2354 | {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)}, | ||
2355 | {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, | ||
2356 | {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, | ||
2357 | {USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)}, | ||
2358 | {USB_DEVICE(0x0c45, 0x6270), SN9C20X(MT9VPRB, 0x00, 0)}, | ||
2359 | {USB_DEVICE(0x0c45, 0x627b), SN9C20X(OV7660, 0x21, 0)}, | ||
2360 | {USB_DEVICE(0x0c45, 0x627c), SN9C20X(HV7131R, 0x11, 0)}, | ||
2361 | {USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)}, | ||
2362 | {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)}, | ||
2363 | {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)}, | ||
2364 | {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)}, | ||
2365 | {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)}, | ||
2366 | {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)}, | ||
2367 | {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)}, | ||
2368 | {USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)}, | ||
2369 | {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)}, | ||
2370 | {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, 0)}, | ||
2371 | {USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)}, | ||
2372 | {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)}, | ||
2373 | {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, | ||
2374 | {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)}, | ||
2375 | {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)}, | ||
2376 | {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)}, | ||
2377 | {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)}, | ||
2378 | {USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)}, | ||
2379 | {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)}, | ||
2380 | {USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)}, | ||
2381 | {USB_DEVICE(0xa168, 0x0617), SN9C20X(MT9M111, 0x5d, 0)}, | ||
2382 | {} | ||
2383 | }; | ||
2384 | MODULE_DEVICE_TABLE(usb, device_table); | ||
2385 | |||
2386 | /* -- device connect -- */ | ||
2387 | static int sd_probe(struct usb_interface *intf, | ||
2388 | const struct usb_device_id *id) | ||
2389 | { | ||
2390 | return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), | ||
2391 | THIS_MODULE); | ||
2392 | } | ||
2393 | |||
2394 | static void sd_disconnect(struct usb_interface *intf) | ||
2395 | { | ||
2396 | #ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV | ||
2397 | struct gspca_dev *gspca_dev = usb_get_intfdata(intf); | ||
2398 | |||
2399 | sn9c20x_input_cleanup(gspca_dev); | ||
2400 | #endif | ||
2401 | |||
2402 | gspca_disconnect(intf); | ||
2403 | } | ||
2404 | |||
2405 | static struct usb_driver sd_driver = { | ||
2406 | .name = MODULE_NAME, | ||
2407 | .id_table = device_table, | ||
2408 | .probe = sd_probe, | ||
2409 | .disconnect = sd_disconnect, | ||
2410 | #ifdef CONFIG_PM | ||
2411 | .suspend = gspca_suspend, | ||
2412 | .resume = gspca_resume, | ||
2413 | .reset_resume = gspca_resume, | ||
2414 | #endif | ||
2415 | }; | ||
2416 | |||
2417 | /* -- module insert / remove -- */ | ||
2418 | static int __init sd_mod_init(void) | ||
2419 | { | ||
2420 | int ret; | ||
2421 | ret = usb_register(&sd_driver); | ||
2422 | if (ret < 0) | ||
2423 | return ret; | ||
2424 | info("registered"); | ||
2425 | return 0; | ||
2426 | } | ||
2427 | static void __exit sd_mod_exit(void) | ||
2428 | { | ||
2429 | usb_deregister(&sd_driver); | ||
2430 | info("deregistered"); | ||
2431 | } | ||
2432 | |||
2433 | module_init(sd_mod_init); | ||
2434 | module_exit(sd_mod_exit); | ||
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 0d02f41fa7d0..d6332ab80669 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c | |||
@@ -1634,6 +1634,8 @@ static void setfreq(struct gspca_dev *gspca_dev) | |||
1634 | { | 1634 | { |
1635 | struct sd *sd = (struct sd *) gspca_dev; | 1635 | struct sd *sd = (struct sd *) gspca_dev; |
1636 | 1636 | ||
1637 | if (gspca_dev->ctrl_dis & (1 << FREQ_IDX)) | ||
1638 | return; | ||
1637 | if (sd->sensor == SENSOR_OV7660) { | 1639 | if (sd->sensor == SENSOR_OV7660) { |
1638 | switch (sd->freq) { | 1640 | switch (sd->freq) { |
1639 | case 0: /* Banding filter disabled */ | 1641 | case 0: /* Banding filter disabled */ |
@@ -1735,6 +1737,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
1735 | 1737 | ||
1736 | /* create the JPEG header */ | 1738 | /* create the JPEG header */ |
1737 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 1739 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
1740 | if (!sd->jpeg_hdr) | ||
1741 | return -ENOMEM; | ||
1738 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 1742 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
1739 | 0x21); /* JPEG 422 */ | 1743 | 0x21); /* JPEG 422 */ |
1740 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 1744 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c index 8806b2ff82be..fab7ef85a6c1 100644 --- a/drivers/media/video/gspca/spca500.c +++ b/drivers/media/video/gspca/spca500.c | |||
@@ -670,6 +670,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
670 | 670 | ||
671 | /* create the JPEG header */ | 671 | /* create the JPEG header */ |
672 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 672 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
673 | if (!sd->jpeg_hdr) | ||
674 | return -ENOMEM; | ||
673 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 675 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
674 | 0x22); /* JPEG 411 */ | 676 | 0x22); /* JPEG 411 */ |
675 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 677 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c index f25be20cf1a6..47628964801e 100644 --- a/drivers/media/video/gspca/stk014.c +++ b/drivers/media/video/gspca/stk014.c | |||
@@ -333,6 +333,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
333 | 333 | ||
334 | /* create the JPEG header */ | 334 | /* create the JPEG header */ |
335 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 335 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
336 | if (!sd->jpeg_hdr) | ||
337 | return -ENOMEM; | ||
336 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 338 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
337 | 0x22); /* JPEG 411 */ | 339 | 0x22); /* JPEG 411 */ |
338 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 340 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c index 3039ec208f3a..e5024c8496ef 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c | |||
@@ -64,7 +64,7 @@ static struct v4l2_pix_format hdcs1x00_mode[] = { | |||
64 | { | 64 | { |
65 | HDCS_1X00_DEF_WIDTH, | 65 | HDCS_1X00_DEF_WIDTH, |
66 | HDCS_1X00_DEF_HEIGHT, | 66 | HDCS_1X00_DEF_HEIGHT, |
67 | V4L2_PIX_FMT_SBGGR8, | 67 | V4L2_PIX_FMT_SGRBG8, |
68 | V4L2_FIELD_NONE, | 68 | V4L2_FIELD_NONE, |
69 | .sizeimage = | 69 | .sizeimage = |
70 | HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT, | 70 | HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT, |
@@ -80,7 +80,7 @@ static struct v4l2_pix_format hdcs1020_mode[] = { | |||
80 | { | 80 | { |
81 | HDCS_1020_DEF_WIDTH, | 81 | HDCS_1020_DEF_WIDTH, |
82 | HDCS_1020_DEF_HEIGHT, | 82 | HDCS_1020_DEF_HEIGHT, |
83 | V4L2_PIX_FMT_SBGGR8, | 83 | V4L2_PIX_FMT_SGRBG8, |
84 | V4L2_FIELD_NONE, | 84 | V4L2_FIELD_NONE, |
85 | .sizeimage = | 85 | .sizeimage = |
86 | HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT, | 86 | HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT, |
@@ -131,9 +131,11 @@ static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len) | |||
131 | (reg + len > 0xff))) | 131 | (reg + len > 0xff))) |
132 | return -EINVAL; | 132 | return -EINVAL; |
133 | 133 | ||
134 | for (i = 0; i < len; i++, reg++) { | 134 | for (i = 0; i < len; i++) { |
135 | regs[2*i] = reg; | 135 | regs[2 * i] = reg; |
136 | regs[2*i+1] = vals[i]; | 136 | regs[2 * i + 1] = vals[i]; |
137 | /* All addresses are shifted left one bit as bit 0 toggles r/w */ | ||
138 | reg += 2; | ||
137 | } | 139 | } |
138 | 140 | ||
139 | return stv06xx_write_sensor_bytes(sd, regs, len); | 141 | return stv06xx_write_sensor_bytes(sd, regs, len); |
@@ -174,7 +176,9 @@ static int hdcs_set_state(struct sd *sd, enum hdcs_power_state state) | |||
174 | } | 176 | } |
175 | 177 | ||
176 | ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val); | 178 | ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val); |
177 | if (ret < 0) | 179 | |
180 | /* Update the state if the write succeeded */ | ||
181 | if (!ret) | ||
178 | hdcs->state = state; | 182 | hdcs->state = state; |
179 | 183 | ||
180 | return ret; | 184 | return ret; |
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c index 9623f294bdac..5127bbf9dd26 100644 --- a/drivers/media/video/gspca/sunplus.c +++ b/drivers/media/video/gspca/sunplus.c | |||
@@ -973,6 +973,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
973 | 973 | ||
974 | /* create the JPEG header */ | 974 | /* create the JPEG header */ |
975 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 975 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
976 | if (!sd->jpeg_hdr) | ||
977 | return -ENOMEM; | ||
976 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 978 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
977 | 0x22); /* JPEG 411 */ | 979 | 0x22); /* JPEG 411 */ |
978 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 980 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c index 08422d315e68..3d2756f7874a 100644 --- a/drivers/media/video/gspca/zc3xx.c +++ b/drivers/media/video/gspca/zc3xx.c | |||
@@ -7243,6 +7243,8 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
7243 | 7243 | ||
7244 | /* create the JPEG header */ | 7244 | /* create the JPEG header */ |
7245 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); | 7245 | sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); |
7246 | if (!sd->jpeg_hdr) | ||
7247 | return -ENOMEM; | ||
7246 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, | 7248 | jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, |
7247 | 0x21); /* JPEG 422 */ | 7249 | 0x21); /* JPEG 422 */ |
7248 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); | 7250 | jpeg_set_qual(sd->jpeg_hdr, sd->quality); |
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c index 1fe8fc9183a7..b2260de645f0 100644 --- a/drivers/media/video/mt9v011.c +++ b/drivers/media/video/mt9v011.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/i2c.h> | 8 | #include <linux/i2c.h> |
9 | #include <linux/videodev2.h> | 9 | #include <linux/videodev2.h> |
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <asm/div64.h> | ||
11 | #include <media/v4l2-device.h> | 12 | #include <media/v4l2-device.h> |
12 | #include "mt9v011.h" | 13 | #include "mt9v011.h" |
13 | #include <media/v4l2-i2c-drv.h> | 14 | #include <media/v4l2-i2c-drv.h> |
@@ -57,6 +58,7 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = { | |||
57 | struct mt9v011 { | 58 | struct mt9v011 { |
58 | struct v4l2_subdev sd; | 59 | struct v4l2_subdev sd; |
59 | unsigned width, height; | 60 | unsigned width, height; |
61 | unsigned xtal; | ||
60 | 62 | ||
61 | u16 global_gain, red_bal, blue_bal; | 63 | u16 global_gain, red_bal, blue_bal; |
62 | }; | 64 | }; |
@@ -131,7 +133,7 @@ static const struct i2c_reg_value mt9v011_init_default[] = { | |||
131 | { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 }, | 133 | { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 }, |
132 | { R20_MT9V011_READ_MODE, 0x1000 }, | 134 | { R20_MT9V011_READ_MODE, 0x1000 }, |
133 | 135 | ||
134 | { R07_MT9V011_OUT_CTRL, 0x000a }, /* chip enable */ | 136 | { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */ |
135 | }; | 137 | }; |
136 | 138 | ||
137 | static void set_balance(struct v4l2_subdev *sd) | 139 | static void set_balance(struct v4l2_subdev *sd) |
@@ -154,6 +156,31 @@ static void set_balance(struct v4l2_subdev *sd) | |||
154 | mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); | 156 | mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); |
155 | } | 157 | } |
156 | 158 | ||
159 | static void calc_fps(struct v4l2_subdev *sd) | ||
160 | { | ||
161 | struct mt9v011 *core = to_mt9v011(sd); | ||
162 | unsigned height, width, hblank, vblank, speed; | ||
163 | unsigned row_time, t_time; | ||
164 | u64 frames_per_ms; | ||
165 | unsigned tmp; | ||
166 | |||
167 | height = mt9v011_read(sd, R03_MT9V011_HEIGHT); | ||
168 | width = mt9v011_read(sd, R04_MT9V011_WIDTH); | ||
169 | hblank = mt9v011_read(sd, R05_MT9V011_HBLANK); | ||
170 | vblank = mt9v011_read(sd, R06_MT9V011_VBLANK); | ||
171 | speed = mt9v011_read(sd, R0A_MT9V011_CLK_SPEED); | ||
172 | |||
173 | row_time = (width + 113 + hblank) * (speed + 2); | ||
174 | t_time = row_time * (height + vblank + 1); | ||
175 | |||
176 | frames_per_ms = core->xtal * 1000l; | ||
177 | do_div(frames_per_ms, t_time); | ||
178 | tmp = frames_per_ms; | ||
179 | |||
180 | v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clocks)\n", | ||
181 | tmp / 1000, tmp % 1000, t_time); | ||
182 | } | ||
183 | |||
157 | static void set_res(struct v4l2_subdev *sd) | 184 | static void set_res(struct v4l2_subdev *sd) |
158 | { | 185 | { |
159 | struct mt9v011 *core = to_mt9v011(sd); | 186 | struct mt9v011 *core = to_mt9v011(sd); |
@@ -175,10 +202,12 @@ static void set_res(struct v4l2_subdev *sd) | |||
175 | mt9v011_write(sd, R04_MT9V011_WIDTH, core->width); | 202 | mt9v011_write(sd, R04_MT9V011_WIDTH, core->width); |
176 | mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width); | 203 | mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width); |
177 | 204 | ||
178 | vstart = 8 + (640 - core->height) / 2; | 205 | vstart = 8 + (480 - core->height) / 2; |
179 | mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart); | 206 | mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart); |
180 | mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height); | 207 | mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height); |
181 | mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height); | 208 | mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height); |
209 | |||
210 | calc_fps(sd); | ||
182 | }; | 211 | }; |
183 | 212 | ||
184 | static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) | 213 | static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) |
@@ -215,6 +244,23 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) | |||
215 | return -EINVAL; | 244 | return -EINVAL; |
216 | } | 245 | } |
217 | 246 | ||
247 | static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) | ||
248 | { | ||
249 | int i; | ||
250 | |||
251 | v4l2_dbg(1, debug, sd, "queryctrl called\n"); | ||
252 | |||
253 | for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++) | ||
254 | if (qc->id && qc->id == mt9v011_qctrl[i].id) { | ||
255 | memcpy(qc, &(mt9v011_qctrl[i]), | ||
256 | sizeof(*qc)); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | return -EINVAL; | ||
261 | } | ||
262 | |||
263 | |||
218 | static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) | 264 | static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) |
219 | { | 265 | { |
220 | struct mt9v011 *core = to_mt9v011(sd); | 266 | struct mt9v011 *core = to_mt9v011(sd); |
@@ -294,6 +340,22 @@ static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) | |||
294 | return 0; | 340 | return 0; |
295 | } | 341 | } |
296 | 342 | ||
343 | static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data) | ||
344 | { | ||
345 | struct mt9v011 *core = to_mt9v011(sd); | ||
346 | unsigned *xtal = data; | ||
347 | |||
348 | v4l2_dbg(1, debug, sd, "s_config called\n"); | ||
349 | |||
350 | if (xtal) { | ||
351 | core->xtal = *xtal; | ||
352 | v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n", | ||
353 | *xtal / 1000000, (*xtal / 1000) % 1000); | ||
354 | } | ||
355 | |||
356 | return 0; | ||
357 | } | ||
358 | |||
297 | 359 | ||
298 | #ifdef CONFIG_VIDEO_ADV_DEBUG | 360 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
299 | static int mt9v011_g_register(struct v4l2_subdev *sd, | 361 | static int mt9v011_g_register(struct v4l2_subdev *sd, |
@@ -338,9 +400,11 @@ static int mt9v011_g_chip_ident(struct v4l2_subdev *sd, | |||
338 | } | 400 | } |
339 | 401 | ||
340 | static const struct v4l2_subdev_core_ops mt9v011_core_ops = { | 402 | static const struct v4l2_subdev_core_ops mt9v011_core_ops = { |
403 | .queryctrl = mt9v011_queryctrl, | ||
341 | .g_ctrl = mt9v011_g_ctrl, | 404 | .g_ctrl = mt9v011_g_ctrl, |
342 | .s_ctrl = mt9v011_s_ctrl, | 405 | .s_ctrl = mt9v011_s_ctrl, |
343 | .reset = mt9v011_reset, | 406 | .reset = mt9v011_reset, |
407 | .s_config = mt9v011_s_config, | ||
344 | .g_chip_ident = mt9v011_g_chip_ident, | 408 | .g_chip_ident = mt9v011_g_chip_ident, |
345 | #ifdef CONFIG_VIDEO_ADV_DEBUG | 409 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
346 | .g_register = mt9v011_g_register, | 410 | .g_register = mt9v011_g_register, |
@@ -395,6 +459,7 @@ static int mt9v011_probe(struct i2c_client *c, | |||
395 | core->global_gain = 0x0024; | 459 | core->global_gain = 0x0024; |
396 | core->width = 640; | 460 | core->width = 640; |
397 | core->height = 480; | 461 | core->height = 480; |
462 | core->xtal = 27000000; /* Hz */ | ||
398 | 463 | ||
399 | v4l_info(c, "chip found @ 0x%02x (%s)\n", | 464 | v4l_info(c, "chip found @ 0x%02x (%s)\n", |
400 | c->addr << 1, c->adapter->name); | 465 | c->addr << 1, c->adapter->name); |
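calc_fps() above reports the frame rate implied by the window and blanking registers: a row costs (width + 113 + hblank) * (speed + 2) pixel clocks, a frame costs row_time * (height + vblank + 1) clocks, and the rate is xtal / t_time, kept as fps x 1000 so three decimals can be printed. A stand-alone sketch of the same arithmetic; the CLK_SPEED value of 0 is an assumption, the other values match the driver's VGA defaults:

    #include <stdint.h>
    #include <stdio.h>

    /* Frame rate scaled by 1000 (milli-fps), mirroring the driver's
     * do_div()-based fixed-point math in calc_fps(). */
    static unsigned fps_times_1000(unsigned width, unsigned height,
                                   unsigned hblank, unsigned vblank,
                                   unsigned speed, unsigned xtal_hz)
    {
            uint64_t row_time, t_time;

            row_time = (uint64_t)(width + 113 + hblank) * (speed + 2);
            t_time   = row_time * (height + vblank + 1);  /* clocks/frame */

            return (unsigned)((uint64_t)xtal_hz * 1000 / t_time);
    }

    int main(void)
    {
            /* 640x480 window as set_res() programs it: HBLANK = 771 - 640,
             * VBLANK = 508 - 480; CLK_SPEED = 0 is assumed; 27 MHz is the
             * crystal default from probe(). */
            unsigned fps = fps_times_1000(640, 480, 131, 28, 0, 27000000);

            printf("%u.%03u fps\n", fps / 1000, fps % 1000); /* ~30.002 fps */
            return 0;
    }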
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b5a7513df4eb..5f6509a5f640 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1732,6 +1732,7 @@ config KS8842 | |||
1732 | config KS8851 | 1732 | config KS8851 |
1733 | tristate "Micrel KS8851 SPI" | 1733 | tristate "Micrel KS8851 SPI" |
1734 | depends on SPI | 1734 | depends on SPI |
1735 | select MII | ||
1735 | help | 1736 | help |
1736 | SPI driver for Micrel KS8851 SPI attached network chip. | 1737 | SPI driver for Micrel KS8851 SPI attached network chip. |
1737 | 1738 | ||
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 2e7419a61191..5041d10bae9d 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c | |||
@@ -1228,7 +1228,6 @@ static int at91ether_resume(struct platform_device *pdev) | |||
1228 | #endif | 1228 | #endif |
1229 | 1229 | ||
1230 | static struct platform_driver at91ether_driver = { | 1230 | static struct platform_driver at91ether_driver = { |
1231 | .probe = at91ether_probe, | ||
1232 | .remove = __devexit_p(at91ether_remove), | 1231 | .remove = __devexit_p(at91ether_remove), |
1233 | .suspend = at91ether_suspend, | 1232 | .suspend = at91ether_suspend, |
1234 | .resume = at91ether_resume, | 1233 | .resume = at91ether_resume, |
@@ -1240,7 +1239,7 @@ static struct platform_driver at91ether_driver = { | |||
1240 | 1239 | ||
1241 | static int __init at91ether_init(void) | 1240 | static int __init at91ether_init(void) |
1242 | { | 1241 | { |
1243 | return platform_driver_register(&at91ether_driver); | 1242 | return platform_driver_probe(&at91ether_driver, at91ether_probe); |
1244 | } | 1243 | } |
1245 | 1244 | ||
1246 | static void __exit at91ether_exit(void) | 1245 | static void __exit at91ether_exit(void) |
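The at91_ether hunk drops the .probe member and registers with platform_driver_probe() instead of platform_driver_register(); that variant probes only devices already present at registration time, which lets the probe routine stay in __init memory and be discarded after boot. A minimal sketch of the pattern; the driver and device names are placeholders, not taken from the patch:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init example_probe(struct platform_device *pdev)
    {
            /* one-shot hardware setup; never called for late-added devices */
            return 0;
    }

    static int __devexit example_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_driver = {
            /* no .probe member: it is passed to platform_driver_probe() */
            .remove = __devexit_p(example_remove),
            .driver = {
                    .name  = "example-device",
                    .owner = THIS_MODULE,
            },
    };

    static int __init example_init(void)
    {
            return platform_driver_probe(&example_driver, example_probe);
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
            platform_driver_unregister(&example_driver);
    }
    module_exit(example_exit);

    MODULE_LICENSE("GPL");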
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 18b566ad4fd1..cf30e278f182 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c | |||
@@ -318,7 +318,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) | |||
318 | pos3 = mca_read_stored_pos( slot, 3 ); | 318 | pos3 = mca_read_stored_pos( slot, 3 ); |
319 | pos4 = mca_read_stored_pos( slot, 4 ); | 319 | pos4 = mca_read_stored_pos( slot, 4 ); |
320 | 320 | ||
321 | for (l_i = 0; l_i < 0x09; l_i++) | 321 | for (l_i = 0; l_i < 8; l_i++) |
322 | if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i]) | 322 | if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i]) |
323 | break; | 323 | break; |
324 | ioaddr = at1700_mca_probe_list[l_i]; | 324 | ioaddr = at1700_mca_probe_list[l_i]; |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index c43f6a119295..dea3155688bb 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -667,7 +667,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
667 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 667 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
668 | struct be_rx_page_info *page_info; | 668 | struct be_rx_page_info *page_info; |
669 | u16 rxq_idx, i, num_rcvd, j; | 669 | u16 rxq_idx, i, num_rcvd, j; |
670 | u32 pktsize, hdr_len, curr_frag_len; | 670 | u32 pktsize, hdr_len, curr_frag_len, size; |
671 | u8 *start; | 671 | u8 *start; |
672 | 672 | ||
673 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 673 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
@@ -708,12 +708,13 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
708 | } | 708 | } |
709 | 709 | ||
710 | /* More frags present for this completion */ | 710 | /* More frags present for this completion */ |
711 | pktsize -= curr_frag_len; /* account for above copied frag */ | 711 | size = pktsize; |
712 | for (i = 1, j = 0; i < num_rcvd; i++) { | 712 | for (i = 1, j = 0; i < num_rcvd; i++) { |
713 | size -= curr_frag_len; | ||
713 | index_inc(&rxq_idx, rxq->len); | 714 | index_inc(&rxq_idx, rxq->len); |
714 | page_info = get_rx_page_info(adapter, rxq_idx); | 715 | page_info = get_rx_page_info(adapter, rxq_idx); |
715 | 716 | ||
716 | curr_frag_len = min(pktsize, rx_frag_size); | 717 | curr_frag_len = min(size, rx_frag_size); |
717 | 718 | ||
718 | /* Coalesce all frags from the same physical page in one slot */ | 719 | /* Coalesce all frags from the same physical page in one slot */ |
719 | if (page_info->page_offset == 0) { | 720 | if (page_info->page_offset == 0) { |
@@ -731,7 +732,6 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
731 | skb_shinfo(skb)->frags[j].size += curr_frag_len; | 732 | skb_shinfo(skb)->frags[j].size += curr_frag_len; |
732 | skb->len += curr_frag_len; | 733 | skb->len += curr_frag_len; |
733 | skb->data_len += curr_frag_len; | 734 | skb->data_len += curr_frag_len; |
734 | pktsize -= curr_frag_len; | ||
735 | 735 | ||
736 | memset(page_info, 0, sizeof(*page_info)); | 736 | memset(page_info, 0, sizeof(*page_info)); |
737 | } | 737 | } |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 4d1515f45ba2..4869d77cbe91 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -227,7 +227,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | |||
227 | } | 227 | } |
228 | 228 | ||
229 | rcu_read_lock(); | 229 | rcu_read_lock(); |
230 | ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); | 230 | ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); |
231 | if (ulp_ops) | 231 | if (ulp_ops) |
232 | ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); | 232 | ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); |
233 | rcu_read_unlock(); | 233 | rcu_read_unlock(); |
@@ -319,6 +319,20 @@ static int cnic_abort_prep(struct cnic_sock *csk) | |||
319 | return 0; | 319 | return 0; |
320 | } | 320 | } |
321 | 321 | ||
322 | static void cnic_uio_stop(void) | ||
323 | { | ||
324 | struct cnic_dev *dev; | ||
325 | |||
326 | read_lock(&cnic_dev_lock); | ||
327 | list_for_each_entry(dev, &cnic_dev_list, list) { | ||
328 | struct cnic_local *cp = dev->cnic_priv; | ||
329 | |||
330 | if (cp->cnic_uinfo) | ||
331 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); | ||
332 | } | ||
333 | read_unlock(&cnic_dev_lock); | ||
334 | } | ||
335 | |||
322 | int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) | 336 | int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) |
323 | { | 337 | { |
324 | struct cnic_dev *dev; | 338 | struct cnic_dev *dev; |
@@ -390,6 +404,9 @@ int cnic_unregister_driver(int ulp_type) | |||
390 | } | 404 | } |
391 | read_unlock(&cnic_dev_lock); | 405 | read_unlock(&cnic_dev_lock); |
392 | 406 | ||
407 | if (ulp_type == CNIC_ULP_ISCSI) | ||
408 | cnic_uio_stop(); | ||
409 | |||
393 | rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); | 410 | rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); |
394 | 411 | ||
395 | mutex_unlock(&cnic_lock); | 412 | mutex_unlock(&cnic_lock); |
@@ -632,7 +649,6 @@ static void cnic_free_resc(struct cnic_dev *dev) | |||
632 | int i = 0; | 649 | int i = 0; |
633 | 650 | ||
634 | if (cp->cnic_uinfo) { | 651 | if (cp->cnic_uinfo) { |
635 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); | ||
636 | while (cp->uio_dev != -1 && i < 15) { | 652 | while (cp->uio_dev != -1 && i < 15) { |
637 | msleep(100); | 653 | msleep(100); |
638 | i++; | 654 | i++; |
@@ -1057,6 +1073,9 @@ static void cnic_ulp_stop(struct cnic_dev *dev) | |||
1057 | struct cnic_local *cp = dev->cnic_priv; | 1073 | struct cnic_local *cp = dev->cnic_priv; |
1058 | int if_type; | 1074 | int if_type; |
1059 | 1075 | ||
1076 | if (cp->cnic_uinfo) | ||
1077 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); | ||
1078 | |||
1060 | rcu_read_lock(); | 1079 | rcu_read_lock(); |
1061 | for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { | 1080 | for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { |
1062 | struct cnic_ulp_ops *ulp_ops; | 1081 | struct cnic_ulp_ops *ulp_ops; |
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index cc2ab6412c73..4f7003485348 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -1784,7 +1784,7 @@ int __init init_module(void) | |||
1784 | printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); | 1784 | printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); |
1785 | } | 1785 | } |
1786 | 1786 | ||
1787 | for (i = 0; io[i] != -1 && i < MAX_EEPRO; i++) { | 1787 | for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { |
1788 | dev = alloc_etherdev(sizeof(struct eepro_local)); | 1788 | dev = alloc_etherdev(sizeof(struct eepro_local)); |
1789 | if (!dev) | 1789 | if (!dev) |
1790 | break; | 1790 | break; |
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 48385c42ab57..160655d24581 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c | |||
@@ -584,7 +584,8 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, | |||
584 | if (np->flags == HAS_MII_XCVR) { | 584 | if (np->flags == HAS_MII_XCVR) { |
585 | int phy, phy_idx = 0; | 585 | int phy, phy_idx = 0; |
586 | 586 | ||
587 | for (phy = 1; phy < 32 && phy_idx < 4; phy++) { | 587 | for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); |
588 | phy++) { | ||
588 | int mii_status = mdio_read(dev, phy, 1); | 589 | int mii_status = mdio_read(dev, phy, 1); |
589 | 590 | ||
590 | if (mii_status != 0xffff && mii_status != 0x0000) { | 591 | if (mii_status != 0xffff && mii_status != 0x0000) { |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index b892c3ad9a74..2bc2d2b20644 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -754,17 +754,16 @@ static int fs_init_phy(struct net_device *dev) | |||
754 | fep->oldlink = 0; | 754 | fep->oldlink = 0; |
755 | fep->oldspeed = 0; | 755 | fep->oldspeed = 0; |
756 | fep->oldduplex = -1; | 756 | fep->oldduplex = -1; |
757 | if(fep->fpi->phy_node) | 757 | |
758 | phydev = of_phy_connect(dev, fep->fpi->phy_node, | 758 | phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, |
759 | &fs_adjust_link, 0, | 759 | PHY_INTERFACE_MODE_MII); |
760 | PHY_INTERFACE_MODE_MII); | 760 | if (!phydev) { |
761 | else { | 761 | phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, |
762 | printk("No phy bus ID specified in BSP code\n"); | 762 | PHY_INTERFACE_MODE_MII); |
763 | return -EINVAL; | ||
764 | } | 763 | } |
765 | if (IS_ERR(phydev)) { | 764 | if (!phydev) { |
766 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 765 | dev_err(&dev->dev, "Could not attach to PHY\n"); |
767 | return PTR_ERR(phydev); | 766 | return -ENODEV; |
768 | } | 767 | } |
769 | 768 | ||
770 | fep->phydev = phydev; | 769 | fep->phydev = phydev; |
@@ -1005,6 +1004,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, | |||
1005 | goto out_free_fpi; | 1004 | goto out_free_fpi; |
1006 | } | 1005 | } |
1007 | 1006 | ||
1007 | SET_NETDEV_DEV(ndev, &ofdev->dev); | ||
1008 | dev_set_drvdata(&ofdev->dev, ndev); | 1008 | dev_set_drvdata(&ofdev->dev, ndev); |
1009 | 1009 | ||
1010 | fep = netdev_priv(ndev); | 1010 | fep = netdev_priv(ndev); |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 43d813ed9f45..f8ffcbf0bc39 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -264,15 +264,6 @@ static int gfar_of_init(struct net_device *dev) | |||
264 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; | 264 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; |
265 | 265 | ||
266 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); | 266 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); |
267 | if (!priv->phy_node) { | ||
268 | u32 *fixed_link; | ||
269 | |||
270 | fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); | ||
271 | if (!fixed_link) { | ||
272 | err = -ENODEV; | ||
273 | goto err_out; | ||
274 | } | ||
275 | } | ||
276 | 267 | ||
277 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ | 268 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ |
278 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | 269 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); |
@@ -659,13 +650,14 @@ static int init_phy(struct net_device *dev) | |||
659 | 650 | ||
660 | interface = gfar_get_interface(dev); | 651 | interface = gfar_get_interface(dev); |
661 | 652 | ||
662 | if (priv->phy_node) { | 653 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
663 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, | 654 | interface); |
664 | 0, interface); | 655 | if (!priv->phydev) |
665 | if (!priv->phydev) { | 656 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, |
666 | dev_err(&dev->dev, "error: Could not attach to PHY\n"); | 657 | interface); |
667 | return -ENODEV; | 658 | if (!priv->phydev) { |
668 | } | 659 | dev_err(&dev->dev, "could not attach to PHY\n"); |
660 | return -ENODEV; | ||
669 | } | 661 | } |
670 | 662 | ||
671 | if (interface == PHY_INTERFACE_MODE_SGMII) | 663 | if (interface == PHY_INTERFACE_MODE_SGMII) |
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index cd22323cfd22..1b12c7ba275f 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -327,6 +327,7 @@ struct ixgbe_adapter { | |||
327 | #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) | 327 | #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) |
328 | #define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) | 328 | #define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) |
329 | #define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) | 329 | #define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) |
330 | #define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) | ||
330 | #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) | 331 | #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) |
331 | 332 | ||
332 | u32 flags2; | 333 | u32 flags2; |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index da2c8514b8d0..1c7265732900 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -139,6 +139,18 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
139 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 139 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
140 | } | 140 | } |
141 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; | 141 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; |
142 | #ifdef IXGBE_FCOE | ||
143 | /* Turn on FCoE offload */ | ||
144 | if ((adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) && | ||
145 | (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))) { | ||
146 | adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; | ||
147 | adapter->ring_feature[RING_F_FCOE].indices = | ||
148 | IXGBE_FCRETA_SIZE; | ||
149 | netdev->features |= NETIF_F_FCOE_CRC; | ||
150 | netdev->features |= NETIF_F_FSO; | ||
151 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; | ||
152 | } | ||
153 | #endif /* IXGBE_FCOE */ | ||
142 | ixgbe_init_interrupt_scheme(adapter); | 154 | ixgbe_init_interrupt_scheme(adapter); |
143 | if (netif_running(netdev)) | 155 | if (netif_running(netdev)) |
144 | netdev->netdev_ops->ndo_open(netdev); | 156 | netdev->netdev_ops->ndo_open(netdev); |
@@ -156,6 +168,18 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
156 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 168 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
157 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 169 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) |
158 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | 170 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
171 | |||
172 | #ifdef IXGBE_FCOE | ||
173 | /* Turn off FCoE offload */ | ||
174 | if (adapter->flags & (IXGBE_FLAG_FCOE_CAPABLE | | ||
175 | IXGBE_FLAG_FCOE_ENABLED)) { | ||
176 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; | ||
177 | adapter->ring_feature[RING_F_FCOE].indices = 0; | ||
178 | netdev->features &= ~NETIF_F_FCOE_CRC; | ||
179 | netdev->features &= ~NETIF_F_FSO; | ||
180 | netdev->fcoe_ddp_xid = 0; | ||
181 | } | ||
182 | #endif /* IXGBE_FCOE */ | ||
159 | ixgbe_init_interrupt_scheme(adapter); | 183 | ixgbe_init_interrupt_scheme(adapter); |
160 | if (netif_running(netdev)) | 184 | if (netif_running(netdev)) |
161 | netdev->netdev_ops->ndo_open(netdev); | 185 | netdev->netdev_ops->ndo_open(netdev); |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e3442f47f932..200454f30f6a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/in.h> | 34 | #include <linux/in.h> |
35 | #include <linux/ip.h> | 35 | #include <linux/ip.h> |
36 | #include <linux/tcp.h> | 36 | #include <linux/tcp.h> |
37 | #include <linux/pkt_sched.h> | ||
37 | #include <linux/ipv6.h> | 38 | #include <linux/ipv6.h> |
38 | #include <net/checksum.h> | 39 | #include <net/checksum.h> |
39 | #include <net/ip6_checksum.h> | 40 | #include <net/ip6_checksum.h> |
@@ -510,8 +511,11 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, | |||
510 | * @skb: skb currently being received and modified | 511 | * @skb: skb currently being received and modified |
511 | **/ | 512 | **/ |
512 | static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, | 513 | static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, |
513 | u32 status_err, struct sk_buff *skb) | 514 | union ixgbe_adv_rx_desc *rx_desc, |
515 | struct sk_buff *skb) | ||
514 | { | 516 | { |
517 | u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); | ||
518 | |||
515 | skb->ip_summed = CHECKSUM_NONE; | 519 | skb->ip_summed = CHECKSUM_NONE; |
516 | 520 | ||
517 | /* Rx csum disabled */ | 521 | /* Rx csum disabled */ |
@@ -529,6 +533,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, | |||
529 | return; | 533 | return; |
530 | 534 | ||
531 | if (status_err & IXGBE_RXDADV_ERR_TCPE) { | 535 | if (status_err & IXGBE_RXDADV_ERR_TCPE) { |
536 | u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; | ||
537 | |||
538 | /* | ||
539 | * 82599 errata, UDP frames with a 0 checksum can be marked as | ||
540 | * checksum errors. | ||
541 | */ | ||
542 | if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && | ||
543 | (adapter->hw.mac.type == ixgbe_mac_82599EB)) | ||
544 | return; | ||
545 | |||
532 | adapter->hw_csum_rx_error++; | 546 | adapter->hw_csum_rx_error++; |
533 | return; | 547 | return; |
534 | } | 548 | } |
@@ -802,7 +816,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
802 | goto next_desc; | 816 | goto next_desc; |
803 | } | 817 | } |
804 | 818 | ||
805 | ixgbe_rx_checksum(adapter, staterr, skb); | 819 | ixgbe_rx_checksum(adapter, rx_desc, skb); |
806 | 820 | ||
807 | /* probably a little skewed due to removing CRC */ | 821 | /* probably a little skewed due to removing CRC */ |
808 | total_rx_bytes += skb->len; | 822 | total_rx_bytes += skb->len; |
@@ -3806,8 +3820,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3806 | adapter->atr_sample_rate = 20; | 3820 | adapter->atr_sample_rate = 20; |
3807 | adapter->fdir_pballoc = 0; | 3821 | adapter->fdir_pballoc = 0; |
3808 | #ifdef IXGBE_FCOE | 3822 | #ifdef IXGBE_FCOE |
3809 | adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; | 3823 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; |
3810 | adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; | 3824 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; |
3825 | adapter->ring_feature[RING_F_FCOE].indices = 0; | ||
3811 | #endif /* IXGBE_FCOE */ | 3826 | #endif /* IXGBE_FCOE */ |
3812 | } | 3827 | } |
3813 | 3828 | ||
@@ -5125,9 +5140,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
5125 | int count = 0; | 5140 | int count = 0; |
5126 | unsigned int f; | 5141 | unsigned int f; |
5127 | 5142 | ||
5128 | r_idx = skb->queue_mapping; | ||
5129 | tx_ring = &adapter->tx_ring[r_idx]; | ||
5130 | |||
5131 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | 5143 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { |
5132 | tx_flags |= vlan_tx_tag_get(skb); | 5144 | tx_flags |= vlan_tx_tag_get(skb); |
5133 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 5145 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
@@ -5137,11 +5149,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
5137 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5149 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5138 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5150 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5139 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 5151 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5140 | tx_flags |= (skb->queue_mapping << 13); | 5152 | if (skb->priority != TC_PRIO_CONTROL) { |
5141 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5153 | tx_flags |= (skb->queue_mapping << 13); |
5142 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5154 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5155 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | ||
5156 | } else { | ||
5157 | skb->queue_mapping = | ||
5158 | adapter->ring_feature[RING_F_DCB].indices-1; | ||
5159 | } | ||
5143 | } | 5160 | } |
5144 | 5161 | ||
5162 | r_idx = skb->queue_mapping; | ||
5163 | tx_ring = &adapter->tx_ring[r_idx]; | ||
5164 | |||
5145 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5165 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
5146 | (skb->protocol == htons(ETH_P_FCOE))) | 5166 | (skb->protocol == htons(ETH_P_FCOE))) |
5147 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | 5167 | tx_flags |= IXGBE_TX_FLAGS_FCOE; |
@@ -5580,16 +5600,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5580 | #endif | 5600 | #endif |
5581 | 5601 | ||
5582 | #ifdef IXGBE_FCOE | 5602 | #ifdef IXGBE_FCOE |
5583 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 5603 | if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { |
5584 | if (hw->mac.ops.get_device_caps) { | 5604 | if (hw->mac.ops.get_device_caps) { |
5585 | hw->mac.ops.get_device_caps(hw, &device_caps); | 5605 | hw->mac.ops.get_device_caps(hw, &device_caps); |
5586 | if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) { | 5606 | if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) |
5587 | netdev->features |= NETIF_F_FCOE_CRC; | 5607 | adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; |
5588 | netdev->features |= NETIF_F_FSO; | ||
5589 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; | ||
5590 | } else { | ||
5591 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; | ||
5592 | } | ||
5593 | } | 5608 | } |
5594 | } | 5609 | } |
5595 | #endif /* IXGBE_FCOE */ | 5610 | #endif /* IXGBE_FCOE */ |
@@ -5638,7 +5653,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5638 | adapter->wol = 0; | 5653 | adapter->wol = 0; |
5639 | break; | 5654 | break; |
5640 | } | 5655 | } |
5641 | device_init_wakeup(&adapter->pdev->dev, true); | ||
5642 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | 5656 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); |
5643 | 5657 | ||
5644 | /* pick up the PCI bus settings for reporting later */ | 5658 | /* pick up the PCI bus settings for reporting later */ |
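The new check in ixgbe_rx_checksum() works around an 82599 erratum: frames whose UDP checksum is zero can be flagged as TCPE errors, even though a zero UDP checksum over IPv4 simply means the sender skipped the checksum (RFC 768) and is legal. The driver detects the case from the Rx descriptor's packet-type bits; the sketch below only illustrates the underlying protocol rule with a hypothetical header inspection, it is not how the patch does it:

    #include <linux/types.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/udp.h>
    #include <linux/skbuff.h>

    /* Return true when a reported UDP checksum error should be ignored
     * because the sender did not compute a checksum at all (check == 0).
     * Sketch only; real drivers read this from hardware descriptors. */
    static bool udp_zero_csum_frame(const struct sk_buff *skb)
    {
            const struct iphdr *iph = ip_hdr(skb);
            const struct udphdr *uh;

            if (iph->protocol != IPPROTO_UDP)
                    return false;

            uh = (const struct udphdr *)((const u8 *)iph + iph->ihl * 4);
            return uh->check == 0;
    }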
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 5d3343ef3d86..7acf204e38c9 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -184,6 +184,13 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter) | |||
184 | kfree(recv_ctx->rds_rings); | 184 | kfree(recv_ctx->rds_rings); |
185 | 185 | ||
186 | skip_rds: | 186 | skip_rds: |
187 | if (recv_ctx->sds_rings == NULL) | ||
188 | goto skip_sds; | ||
189 | |||
190 | for(ring = 0; ring < adapter->max_sds_rings; ring++) | ||
191 | recv_ctx->sds_rings[ring].consumer = 0; | ||
192 | |||
193 | skip_sds: | ||
187 | if (adapter->tx_ring == NULL) | 194 | if (adapter->tx_ring == NULL) |
188 | return; | 195 | return; |
189 | 196 | ||
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 33984b737233..22cdd451fb82 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #ifdef CONFIG_OF_GPIO | 31 | #ifdef CONFIG_OF_GPIO |
32 | #include <linux/of_gpio.h> | 32 | #include <linux/of_gpio.h> |
33 | #include <linux/of_mdio.h> | ||
33 | #include <linux/of_platform.h> | 34 | #include <linux/of_platform.h> |
34 | #endif | 35 | #endif |
35 | 36 | ||
@@ -81,13 +82,12 @@ static struct mdiobb_ops mdio_gpio_ops = { | |||
81 | .get_mdio_data = mdio_get, | 82 | .get_mdio_data = mdio_get, |
82 | }; | 83 | }; |
83 | 84 | ||
84 | static int __devinit mdio_gpio_bus_init(struct device *dev, | 85 | static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, |
85 | struct mdio_gpio_platform_data *pdata, | 86 | struct mdio_gpio_platform_data *pdata, |
86 | int bus_id) | 87 | int bus_id) |
87 | { | 88 | { |
88 | struct mii_bus *new_bus; | 89 | struct mii_bus *new_bus; |
89 | struct mdio_gpio_info *bitbang; | 90 | struct mdio_gpio_info *bitbang; |
90 | int ret = -ENOMEM; | ||
91 | int i; | 91 | int i; |
92 | 92 | ||
93 | bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); | 93 | bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); |
@@ -104,8 +104,6 @@ static int __devinit mdio_gpio_bus_init(struct device *dev, | |||
104 | 104 | ||
105 | new_bus->name = "GPIO Bitbanged MDIO", | 105 | new_bus->name = "GPIO Bitbanged MDIO", |
106 | 106 | ||
107 | ret = -ENODEV; | ||
108 | |||
109 | new_bus->phy_mask = pdata->phy_mask; | 107 | new_bus->phy_mask = pdata->phy_mask; |
110 | new_bus->irq = pdata->irqs; | 108 | new_bus->irq = pdata->irqs; |
111 | new_bus->parent = dev; | 109 | new_bus->parent = dev; |
@@ -129,15 +127,8 @@ static int __devinit mdio_gpio_bus_init(struct device *dev, | |||
129 | 127 | ||
130 | dev_set_drvdata(dev, new_bus); | 128 | dev_set_drvdata(dev, new_bus); |
131 | 129 | ||
132 | ret = mdiobus_register(new_bus); | 130 | return new_bus; |
133 | if (ret) | ||
134 | goto out_free_all; | ||
135 | |||
136 | return 0; | ||
137 | 131 | ||
138 | out_free_all: | ||
139 | dev_set_drvdata(dev, NULL); | ||
140 | gpio_free(bitbang->mdio); | ||
141 | out_free_mdc: | 132 | out_free_mdc: |
142 | gpio_free(bitbang->mdc); | 133 | gpio_free(bitbang->mdc); |
143 | out_free_bus: | 134 | out_free_bus: |
@@ -145,30 +136,47 @@ out_free_bus: | |||
145 | out_free_bitbang: | 136 | out_free_bitbang: |
146 | kfree(bitbang); | 137 | kfree(bitbang); |
147 | out: | 138 | out: |
148 | return ret; | 139 | return NULL; |
149 | } | 140 | } |
150 | 141 | ||
151 | static void __devexit mdio_gpio_bus_destroy(struct device *dev) | 142 | static void __devinit mdio_gpio_bus_deinit(struct device *dev) |
152 | { | 143 | { |
153 | struct mii_bus *bus = dev_get_drvdata(dev); | 144 | struct mii_bus *bus = dev_get_drvdata(dev); |
154 | struct mdio_gpio_info *bitbang = bus->priv; | 145 | struct mdio_gpio_info *bitbang = bus->priv; |
155 | 146 | ||
156 | mdiobus_unregister(bus); | ||
157 | free_mdio_bitbang(bus); | ||
158 | dev_set_drvdata(dev, NULL); | 147 | dev_set_drvdata(dev, NULL); |
159 | gpio_free(bitbang->mdc); | ||
160 | gpio_free(bitbang->mdio); | 148 | gpio_free(bitbang->mdio); |
149 | gpio_free(bitbang->mdc); | ||
150 | free_mdio_bitbang(bus); | ||
161 | kfree(bitbang); | 151 | kfree(bitbang); |
162 | } | 152 | } |
163 | 153 | ||
154 | static void __devexit mdio_gpio_bus_destroy(struct device *dev) | ||
155 | { | ||
156 | struct mii_bus *bus = dev_get_drvdata(dev); | ||
157 | |||
158 | mdiobus_unregister(bus); | ||
159 | mdio_gpio_bus_deinit(dev); | ||
160 | } | ||
161 | |||
164 | static int __devinit mdio_gpio_probe(struct platform_device *pdev) | 162 | static int __devinit mdio_gpio_probe(struct platform_device *pdev) |
165 | { | 163 | { |
166 | struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; | 164 | struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; |
165 | struct mii_bus *new_bus; | ||
166 | int ret; | ||
167 | 167 | ||
168 | if (!pdata) | 168 | if (!pdata) |
169 | return -ENODEV; | 169 | return -ENODEV; |
170 | 170 | ||
171 | return mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); | 171 | new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); |
172 | if (!new_bus) | ||
173 | return -ENODEV; | ||
174 | |||
175 | ret = mdiobus_register(new_bus); | ||
176 | if (ret) | ||
177 | mdio_gpio_bus_deinit(&pdev->dev); | ||
178 | |||
179 | return ret; | ||
172 | } | 180 | } |
173 | 181 | ||
174 | static int __devexit mdio_gpio_remove(struct platform_device *pdev) | 182 | static int __devexit mdio_gpio_remove(struct platform_device *pdev) |
@@ -179,29 +187,12 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev) | |||
179 | } | 187 | } |
180 | 188 | ||
181 | #ifdef CONFIG_OF_GPIO | 189 | #ifdef CONFIG_OF_GPIO |
182 | static void __devinit add_phy(struct mdio_gpio_platform_data *pdata, | ||
183 | struct device_node *np) | ||
184 | { | ||
185 | const u32 *data; | ||
186 | int len, id, irq; | ||
187 | |||
188 | data = of_get_property(np, "reg", &len); | ||
189 | if (!data || len != 4) | ||
190 | return; | ||
191 | |||
192 | id = *data; | ||
193 | pdata->phy_mask &= ~(1 << id); | ||
194 | |||
195 | irq = of_irq_to_resource(np, 0, NULL); | ||
196 | if (irq) | ||
197 | pdata->irqs[id] = irq; | ||
198 | } | ||
199 | 190 | ||
200 | static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, | 191 | static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, |
201 | const struct of_device_id *match) | 192 | const struct of_device_id *match) |
202 | { | 193 | { |
203 | struct device_node *np = NULL; | ||
204 | struct mdio_gpio_platform_data *pdata; | 194 | struct mdio_gpio_platform_data *pdata; |
195 | struct mii_bus *new_bus; | ||
205 | int ret; | 196 | int ret; |
206 | 197 | ||
207 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | 198 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); |
@@ -215,14 +206,18 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, | |||
215 | 206 | ||
216 | ret = of_get_gpio(ofdev->node, 1); | 207 | ret = of_get_gpio(ofdev->node, 1); |
217 | if (ret < 0) | 208 | if (ret < 0) |
218 | goto out_free; | 209 | goto out_free; |
219 | pdata->mdio = ret; | 210 | pdata->mdio = ret; |
220 | 211 | ||
221 | while ((np = of_get_next_child(ofdev->node, np))) | 212 | new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); |
222 | if (!strcmp(np->type, "ethernet-phy")) | 213 | if (!new_bus) |
223 | add_phy(pdata, np); | 214 | return -ENODEV; |
224 | 215 | ||
225 | return mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); | 216 | ret = of_mdiobus_register(new_bus, ofdev->node); |
217 | if (ret) | ||
218 | mdio_gpio_bus_deinit(&ofdev->dev); | ||
219 | |||
220 | return ret; | ||
226 | 221 | ||
227 | out_free: | 222 | out_free: |
228 | kfree(pdata); | 223 | kfree(pdata); |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 4b53b58d75fc..b82780d805f5 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2060,8 +2060,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2060 | } | 2060 | } |
2061 | } | 2061 | } |
2062 | 2062 | ||
2063 | pci_set_master(pdev); | ||
2064 | |||
2065 | /* ioremap MMIO region */ | 2063 | /* ioremap MMIO region */ |
2066 | ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); | 2064 | ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); |
2067 | if (!ioaddr) { | 2065 | if (!ioaddr) { |
@@ -2089,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2089 | 2087 | ||
2090 | RTL_W16(IntrStatus, 0xffff); | 2088 | RTL_W16(IntrStatus, 0xffff); |
2091 | 2089 | ||
2090 | pci_set_master(pdev); | ||
2091 | |||
2092 | /* Identify chip attached to board */ | 2092 | /* Identify chip attached to board */ |
2093 | rtl8169_get_mac_version(tp, ioaddr); | 2093 | rtl8169_get_mac_version(tp, ioaddr); |
2094 | 2094 | ||
@@ -3874,6 +3874,15 @@ static void rtl_shutdown(struct pci_dev *pdev) | |||
3874 | spin_unlock_irq(&tp->lock); | 3874 | spin_unlock_irq(&tp->lock); |
3875 | 3875 | ||
3876 | if (system_state == SYSTEM_POWER_OFF) { | 3876 | if (system_state == SYSTEM_POWER_OFF) { |
3877 | /* WoL fails with some 8168 when the receiver is disabled. */ | ||
3878 | if (tp->features & RTL_FEATURE_WOL) { | ||
3879 | pci_clear_master(pdev); | ||
3880 | |||
3881 | RTL_W8(ChipCmd, CmdRxEnb); | ||
3882 | /* PCI commit */ | ||
3883 | RTL_R8(ChipCmd); | ||
3884 | } | ||
3885 | |||
3877 | pci_wake_from_d3(pdev, true); | 3886 | pci_wake_from_d3(pdev, true); |
3878 | pci_set_power_state(pdev, PCI_D3hot); | 3887 | pci_set_power_state(pdev, PCI_D3hot); |
3879 | } | 3888 | } |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 60d502eef4fc..543af2044f40 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -3854,8 +3854,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3854 | skge->speed = -1; | 3854 | skge->speed = -1; |
3855 | skge->advertising = skge_supported_modes(hw); | 3855 | skge->advertising = skge_supported_modes(hw); |
3856 | 3856 | ||
3857 | if (device_may_wakeup(&hw->pdev->dev)) | 3857 | if (device_can_wakeup(&hw->pdev->dev)) { |
3858 | skge->wol = wol_supported(hw) & WAKE_MAGIC; | 3858 | skge->wol = wol_supported(hw) & WAKE_MAGIC; |
3859 | device_set_wakeup_enable(&hw->pdev->dev, skge->wol); | ||
3860 | } | ||
3859 | 3861 | ||
3860 | hw->dev[port] = dev; | 3862 | hw->dev[port] = dev; |
3861 | 3863 | ||
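The skge hunk swaps device_may_wakeup() (is wakeup currently enabled by policy) for device_can_wakeup() (is the hardware capable of waking the system) and then records the chosen default with device_set_wakeup_enable(), so later device_may_wakeup() checks agree with the advertised WoL mode. A hedged sketch of that probe-time pattern with placeholder names:

    #include <linux/device.h>
    #include <linux/ethtool.h>

    /* Sketch: pick a default Wake-on-LAN setting at probe time. */
    static u32 example_default_wol(struct device *dev, u32 supported)
    {
            u32 wol = 0;

            if (device_can_wakeup(dev)) {           /* hardware capability */
                    wol = supported & WAKE_MAGIC;   /* default policy      */
                    device_set_wakeup_enable(dev, wol != 0);
            }

            /* device_may_wakeup(dev) now reflects the chosen default */
            return wol;
    }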
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index f1f773b17fe1..57a159fac99f 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -186,7 +186,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
186 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) | 186 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) |
187 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | 187 | #define SMC_IRQ_FLAGS (-1) /* from resource */ |
188 | 188 | ||
189 | #elif defined(CONFIG_MACH_LOGICPD_PXA270) | 189 | #elif defined(CONFIG_MACH_LOGICPD_PXA270) \ |
190 | || defined(CONFIG_MACH_NOMADIK_8815NHK) | ||
190 | 191 | ||
191 | #define SMC_CAN_USE_8BIT 0 | 192 | #define SMC_CAN_USE_8BIT 0 |
192 | #define SMC_CAN_USE_16BIT 1 | 193 | #define SMC_CAN_USE_16BIT 1 |
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 9d896116cf76..08a6c41c1599 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c | |||
@@ -1912,7 +1912,7 @@ static int __init ibmtr_init(void) | |||
1912 | 1912 | ||
1913 | find_turbo_adapters(io); | 1913 | find_turbo_adapters(io); |
1914 | 1914 | ||
1915 | for (i = 0; io[i] && (i < IBMTR_MAX_ADAPTERS); i++) { | 1915 | for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) { |
1916 | struct net_device *dev; | 1916 | struct net_device *dev; |
1917 | irq[i] = 0; | 1917 | irq[i] = 0; |
1918 | mem[i] = 0; | 1918 | mem[i] = 0; |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 40c6eba775ce..3b957e6412ee 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -1590,13 +1590,13 @@ static int init_phy(struct net_device *dev) | |||
1590 | priv->oldspeed = 0; | 1590 | priv->oldspeed = 0; |
1591 | priv->oldduplex = -1; | 1591 | priv->oldduplex = -1; |
1592 | 1592 | ||
1593 | if (!ug_info->phy_node) | ||
1594 | return 0; | ||
1595 | |||
1596 | phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, | 1593 | phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, |
1597 | priv->phy_interface); | 1594 | priv->phy_interface); |
1595 | if (!phydev) | ||
1596 | phydev = of_phy_connect_fixed_link(dev, &adjust_link, | ||
1597 | priv->phy_interface); | ||
1598 | if (!phydev) { | 1598 | if (!phydev) { |
1599 | printk("%s: Could not attach to PHY\n", dev->name); | 1599 | dev_err(&dev->dev, "Could not attach to PHY\n"); |
1600 | return -ENODEV; | 1600 | return -ENODEV; |
1601 | } | 1601 | } |
1602 | 1602 | ||
@@ -3608,9 +3608,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3608 | struct ucc_geth_private *ugeth = NULL; | 3608 | struct ucc_geth_private *ugeth = NULL; |
3609 | struct ucc_geth_info *ug_info; | 3609 | struct ucc_geth_info *ug_info; |
3610 | struct resource res; | 3610 | struct resource res; |
3611 | struct device_node *phy; | ||
3612 | int err, ucc_num, max_speed = 0; | 3611 | int err, ucc_num, max_speed = 0; |
3613 | const u32 *fixed_link; | ||
3614 | const unsigned int *prop; | 3612 | const unsigned int *prop; |
3615 | const char *sprop; | 3613 | const char *sprop; |
3616 | const void *mac_addr; | 3614 | const void *mac_addr; |
@@ -3708,15 +3706,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3708 | 3706 | ||
3709 | ug_info->uf_info.regs = res.start; | 3707 | ug_info->uf_info.regs = res.start; |
3710 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); | 3708 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); |
3711 | fixed_link = of_get_property(np, "fixed-link", NULL); | 3709 | |
3712 | if (fixed_link) { | 3710 | ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); |
3713 | phy = NULL; | ||
3714 | } else { | ||
3715 | phy = of_parse_phandle(np, "phy-handle", 0); | ||
3716 | if (phy == NULL) | ||
3717 | return -ENODEV; | ||
3718 | } | ||
3719 | ug_info->phy_node = phy; | ||
3720 | 3711 | ||
3721 | /* Find the TBI PHY node. If it's not there, we don't support SGMII */ | 3712 | /* Find the TBI PHY node. If it's not there, we don't support SGMII */ |
3722 | ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | 3713 | ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); |
@@ -3725,7 +3716,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3725 | prop = of_get_property(np, "phy-connection-type", NULL); | 3716 | prop = of_get_property(np, "phy-connection-type", NULL); |
3726 | if (!prop) { | 3717 | if (!prop) { |
3727 | /* handle interface property present in old trees */ | 3718 | /* handle interface property present in old trees */ |
3728 | prop = of_get_property(phy, "interface", NULL); | 3719 | prop = of_get_property(ug_info->phy_node, "interface", NULL); |
3729 | if (prop != NULL) { | 3720 | if (prop != NULL) { |
3730 | phy_interface = enet_to_phy_interface[*prop]; | 3721 | phy_interface = enet_to_phy_interface[*prop]; |
3731 | max_speed = enet_to_speed[*prop]; | 3722 | max_speed = enet_to_speed[*prop]; |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index ea045151f953..029c1bc7468f 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -2970,6 +2970,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
2970 | if (modparam_nohwcrypt) | 2970 | if (modparam_nohwcrypt) |
2971 | return -EOPNOTSUPP; | 2971 | return -EOPNOTSUPP; |
2972 | 2972 | ||
2973 | if (sc->opmode == NL80211_IFTYPE_AP) | ||
2974 | return -EOPNOTSUPP; | ||
2975 | |||
2973 | switch (key->alg) { | 2976 | switch (key->alg) { |
2974 | case ALG_WEP: | 2977 | case ALG_WEP: |
2975 | case ALG_TKIP: | 2978 | case ALG_TKIP: |
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 1aeafb511ddd..aad259b4c197 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c | |||
@@ -478,6 +478,18 @@ void ath9k_ani_reset(struct ath_hw *ah) | |||
478 | "Reset ANI state opmode %u\n", ah->opmode); | 478 | "Reset ANI state opmode %u\n", ah->opmode); |
479 | ah->stats.ast_ani_reset++; | 479 | ah->stats.ast_ani_reset++; |
480 | 480 | ||
481 | if (ah->opmode == NL80211_IFTYPE_AP) { | ||
482 | /* | ||
483 | * ath9k_hw_ani_control() will only process items set on | ||
484 | * ah->ani_function | ||
485 | */ | ||
486 | if (IS_CHAN_2GHZ(chan)) | ||
487 | ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | | ||
488 | ATH9K_ANI_FIRSTEP_LEVEL); | ||
489 | else | ||
490 | ah->ani_function = 0; | ||
491 | } | ||
492 | |||
481 | ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); | 493 | ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); |
482 | ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); | 494 | ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); |
483 | ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); | 495 | ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); |
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index eef370bd1211..bf3d25ba7be1 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c | |||
@@ -474,6 +474,21 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, | |||
474 | return 0; | 474 | return 0; |
475 | } | 475 | } |
476 | 476 | ||
477 | /* | ||
478 | * Some users have reported their EEPROM programmed with | ||
479 | * 0x8000 set, this is not a supported regulatory domain | ||
480 | * but since we have more than one user with it we need | ||
481 | * a solution for them. We default to 0x64, which is the | ||
482 | * default Atheros world regulatory domain. | ||
483 | */ | ||
484 | static void ath_regd_sanitize(struct ath_regulatory *reg) | ||
485 | { | ||
486 | if (reg->current_rd != COUNTRY_ERD_FLAG) | ||
487 | return; | ||
488 | printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); | ||
489 | reg->current_rd = 0x64; | ||
490 | } | ||
491 | |||
477 | int | 492 | int |
478 | ath_regd_init(struct ath_regulatory *reg, | 493 | ath_regd_init(struct ath_regulatory *reg, |
479 | struct wiphy *wiphy, | 494 | struct wiphy *wiphy, |
@@ -486,6 +501,8 @@ ath_regd_init(struct ath_regulatory *reg, | |||
486 | if (!reg) | 501 | if (!reg) |
487 | return -EINVAL; | 502 | return -EINVAL; |
488 | 503 | ||
504 | ath_regd_sanitize(reg); | ||
505 | |||
489 | printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); | 506 | printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); |
490 | 507 | ||
491 | if (!ath_regd_is_eeprom_valid(reg)) { | 508 | if (!ath_regd_is_eeprom_valid(reg)) { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 6d1519e1f011..355f50ea7fef 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -2675,12 +2675,10 @@ static ssize_t show_power_level(struct device *d, | |||
2675 | struct device_attribute *attr, char *buf) | 2675 | struct device_attribute *attr, char *buf) |
2676 | { | 2676 | { |
2677 | struct iwl_priv *priv = dev_get_drvdata(d); | 2677 | struct iwl_priv *priv = dev_get_drvdata(d); |
2678 | int mode = priv->power_data.user_power_setting; | ||
2679 | int level = priv->power_data.power_mode; | 2678 | int level = priv->power_data.power_mode; |
2680 | char *p = buf; | 2679 | char *p = buf; |
2681 | 2680 | ||
2682 | p += sprintf(p, "INDEX:%d\t", level); | 2681 | p += sprintf(p, "%d\n", level); |
2683 | p += sprintf(p, "USER:%d\n", mode); | ||
2684 | return p - buf + 1; | 2682 | return p - buf + 1; |
2685 | } | 2683 | } |
2686 | 2684 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 85ae7a62109c..9bbeec9427f0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -872,7 +872,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
872 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | 872 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); |
873 | 873 | ||
874 | /* Set up entry for this TFD in Tx byte-count array */ | 874 | /* Set up entry for this TFD in Tx byte-count array */ |
875 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, | 875 | if (info->flags & IEEE80211_TX_CTL_AMPDU) |
876 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, | ||
876 | le16_to_cpu(tx_cmd->len)); | 877 | le16_to_cpu(tx_cmd->len)); |
877 | 878 | ||
878 | pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, | 879 | pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index cb9bd4c8f25e..956798f2c80c 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -3643,12 +3643,10 @@ static ssize_t show_power_level(struct device *d, | |||
3643 | struct device_attribute *attr, char *buf) | 3643 | struct device_attribute *attr, char *buf) |
3644 | { | 3644 | { |
3645 | struct iwl_priv *priv = dev_get_drvdata(d); | 3645 | struct iwl_priv *priv = dev_get_drvdata(d); |
3646 | int mode = priv->power_data.user_power_setting; | ||
3647 | int level = priv->power_data.power_mode; | 3646 | int level = priv->power_data.power_mode; |
3648 | char *p = buf; | 3647 | char *p = buf; |
3649 | 3648 | ||
3650 | p += sprintf(p, "INDEX:%d\t", level); | 3649 | p += sprintf(p, "%d\n", level); |
3651 | p += sprintf(p, "USER:%d\n", mode); | ||
3652 | return p - buf + 1; | 3650 | return p - buf + 1; |
3653 | } | 3651 | } |
3654 | 3652 | ||
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index aaa20c6885c8..aea5ccf24ccf 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c | |||
@@ -151,8 +151,8 @@ void iwm_if_free(struct iwm_priv *iwm) | |||
151 | return; | 151 | return; |
152 | 152 | ||
153 | free_netdev(iwm_to_ndev(iwm)); | 153 | free_netdev(iwm_to_ndev(iwm)); |
154 | iwm_wdev_free(iwm); | ||
155 | iwm_priv_deinit(iwm); | 154 | iwm_priv_deinit(iwm); |
155 | iwm_wdev_free(iwm); | ||
156 | } | 156 | } |
157 | 157 | ||
158 | int iwm_if_add(struct iwm_priv *iwm) | 158 | int iwm_if_add(struct iwm_priv *iwm) |
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 01db705a38ec..685098148e10 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
@@ -135,8 +135,14 @@ int lbs_update_hw_spec(struct lbs_private *priv) | |||
135 | /* Clamp region code to 8-bit since FW spec indicates that it should | 135 | /* Clamp region code to 8-bit since FW spec indicates that it should |
136 | * only ever be 8-bit, even though the field size is 16-bit. Some firmware | 136 | * only ever be 8-bit, even though the field size is 16-bit. Some firmware |
137 | * returns non-zero high 8 bits here. | 137 | * returns non-zero high 8 bits here. |
138 | * | ||
139 | * Firmware version 4.0.102 used in CF8381 has region code shifted. We | ||
140 | * need to check for this problem and handle it properly. | ||
138 | */ | 141 | */ |
139 | priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; | 142 | if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V4) |
143 | priv->regioncode = (le16_to_cpu(cmd.regioncode) >> 8) & 0xFF; | ||
144 | else | ||
145 | priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; | ||
140 | 146 | ||
141 | for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { | 147 | for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { |
142 | /* use the region code to search for the index */ | 148 | /* use the region code to search for the index */ |
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h index 48da157d6cda..72f3479a4d70 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/libertas/defs.h | |||
@@ -234,6 +234,8 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in | |||
234 | /** Mesh enable bit in FW capability */ | 234 | /** Mesh enable bit in FW capability */ |
235 | #define MESH_CAPINFO_ENABLE_MASK (1<<16) | 235 | #define MESH_CAPINFO_ENABLE_MASK (1<<16) |
236 | 236 | ||
237 | /** FW definition from Marvell v4 */ | ||
238 | #define MRVL_FW_V4 (0x04) | ||
237 | /** FW definition from Marvell v5 */ | 239 | /** FW definition from Marvell v5 */ |
238 | #define MRVL_FW_V5 (0x05) | 240 | #define MRVL_FW_V5 (0x05) |
239 | /** FW definition from Marvell v10 */ | 241 | /** FW definition from Marvell v10 */ |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a111bda392e2..7916ca3f84c8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -709,7 +709,7 @@ static const struct ieee80211_ops mac80211_hwsim_ops = | |||
709 | static void mac80211_hwsim_free(void) | 709 | static void mac80211_hwsim_free(void) |
710 | { | 710 | { |
711 | struct list_head tmplist, *i, *tmp; | 711 | struct list_head tmplist, *i, *tmp; |
712 | struct mac80211_hwsim_data *data; | 712 | struct mac80211_hwsim_data *data, *tmpdata; |
713 | 713 | ||
714 | INIT_LIST_HEAD(&tmplist); | 714 | INIT_LIST_HEAD(&tmplist); |
715 | 715 | ||
@@ -718,7 +718,7 @@ static void mac80211_hwsim_free(void) | |||
718 | list_move(i, &tmplist); | 718 | list_move(i, &tmplist); |
719 | spin_unlock_bh(&hwsim_radio_lock); | 719 | spin_unlock_bh(&hwsim_radio_lock); |
720 | 720 | ||
721 | list_for_each_entry(data, &tmplist, list) { | 721 | list_for_each_entry_safe(data, tmpdata, &tmplist, list) { |
722 | debugfs_remove(data->debugfs_group); | 722 | debugfs_remove(data->debugfs_group); |
723 | debugfs_remove(data->debugfs_ps); | 723 | debugfs_remove(data->debugfs_ps); |
724 | debugfs_remove(data->debugfs); | 724 | debugfs_remove(data->debugfs); |
@@ -1167,8 +1167,8 @@ static void __exit exit_mac80211_hwsim(void) | |||
1167 | { | 1167 | { |
1168 | printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); | 1168 | printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); |
1169 | 1169 | ||
1170 | unregister_netdev(hwsim_mon); | ||
1171 | mac80211_hwsim_free(); | 1170 | mac80211_hwsim_free(); |
1171 | unregister_netdev(hwsim_mon); | ||
1172 | } | 1172 | } |
1173 | 1173 | ||
1174 | 1174 | ||
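mac80211_hwsim_free() unlinks and tears down radios while iterating, so it now uses list_for_each_entry_safe(), which saves the next node before the loop body can free the current one; plain list_for_each_entry() would walk freed memory after the first teardown. The usual idiom, sketched with a placeholder item type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head list;
            int value;
    };

    /* Free every item on @head; safe because @tmp holds the next node
     * before the current one is unlinked and freed. */
    static void free_all_items(struct list_head *head)
    {
            struct item *it, *tmp;

            list_for_each_entry_safe(it, tmp, head, list) {
                    list_del(&it->list);
                    kfree(it);
            }
    }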
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 83116baeb110..72c7dbd39d0a 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c | |||
@@ -635,7 +635,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) | |||
635 | 635 | ||
636 | hw = p54_init_common(sizeof(*priv)); | 636 | hw = p54_init_common(sizeof(*priv)); |
637 | if (!hw) { | 637 | if (!hw) { |
638 | dev_err(&priv->spi->dev, "could not alloc ieee80211_hw"); | 638 | dev_err(&spi->dev, "could not alloc ieee80211_hw"); |
639 | return -ENOMEM; | 639 | return -ENOMEM; |
640 | } | 640 | } |
641 | 641 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 66daf68ff0ee..ce75426764a1 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c | |||
@@ -1550,7 +1550,9 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) | |||
1550 | rt2500usb_register_read(rt2x00dev, MAC_CSR0, ®); | 1550 | rt2500usb_register_read(rt2x00dev, MAC_CSR0, ®); |
1551 | rt2x00_set_chip(rt2x00dev, RT2570, value, reg); | 1551 | rt2x00_set_chip(rt2x00dev, RT2570, value, reg); |
1552 | 1552 | ||
1553 | if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0)) { | 1553 | if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) || |
1554 | rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { | ||
1555 | |||
1554 | ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); | 1556 | ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); |
1555 | return -ENODEV; | 1557 | return -ENODEV; |
1556 | } | 1558 | } |
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c index b44253592243..cf9f899fe0e6 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c | |||
@@ -208,11 +208,12 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev) | |||
208 | { | 208 | { |
209 | struct rtl8187_priv *priv = dev->priv; | 209 | struct rtl8187_priv *priv = dev->priv; |
210 | 210 | ||
211 | rtl8187_unregister_led(&priv->led_tx); | ||
212 | /* turn the LED off before exiting */ | 211 | /* turn the LED off before exiting */ |
213 | queue_delayed_work(dev->workqueue, &priv->led_off, 0); | 212 | queue_delayed_work(dev->workqueue, &priv->led_off, 0); |
214 | cancel_delayed_work_sync(&priv->led_off); | 213 | cancel_delayed_work_sync(&priv->led_off); |
214 | cancel_delayed_work_sync(&priv->led_on); | ||
215 | rtl8187_unregister_led(&priv->led_rx); | 215 | rtl8187_unregister_led(&priv->led_rx); |
216 | rtl8187_unregister_led(&priv->led_tx); | ||
216 | } | 217 | } |
217 | #endif /* def CONFIG_RTL8187_LED */ | 218 | #endif /* def CONFIG_RTL8187_LED */ |
218 | 219 | ||
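
The rtl8187 LED hunk reorders shutdown so queued work cannot run against an LED that is already gone: the led_off work is queued once more to switch the LED off, both led_off and led_on are then cancelled synchronously, and only afterwards are the rx and tx LEDs unregistered. A generic sketch of that ordering with hypothetical names:

#include <linux/leds.h>
#include <linux/workqueue.h>

struct demo_leds {                      /* hypothetical per-device state */
        struct led_classdev tx;
        struct led_classdev rx;
        struct delayed_work led_off;
        struct delayed_work led_on;
};

static void demo_leds_exit(struct demo_leds *l)
{
        /* run the "off" work one last time, then make sure nothing is
         * still pending before the led_classdev objects disappear */
        schedule_delayed_work(&l->led_off, 0);
        cancel_delayed_work_sync(&l->led_off);
        cancel_delayed_work_sync(&l->led_on);
        led_classdev_unregister(&l->rx);
        led_classdev_unregister(&l->tx);
}
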
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index aee967d7f760..bacaa536fd51 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -9,6 +9,10 @@ | |||
9 | * out of the OpenFirmware device tree and using it to populate an mii_bus. | 9 | * out of the OpenFirmware device tree and using it to populate an mii_bus. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/netdevice.h> | ||
15 | #include <linux/err.h> | ||
12 | #include <linux/phy.h> | 16 | #include <linux/phy.h> |
13 | #include <linux/of.h> | 17 | #include <linux/of.h> |
14 | #include <linux/of_mdio.h> | 18 | #include <linux/of_mdio.h> |
@@ -137,3 +141,41 @@ struct phy_device *of_phy_connect(struct net_device *dev, | |||
137 | return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy; | 141 | return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy; |
138 | } | 142 | } |
139 | EXPORT_SYMBOL(of_phy_connect); | 143 | EXPORT_SYMBOL(of_phy_connect); |
144 | |||
145 | /** | ||
146 | * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy | ||
147 | * @dev: pointer to net_device claiming the phy | ||
148 | * @hndlr: Link state callback for the network device | ||
149 | * @iface: PHY data interface type | ||
150 | * | ||
151 | * This function is a temporary stop-gap and will be removed soon. It is | ||
152 | * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers. Do | ||
153 | * not call this function from new drivers. | ||
154 | */ | ||
155 | struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | ||
156 | void (*hndlr)(struct net_device *), | ||
157 | phy_interface_t iface) | ||
158 | { | ||
159 | struct device_node *net_np; | ||
160 | char bus_id[MII_BUS_ID_SIZE + 3]; | ||
161 | struct phy_device *phy; | ||
162 | const u32 *phy_id; | ||
163 | int sz; | ||
164 | |||
165 | if (!dev->dev.parent) | ||
166 | return NULL; | ||
167 | |||
168 | net_np = dev_archdata_get_node(&dev->dev.parent->archdata); | ||
169 | if (!net_np) | ||
170 | return NULL; | ||
171 | |||
172 | phy_id = of_get_property(net_np, "fixed-link", &sz); | ||
173 | if (!phy_id || sz < sizeof(*phy_id)) | ||
174 | return NULL; | ||
175 | |||
176 | sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]); | ||
177 | |||
178 | phy = phy_connect(dev, bus_id, hndlr, 0, iface); | ||
179 | return IS_ERR(phy) ? NULL : phy; | ||
180 | } | ||
181 | EXPORT_SYMBOL(of_phy_connect_fixed_link); | ||
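
The new of_phy_connect_fixed_link() helper reads the first cell of the node's "fixed-link" property, builds a PHY bus_id from it with PHY_ID_FMT, and hands that to phy_connect(); callers get NULL on any failure rather than an ERR_PTR. A hedged sketch of how one of the named drivers might call it from its open path (demo_open and demo_adjust_link are placeholders):

#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void demo_adjust_link(struct net_device *dev)
{
        /* react to link / speed / duplex changes reported by phylib */
}

static int demo_open(struct net_device *dev)
{
        struct phy_device *phydev;

        phydev = of_phy_connect_fixed_link(dev, demo_adjust_link,
                                           PHY_INTERFACE_MODE_MII);
        if (!phydev)
                return -ENODEV;         /* no usable fixed-link property */

        phy_start(phydev);
        return 0;
}
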
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index be2fd6f91639..fb45f5ee8df1 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -973,7 +973,7 @@ static int acer_rfkill_set(void *data, bool blocked) | |||
973 | { | 973 | { |
974 | acpi_status status; | 974 | acpi_status status; |
975 | u32 cap = (unsigned long)data; | 975 | u32 cap = (unsigned long)data; |
976 | status = set_u32(!!blocked, cap); | 976 | status = set_u32(!blocked, cap); |
977 | if (ACPI_FAILURE(status)) | 977 | if (ACPI_FAILURE(status)) |
978 | return -ENODEV; | 978 | return -ENODEV; |
979 | return 0; | 979 | return 0; |
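
In the rfkill core the set_block callback receives blocked == true when the radio must be switched off, while the WMI set_u32() call in acer-wmi takes 1 to mean "radio enabled", so the fix passes !blocked instead of !!blocked. A minimal sketch of a set_block handler with that polarity (everything except the rfkill API is hypothetical):

#include <linux/rfkill.h>

/* Hypothetical firmware call: 1 enables the radio, 0 disables it. */
static int demo_fw_set_radio(u32 enable, u32 cap)
{
        return 0;                       /* stand-in, always succeeds */
}

static int demo_rfkill_set(void *data, bool blocked)
{
        u32 cap = (unsigned long)data;

        /* blocked means "turn it off", so invert before handing it to
         * an enable-style firmware interface */
        if (demo_fw_set_radio(!blocked, cap))
                return -ENODEV;
        return 0;
}

static const struct rfkill_ops demo_rfkill_ops = {
        .set_block = demo_rfkill_set,
};
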
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 727a809636d8..ed3dcdea7fe1 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -1145,12 +1145,17 @@ ap_config_timeout(unsigned long ptr) | |||
1145 | */ | 1145 | */ |
1146 | static inline void ap_schedule_poll_timer(void) | 1146 | static inline void ap_schedule_poll_timer(void) |
1147 | { | 1147 | { |
1148 | ktime_t hr_time; | ||
1148 | if (ap_using_interrupts() || ap_suspend_flag) | 1149 | if (ap_using_interrupts() || ap_suspend_flag) |
1149 | return; | 1150 | return; |
1150 | if (hrtimer_is_queued(&ap_poll_timer)) | 1151 | if (hrtimer_is_queued(&ap_poll_timer)) |
1151 | return; | 1152 | return; |
1152 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), | 1153 | if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { |
1153 | HRTIMER_MODE_ABS); | 1154 | hr_time = ktime_set(0, poll_timeout); |
1155 | hrtimer_forward_now(&ap_poll_timer, hr_time); | ||
1156 | hrtimer_restart(&ap_poll_timer); | ||
1157 | } | ||
1158 | return; | ||
1154 | } | 1159 | } |
1155 | 1160 | ||
1156 | /** | 1161 | /** |
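
The ap_bus hunk replaces the unconditional hrtimer_start() with a rearm that only acts when the timer's expiry already lies in the past: the expiry is pushed forward from now by poll_timeout and the timer is restarted. A condensed sketch of that pattern (demo_poll_timer and demo_poll_timeout stand in for the driver's globals):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_poll_timer;
static unsigned long demo_poll_timeout = 250000;        /* ns, example value */

static void demo_schedule_poll_timer(void)
{
        if (hrtimer_is_queued(&demo_poll_timer))
                return;                 /* already armed, nothing to do */
        if (ktime_to_ns(hrtimer_expires_remaining(&demo_poll_timer)) <= 0) {
                /* expired: move the expiry forward from now and rearm */
                hrtimer_forward_now(&demo_poll_timer,
                                    ktime_set(0, demo_poll_timeout));
                hrtimer_restart(&demo_poll_timer);
        }
}
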
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 348bf61a8fec..975ecddbce30 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
@@ -103,8 +103,6 @@ source "drivers/staging/pohmelfs/Kconfig" | |||
103 | 103 | ||
104 | source "drivers/staging/stlc45xx/Kconfig" | 104 | source "drivers/staging/stlc45xx/Kconfig" |
105 | 105 | ||
106 | source "drivers/staging/uc2322/Kconfig" | ||
107 | |||
108 | source "drivers/staging/b3dfg/Kconfig" | 106 | source "drivers/staging/b3dfg/Kconfig" |
109 | 107 | ||
110 | source "drivers/staging/phison/Kconfig" | 108 | source "drivers/staging/phison/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 8d61d7b4debf..2241ae1b21ee 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
@@ -34,7 +34,6 @@ obj-$(CONFIG_ANDROID) += android/ | |||
34 | obj-$(CONFIG_DST) += dst/ | 34 | obj-$(CONFIG_DST) += dst/ |
35 | obj-$(CONFIG_POHMELFS) += pohmelfs/ | 35 | obj-$(CONFIG_POHMELFS) += pohmelfs/ |
36 | obj-$(CONFIG_STLC45XX) += stlc45xx/ | 36 | obj-$(CONFIG_STLC45XX) += stlc45xx/ |
37 | obj-$(CONFIG_USB_SERIAL_ATEN2011) += uc2322/ | ||
38 | obj-$(CONFIG_B3DFG) += b3dfg/ | 37 | obj-$(CONFIG_B3DFG) += b3dfg/ |
39 | obj-$(CONFIG_IDE_PHISON) += phison/ | 38 | obj-$(CONFIG_IDE_PHISON) += phison/ |
40 | obj-$(CONFIG_PLAN9AUTH) += p9auth/ | 39 | obj-$(CONFIG_PLAN9AUTH) += p9auth/ |
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index fe72240f5a9e..f934393f3959 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c | |||
@@ -96,19 +96,21 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) | |||
96 | 96 | ||
97 | read_lock(&tasklist_lock); | 97 | read_lock(&tasklist_lock); |
98 | for_each_process(p) { | 98 | for_each_process(p) { |
99 | struct mm_struct *mm; | ||
99 | int oom_adj; | 100 | int oom_adj; |
100 | 101 | ||
101 | task_lock(p); | 102 | task_lock(p); |
102 | if (!p->mm) { | 103 | mm = p->mm; |
104 | if (!mm) { | ||
103 | task_unlock(p); | 105 | task_unlock(p); |
104 | continue; | 106 | continue; |
105 | } | 107 | } |
106 | oom_adj = p->oomkilladj; | 108 | oom_adj = mm->oom_adj; |
107 | if (oom_adj < min_adj) { | 109 | if (oom_adj < min_adj) { |
108 | task_unlock(p); | 110 | task_unlock(p); |
109 | continue; | 111 | continue; |
110 | } | 112 | } |
111 | tasksize = get_mm_rss(p->mm); | 113 | tasksize = get_mm_rss(mm); |
112 | task_unlock(p); | 114 | task_unlock(p); |
113 | if (tasksize <= 0) | 115 | if (tasksize <= 0) |
114 | continue; | 116 | continue; |
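
The lowmemorykiller hunk snapshots p->mm once under task_lock() and uses that local pointer both for the adjustment value (now read from mm->oom_adj instead of the per-task oomkilladj) and for get_mm_rss(), so the mm cannot change or vanish between the two uses. A reduced sketch of the walk, assuming the 2.6.31-era mm->oom_adj field:

#include <linux/mm.h>
#include <linux/sched.h>

static int demo_scan(int min_adj)
{
        struct task_struct *p;
        int selected_adj = -1;

        read_lock(&tasklist_lock);
        for_each_process(p) {
                struct mm_struct *mm;
                int adj;
                long rss;

                task_lock(p);
                mm = p->mm;             /* snapshot while the task is locked */
                if (!mm) {              /* kernel thread, nothing to score */
                        task_unlock(p);
                        continue;
                }
                adj = mm->oom_adj;
                rss = get_mm_rss(mm);
                task_unlock(p);

                if (adj >= min_adj && rss > 0 && adj > selected_adj)
                        selected_adj = adj;
        }
        read_unlock(&tasklist_lock);
        return selected_adj;
}
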
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c index a9bd4106beb7..0fdf8c6dc648 100644 --- a/drivers/staging/serqt_usb2/serqt_usb2.c +++ b/drivers/staging/serqt_usb2/serqt_usb2.c | |||
@@ -360,18 +360,18 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
360 | if (port_paranoia_check(port, __func__) != 0) { | 360 | if (port_paranoia_check(port, __func__) != 0) { |
361 | dbg("%s - port_paranoia_check, exiting\n", __func__); | 361 | dbg("%s - port_paranoia_check, exiting\n", __func__); |
362 | qt_port->ReadBulkStopped = 1; | 362 | qt_port->ReadBulkStopped = 1; |
363 | return; | 363 | goto exit; |
364 | } | 364 | } |
365 | 365 | ||
366 | if (!serial) { | 366 | if (!serial) { |
367 | dbg("%s - bad serial pointer, exiting\n", __func__); | 367 | dbg("%s - bad serial pointer, exiting\n", __func__); |
368 | return; | 368 | goto exit; |
369 | } | 369 | } |
370 | if (qt_port->closePending == 1) { | 370 | if (qt_port->closePending == 1) { |
371 | /* Were closing , stop reading */ | 371 | /* Were closing , stop reading */ |
372 | dbg("%s - (qt_port->closepending == 1\n", __func__); | 372 | dbg("%s - (qt_port->closepending == 1\n", __func__); |
373 | qt_port->ReadBulkStopped = 1; | 373 | qt_port->ReadBulkStopped = 1; |
374 | return; | 374 | goto exit; |
375 | } | 375 | } |
376 | 376 | ||
377 | /* | 377 | /* |
@@ -381,7 +381,7 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
381 | */ | 381 | */ |
382 | if (qt_port->RxHolding == 1) { | 382 | if (qt_port->RxHolding == 1) { |
383 | qt_port->ReadBulkStopped = 1; | 383 | qt_port->ReadBulkStopped = 1; |
384 | return; | 384 | goto exit; |
385 | } | 385 | } |
386 | 386 | ||
387 | if (urb->status) { | 387 | if (urb->status) { |
@@ -389,7 +389,7 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
389 | 389 | ||
390 | dbg("%s - nonzero read bulk status received: %d\n", | 390 | dbg("%s - nonzero read bulk status received: %d\n", |
391 | __func__, urb->status); | 391 | __func__, urb->status); |
392 | return; | 392 | goto exit; |
393 | } | 393 | } |
394 | 394 | ||
395 | if (tty && RxCount) { | 395 | if (tty && RxCount) { |
@@ -463,6 +463,8 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
463 | } | 463 | } |
464 | 464 | ||
465 | schedule_work(&port->work); | 465 | schedule_work(&port->work); |
466 | exit: | ||
467 | tty_kref_put(tty); | ||
466 | } | 468 | } |
467 | 469 | ||
468 | /* | 470 | /* |
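
The read-bulk callback changes convert every early return into goto exit so the tty reference taken at the top of the callback is always released; tty_kref_put(NULL) is a no-op, so the shared exit path is safe even when no tty is attached. A stripped-down sketch of that pattern:

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static void demo_read_bulk_callback(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;
        struct tty_struct *tty = tty_port_tty_get(&port->port);

        if (urb->status)
                goto exit;              /* error: still drop the reference */

        if (tty && urb->actual_length) {
                tty_insert_flip_string(tty, urb->transfer_buffer,
                                       urb->actual_length);
                tty_flip_buffer_push(tty);
        }
        /* a real driver would resubmit the urb here */
exit:
        tty_kref_put(tty);              /* tty_kref_put(NULL) is harmless */
}
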
@@ -736,6 +738,11 @@ static int qt_startup(struct usb_serial *serial) | |||
736 | if (!qt_port) { | 738 | if (!qt_port) { |
737 | dbg("%s: kmalloc for quatech_port (%d) failed!.", | 739 | dbg("%s: kmalloc for quatech_port (%d) failed!.", |
738 | __func__, i); | 740 | __func__, i); |
741 | for(--i; i >= 0; i--) { | ||
742 | port = serial->port[i]; | ||
743 | kfree(usb_get_serial_port_data(port)); | ||
744 | usb_set_serial_port_data(port, NULL); | ||
745 | } | ||
739 | return -ENOMEM; | 746 | return -ENOMEM; |
740 | } | 747 | } |
741 | spin_lock_init(&qt_port->lock); | 748 | spin_lock_init(&qt_port->lock); |
@@ -1041,7 +1048,7 @@ static void qt_block_until_empty(struct tty_struct *tty, | |||
1041 | } | 1048 | } |
1042 | } | 1049 | } |
1043 | 1050 | ||
1044 | static void qt_close( struct usb_serial_port *port) | 1051 | static void qt_close(struct usb_serial_port *port) |
1045 | { | 1052 | { |
1046 | struct usb_serial *serial = port->serial; | 1053 | struct usb_serial *serial = port->serial; |
1047 | struct quatech_port *qt_port; | 1054 | struct quatech_port *qt_port; |
@@ -1068,6 +1075,7 @@ static void qt_close( struct usb_serial_port *port) | |||
1068 | /* wait up to for transmitter to empty */ | 1075 | /* wait up to for transmitter to empty */ |
1069 | if (serial->dev) | 1076 | if (serial->dev) |
1070 | qt_block_until_empty(tty, qt_port); | 1077 | qt_block_until_empty(tty, qt_port); |
1078 | tty_kref_put(tty); | ||
1071 | 1079 | ||
1072 | /* Close uart channel */ | 1080 | /* Close uart channel */ |
1073 | status = qt_close_channel(serial, index); | 1081 | status = qt_close_channel(serial, index); |
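
qt_startup() now unwinds on allocation failure: if the private struct for port i cannot be allocated, everything already attached to ports 0..i-1 is freed and the port data pointers are cleared before returning -ENOMEM; qt_close() likewise drops the tty reference it takes. A sketch of the allocate-or-unwind loop with a hypothetical per-port struct:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb/serial.h>

struct demo_port {                      /* hypothetical per-port state */
        spinlock_t lock;
};

static int demo_startup(struct usb_serial *serial)
{
        struct demo_port *dp;
        int i;

        for (i = 0; i < serial->num_ports; i++) {
                dp = kzalloc(sizeof(*dp), GFP_KERNEL);
                if (!dp)
                        goto err;
                spin_lock_init(&dp->lock);
                usb_set_serial_port_data(serial->port[i], dp);
        }
        return 0;

err:
        /* release what earlier iterations attached, newest first */
        for (--i; i >= 0; i--) {
                kfree(usb_get_serial_port_data(serial->port[i]));
                usb_set_serial_port_data(serial->port[i], NULL);
        }
        return -ENOMEM;
}
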
diff --git a/drivers/staging/uc2322/Kconfig b/drivers/staging/uc2322/Kconfig deleted file mode 100644 index 2e0c6e79df2b..000000000000 --- a/drivers/staging/uc2322/Kconfig +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | config USB_SERIAL_ATEN2011 | ||
2 | tristate "ATEN 2011 USB to serial device support" | ||
3 | depends on USB_SERIAL | ||
4 | default N | ||
5 | ---help--- | ||
6 | Say Y here if you want to use a ATEN 2011 dual port USB to serial | ||
7 | adapter. | ||
8 | |||
9 | To compile this driver as a module, choose M here: the module will be | ||
10 | called aten2011. | ||
diff --git a/drivers/staging/uc2322/Makefile b/drivers/staging/uc2322/Makefile deleted file mode 100644 index 49c18d6e579f..000000000000 --- a/drivers/staging/uc2322/Makefile +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | obj-$(CONFIG_USB_SERIAL_ATEN2011) += aten2011.o | ||
diff --git a/drivers/staging/uc2322/TODO b/drivers/staging/uc2322/TODO deleted file mode 100644 index c189a64c4185..000000000000 --- a/drivers/staging/uc2322/TODO +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | TODO: | ||
2 | - checkpatch.pl cleanups | ||
3 | - remove dead and useless code (auditing the tty ioctls to | ||
4 | verify that they really are correct and needed.) | ||
5 | |||
6 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and | ||
7 | Russell Lang <gsview@ghostgum.com.au>. | ||
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c deleted file mode 100644 index 39d0926d1a90..000000000000 --- a/drivers/staging/uc2322/aten2011.c +++ /dev/null | |||
@@ -1,2430 +0,0 @@ | |||
1 | /* | ||
2 | * Aten 2011 USB serial driver for 4 port devices | ||
3 | * | ||
4 | * Copyright (C) 2000 Inside Out Networks | ||
5 | * Copyright (C) 2001-2002, 2009 Greg Kroah-Hartman <greg@kroah.com> | ||
6 | * Copyright (C) 2009 Novell Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/tty.h> | ||
20 | #include <linux/tty_driver.h> | ||
21 | #include <linux/tty_flip.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/serial.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <linux/usb.h> | ||
26 | #include <linux/usb/serial.h> | ||
27 | |||
28 | |||
29 | #define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */ | ||
30 | #define ZLP_REG2 0x3B /* Zero_Flag_Reg2 59 */ | ||
31 | #define ZLP_REG3 0x3C /* Zero_Flag_Reg3 60 */ | ||
32 | #define ZLP_REG4 0x3D /* Zero_Flag_Reg4 61 */ | ||
33 | #define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */ | ||
34 | |||
35 | /* Interrupt Rotinue Defines */ | ||
36 | #define SERIAL_IIR_RLS 0x06 | ||
37 | #define SERIAL_IIR_RDA 0x04 | ||
38 | #define SERIAL_IIR_CTI 0x0c | ||
39 | #define SERIAL_IIR_THR 0x02 | ||
40 | #define SERIAL_IIR_MS 0x00 | ||
41 | |||
42 | /* Emulation of the bit mask on the LINE STATUS REGISTER. */ | ||
43 | #define SERIAL_LSR_DR 0x0001 | ||
44 | #define SERIAL_LSR_OE 0x0002 | ||
45 | #define SERIAL_LSR_PE 0x0004 | ||
46 | #define SERIAL_LSR_FE 0x0008 | ||
47 | #define SERIAL_LSR_BI 0x0010 | ||
48 | #define SERIAL_LSR_THRE 0x0020 | ||
49 | #define SERIAL_LSR_TEMT 0x0040 | ||
50 | #define SERIAL_LSR_FIFOERR 0x0080 | ||
51 | |||
52 | /* MSR bit defines(place holders) */ | ||
53 | #define ATEN_MSR_DELTA_CTS 0x10 | ||
54 | #define ATEN_MSR_DELTA_DSR 0x20 | ||
55 | #define ATEN_MSR_DELTA_RI 0x40 | ||
56 | #define ATEN_MSR_DELTA_CD 0x80 | ||
57 | |||
58 | /* Serial Port register Address */ | ||
59 | #define RECEIVE_BUFFER_REGISTER ((__u16)(0x00)) | ||
60 | #define TRANSMIT_HOLDING_REGISTER ((__u16)(0x00)) | ||
61 | #define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01)) | ||
62 | #define INTERRUPT_IDENT_REGISTER ((__u16)(0x02)) | ||
63 | #define FIFO_CONTROL_REGISTER ((__u16)(0x02)) | ||
64 | #define LINE_CONTROL_REGISTER ((__u16)(0x03)) | ||
65 | #define MODEM_CONTROL_REGISTER ((__u16)(0x04)) | ||
66 | #define LINE_STATUS_REGISTER ((__u16)(0x05)) | ||
67 | #define MODEM_STATUS_REGISTER ((__u16)(0x06)) | ||
68 | #define SCRATCH_PAD_REGISTER ((__u16)(0x07)) | ||
69 | #define DIVISOR_LATCH_LSB ((__u16)(0x00)) | ||
70 | #define DIVISOR_LATCH_MSB ((__u16)(0x01)) | ||
71 | |||
72 | #define SP1_REGISTER ((__u16)(0x00)) | ||
73 | #define CONTROL1_REGISTER ((__u16)(0x01)) | ||
74 | #define CLK_MULTI_REGISTER ((__u16)(0x02)) | ||
75 | #define CLK_START_VALUE_REGISTER ((__u16)(0x03)) | ||
76 | #define DCR1_REGISTER ((__u16)(0x04)) | ||
77 | #define GPIO_REGISTER ((__u16)(0x07)) | ||
78 | |||
79 | #define SERIAL_LCR_DLAB ((__u16)(0x0080)) | ||
80 | |||
81 | /* | ||
82 | * URB POOL related defines | ||
83 | */ | ||
84 | #define NUM_URBS 16 /* URB Count */ | ||
85 | #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ | ||
86 | |||
87 | #define USB_VENDOR_ID_ATENINTL 0x0557 | ||
88 | #define ATENINTL_DEVICE_ID_2011 0x2011 | ||
89 | #define ATENINTL_DEVICE_ID_7820 0x7820 | ||
90 | |||
91 | static struct usb_device_id id_table[] = { | ||
92 | { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_2011) }, | ||
93 | { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_7820) }, | ||
94 | { } /* terminating entry */ | ||
95 | }; | ||
96 | MODULE_DEVICE_TABLE(usb, id_table); | ||
97 | |||
98 | /* This structure holds all of the local port information */ | ||
99 | struct ATENINTL_port { | ||
100 | int port_num; /*Actual port number in the device(1,2,etc)*/ | ||
101 | __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ | ||
102 | unsigned char *bulk_out_buffer; /* buffer used for the bulk out endpoint */ | ||
103 | struct urb *write_urb; /* write URB for this port */ | ||
104 | __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ | ||
105 | unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ | ||
106 | struct urb *read_urb; /* read URB for this port */ | ||
107 | __u8 shadowLCR; /* last LCR value received */ | ||
108 | __u8 shadowMCR; /* last MCR value received */ | ||
109 | char open; | ||
110 | char chaseResponsePending; | ||
111 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ | ||
112 | wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ | ||
113 | struct async_icount icount; | ||
114 | struct usb_serial_port *port; /* loop back to the owner of this object */ | ||
115 | /*Offsets*/ | ||
116 | __u8 SpRegOffset; | ||
117 | __u8 ControlRegOffset; | ||
118 | __u8 DcrRegOffset; | ||
119 | /* for processing control URBS in interrupt context */ | ||
120 | struct urb *control_urb; | ||
121 | char *ctrl_buf; | ||
122 | int MsrLsr; | ||
123 | |||
124 | struct urb *write_urb_pool[NUM_URBS]; | ||
125 | /* we pass a pointer to this as the arguement sent to cypress_set_termios old_termios */ | ||
126 | struct ktermios tmp_termios; /* stores the old termios settings */ | ||
127 | spinlock_t lock; /* private lock */ | ||
128 | }; | ||
129 | |||
130 | /* This structure holds all of the individual serial device information */ | ||
131 | struct ATENINTL_serial { | ||
132 | __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ | ||
133 | unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ | ||
134 | struct urb *interrupt_read_urb; /* our interrupt urb */ | ||
135 | __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ | ||
136 | unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ | ||
137 | struct urb *read_urb; /* our bulk read urb */ | ||
138 | __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ | ||
139 | struct usb_serial *serial; /* loop back to the owner of this object */ | ||
140 | int ATEN2011_spectrum_2or4ports; /* this says the number of ports in the device */ | ||
141 | /* Indicates about the no.of opened ports of an individual USB-serial adapater. */ | ||
142 | unsigned int NoOfOpenPorts; | ||
143 | /* a flag for Status endpoint polling */ | ||
144 | unsigned char status_polling_started; | ||
145 | }; | ||
146 | |||
147 | static void ATEN2011_set_termios(struct tty_struct *tty, | ||
148 | struct usb_serial_port *port, | ||
149 | struct ktermios *old_termios); | ||
150 | static void ATEN2011_change_port_settings(struct tty_struct *tty, | ||
151 | struct ATENINTL_port *ATEN2011_port, | ||
152 | struct ktermios *old_termios); | ||
153 | |||
154 | /************************************* | ||
155 | * Bit definitions for each register * | ||
156 | *************************************/ | ||
157 | #define LCR_BITS_5 0x00 /* 5 bits/char */ | ||
158 | #define LCR_BITS_6 0x01 /* 6 bits/char */ | ||
159 | #define LCR_BITS_7 0x02 /* 7 bits/char */ | ||
160 | #define LCR_BITS_8 0x03 /* 8 bits/char */ | ||
161 | #define LCR_BITS_MASK 0x03 /* Mask for bits/char field */ | ||
162 | |||
163 | #define LCR_STOP_1 0x00 /* 1 stop bit */ | ||
164 | #define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */ | ||
165 | #define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */ | ||
166 | #define LCR_STOP_MASK 0x04 /* Mask for stop bits field */ | ||
167 | |||
168 | #define LCR_PAR_NONE 0x00 /* No parity */ | ||
169 | #define LCR_PAR_ODD 0x08 /* Odd parity */ | ||
170 | #define LCR_PAR_EVEN 0x18 /* Even parity */ | ||
171 | #define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */ | ||
172 | #define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */ | ||
173 | #define LCR_PAR_MASK 0x38 /* Mask for parity field */ | ||
174 | |||
175 | #define LCR_SET_BREAK 0x40 /* Set Break condition */ | ||
176 | #define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */ | ||
177 | |||
178 | #define MCR_DTR 0x01 /* Assert DTR */ | ||
179 | #define MCR_RTS 0x02 /* Assert RTS */ | ||
180 | #define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */ | ||
181 | #define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */ | ||
182 | #define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */ | ||
183 | #define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */ | ||
184 | |||
185 | #define ATEN2011_MSR_CTS 0x10 /* Current state of CTS */ | ||
186 | #define ATEN2011_MSR_DSR 0x20 /* Current state of DSR */ | ||
187 | #define ATEN2011_MSR_RI 0x40 /* Current state of RI */ | ||
188 | #define ATEN2011_MSR_CD 0x80 /* Current state of CD */ | ||
189 | |||
190 | |||
191 | static int debug; | ||
192 | |||
193 | /* | ||
194 | * Version Information | ||
195 | */ | ||
196 | #define DRIVER_VERSION "2.0" | ||
197 | #define DRIVER_DESC "ATENINTL 2011 USB Serial Adapter" | ||
198 | |||
199 | /* | ||
200 | * Defines used for sending commands to port | ||
201 | */ | ||
202 | |||
203 | #define ATEN_WDR_TIMEOUT (50) /* default urb timeout */ | ||
204 | |||
205 | /* Requests */ | ||
206 | #define ATEN_RD_RTYPE 0xC0 | ||
207 | #define ATEN_WR_RTYPE 0x40 | ||
208 | #define ATEN_RDREQ 0x0D | ||
209 | #define ATEN_WRREQ 0x0E | ||
210 | #define ATEN_CTRL_TIMEOUT 500 | ||
211 | #define VENDOR_READ_LENGTH (0x01) | ||
212 | |||
213 | /* set to 1 for RS485 mode and 0 for RS232 mode */ | ||
214 | /* FIXME make this somehow dynamic and not build time specific */ | ||
215 | static int RS485mode; | ||
216 | |||
217 | static int set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val) | ||
218 | { | ||
219 | struct usb_device *dev = port->serial->dev; | ||
220 | val = val & 0x00ff; | ||
221 | |||
222 | dbg("%s: is %x, value %x", __func__, reg, val); | ||
223 | |||
224 | return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ, | ||
225 | ATEN_WR_RTYPE, val, reg, NULL, 0, | ||
226 | ATEN_WDR_TIMEOUT); | ||
227 | } | ||
228 | |||
229 | static int get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val) | ||
230 | { | ||
231 | struct usb_device *dev = port->serial->dev; | ||
232 | int ret; | ||
233 | |||
234 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ, | ||
235 | ATEN_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH, | ||
236 | ATEN_WDR_TIMEOUT); | ||
237 | dbg("%s: offset is %x, return val %x", __func__, reg, *val); | ||
238 | *val = (*val) & 0x00ff; | ||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static int set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val) | ||
243 | { | ||
244 | struct usb_device *dev = port->serial->dev; | ||
245 | struct ATENINTL_serial *a_serial; | ||
246 | __u16 minor; | ||
247 | |||
248 | a_serial = usb_get_serial_data(port->serial); | ||
249 | minor = port->serial->minor; | ||
250 | if (minor == SERIAL_TTY_NO_MINOR) | ||
251 | minor = 0; | ||
252 | val = val & 0x00ff; | ||
253 | |||
254 | /* | ||
255 | * For the UART control registers, | ||
256 | * the application number need to be Or'ed | ||
257 | */ | ||
258 | if (a_serial->ATEN2011_spectrum_2or4ports == 4) | ||
259 | val |= (((__u16)port->number - minor) + 1) << 8; | ||
260 | else { | ||
261 | if (((__u16) port->number - minor) == 0) | ||
262 | val |= (((__u16)port->number - minor) + 1) << 8; | ||
263 | else | ||
264 | val |= (((__u16)port->number - minor) + 2) << 8; | ||
265 | } | ||
266 | dbg("%s: application number is %x", __func__, val); | ||
267 | |||
268 | return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ, | ||
269 | ATEN_WR_RTYPE, val, reg, NULL, 0, | ||
270 | ATEN_WDR_TIMEOUT); | ||
271 | } | ||
272 | |||
273 | static int get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val) | ||
274 | { | ||
275 | struct usb_device *dev = port->serial->dev; | ||
276 | int ret = 0; | ||
277 | __u16 wval; | ||
278 | struct ATENINTL_serial *a_serial; | ||
279 | __u16 minor = port->serial->minor; | ||
280 | |||
281 | a_serial = usb_get_serial_data(port->serial); | ||
282 | if (minor == SERIAL_TTY_NO_MINOR) | ||
283 | minor = 0; | ||
284 | |||
285 | /* wval is same as application number */ | ||
286 | if (a_serial->ATEN2011_spectrum_2or4ports == 4) | ||
287 | wval = (((__u16)port->number - minor) + 1) << 8; | ||
288 | else { | ||
289 | if (((__u16) port->number - minor) == 0) | ||
290 | wval = (((__u16) port->number - minor) + 1) << 8; | ||
291 | else | ||
292 | wval = (((__u16) port->number - minor) + 2) << 8; | ||
293 | } | ||
294 | dbg("%s: application number is %x", __func__, wval); | ||
295 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ, | ||
296 | ATEN_RD_RTYPE, wval, reg, val, VENDOR_READ_LENGTH, | ||
297 | ATEN_WDR_TIMEOUT); | ||
298 | *val = (*val) & 0x00ff; | ||
299 | return ret; | ||
300 | } | ||
301 | |||
302 | static int handle_newMsr(struct ATENINTL_port *port, __u8 newMsr) | ||
303 | { | ||
304 | struct ATENINTL_port *ATEN2011_port; | ||
305 | struct async_icount *icount; | ||
306 | ATEN2011_port = port; | ||
307 | icount = &ATEN2011_port->icount; | ||
308 | if (newMsr & | ||
309 | (ATEN_MSR_DELTA_CTS | ATEN_MSR_DELTA_DSR | ATEN_MSR_DELTA_RI | | ||
310 | ATEN_MSR_DELTA_CD)) { | ||
311 | icount = &ATEN2011_port->icount; | ||
312 | |||
313 | /* update input line counters */ | ||
314 | if (newMsr & ATEN_MSR_DELTA_CTS) | ||
315 | icount->cts++; | ||
316 | if (newMsr & ATEN_MSR_DELTA_DSR) | ||
317 | icount->dsr++; | ||
318 | if (newMsr & ATEN_MSR_DELTA_CD) | ||
319 | icount->dcd++; | ||
320 | if (newMsr & ATEN_MSR_DELTA_RI) | ||
321 | icount->rng++; | ||
322 | } | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static int handle_newLsr(struct ATENINTL_port *port, __u8 newLsr) | ||
328 | { | ||
329 | struct async_icount *icount; | ||
330 | |||
331 | dbg("%s - %02x", __func__, newLsr); | ||
332 | |||
333 | if (newLsr & SERIAL_LSR_BI) { | ||
334 | /* | ||
335 | * Parity and Framing errors only count if they occur exclusive | ||
336 | * of a break being received. | ||
337 | */ | ||
338 | newLsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI); | ||
339 | } | ||
340 | |||
341 | /* update input line counters */ | ||
342 | icount = &port->icount; | ||
343 | if (newLsr & SERIAL_LSR_BI) | ||
344 | icount->brk++; | ||
345 | if (newLsr & SERIAL_LSR_OE) | ||
346 | icount->overrun++; | ||
347 | if (newLsr & SERIAL_LSR_PE) | ||
348 | icount->parity++; | ||
349 | if (newLsr & SERIAL_LSR_FE) | ||
350 | icount->frame++; | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static void ATEN2011_control_callback(struct urb *urb) | ||
356 | { | ||
357 | unsigned char *data; | ||
358 | struct ATENINTL_port *ATEN2011_port; | ||
359 | __u8 regval = 0x0; | ||
360 | |||
361 | switch (urb->status) { | ||
362 | case 0: | ||
363 | /* success */ | ||
364 | break; | ||
365 | case -ECONNRESET: | ||
366 | case -ENOENT: | ||
367 | case -ESHUTDOWN: | ||
368 | /* this urb is terminated, clean up */ | ||
369 | dbg("%s - urb shutting down with status: %d", __func__, | ||
370 | urb->status); | ||
371 | return; | ||
372 | default: | ||
373 | dbg("%s - nonzero urb status received: %d", __func__, | ||
374 | urb->status); | ||
375 | goto exit; | ||
376 | } | ||
377 | |||
378 | ATEN2011_port = (struct ATENINTL_port *)urb->context; | ||
379 | |||
380 | dbg("%s urb buffer size is %d", __func__, urb->actual_length); | ||
381 | dbg("%s ATEN2011_port->MsrLsr is %d port %d", __func__, | ||
382 | ATEN2011_port->MsrLsr, ATEN2011_port->port_num); | ||
383 | data = urb->transfer_buffer; | ||
384 | regval = (__u8) data[0]; | ||
385 | dbg("%s data is %x", __func__, regval); | ||
386 | if (ATEN2011_port->MsrLsr == 0) | ||
387 | handle_newMsr(ATEN2011_port, regval); | ||
388 | else if (ATEN2011_port->MsrLsr == 1) | ||
389 | handle_newLsr(ATEN2011_port, regval); | ||
390 | |||
391 | exit: | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | static int ATEN2011_get_reg(struct ATENINTL_port *ATEN, __u16 Wval, __u16 reg, | ||
396 | __u16 *val) | ||
397 | { | ||
398 | struct usb_device *dev = ATEN->port->serial->dev; | ||
399 | struct usb_ctrlrequest *dr = NULL; | ||
400 | unsigned char *buffer = NULL; | ||
401 | int ret = 0; | ||
402 | buffer = (__u8 *) ATEN->ctrl_buf; | ||
403 | |||
404 | dr = (void *)(buffer + 2); | ||
405 | dr->bRequestType = ATEN_RD_RTYPE; | ||
406 | dr->bRequest = ATEN_RDREQ; | ||
407 | dr->wValue = cpu_to_le16(Wval); | ||
408 | dr->wIndex = cpu_to_le16(reg); | ||
409 | dr->wLength = cpu_to_le16(2); | ||
410 | |||
411 | usb_fill_control_urb(ATEN->control_urb, dev, usb_rcvctrlpipe(dev, 0), | ||
412 | (unsigned char *)dr, buffer, 2, | ||
413 | ATEN2011_control_callback, ATEN); | ||
414 | ATEN->control_urb->transfer_buffer_length = 2; | ||
415 | ret = usb_submit_urb(ATEN->control_urb, GFP_ATOMIC); | ||
416 | return ret; | ||
417 | } | ||
418 | |||
419 | static void ATEN2011_interrupt_callback(struct urb *urb) | ||
420 | { | ||
421 | int result; | ||
422 | int length; | ||
423 | struct ATENINTL_port *ATEN2011_port; | ||
424 | struct ATENINTL_serial *ATEN2011_serial; | ||
425 | struct usb_serial *serial; | ||
426 | __u16 Data; | ||
427 | unsigned char *data; | ||
428 | __u8 sp[5], st; | ||
429 | int i; | ||
430 | __u16 wval; | ||
431 | int minor; | ||
432 | |||
433 | dbg("%s", " : Entering"); | ||
434 | |||
435 | ATEN2011_serial = (struct ATENINTL_serial *)urb->context; | ||
436 | |||
437 | switch (urb->status) { | ||
438 | case 0: | ||
439 | /* success */ | ||
440 | break; | ||
441 | case -ECONNRESET: | ||
442 | case -ENOENT: | ||
443 | case -ESHUTDOWN: | ||
444 | /* this urb is terminated, clean up */ | ||
445 | dbg("%s - urb shutting down with status: %d", __func__, | ||
446 | urb->status); | ||
447 | return; | ||
448 | default: | ||
449 | dbg("%s - nonzero urb status received: %d", __func__, | ||
450 | urb->status); | ||
451 | goto exit; | ||
452 | } | ||
453 | length = urb->actual_length; | ||
454 | data = urb->transfer_buffer; | ||
455 | |||
456 | serial = ATEN2011_serial->serial; | ||
457 | |||
458 | /* ATENINTL get 5 bytes | ||
459 | * Byte 1 IIR Port 1 (port.number is 0) | ||
460 | * Byte 2 IIR Port 2 (port.number is 1) | ||
461 | * Byte 3 IIR Port 3 (port.number is 2) | ||
462 | * Byte 4 IIR Port 4 (port.number is 3) | ||
463 | * Byte 5 FIFO status for both */ | ||
464 | |||
465 | if (length && length > 5) { | ||
466 | dbg("%s", "Wrong data !!!"); | ||
467 | return; | ||
468 | } | ||
469 | |||
470 | /* MATRIX */ | ||
471 | if (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 4) { | ||
472 | sp[0] = (__u8) data[0]; | ||
473 | sp[1] = (__u8) data[1]; | ||
474 | sp[2] = (__u8) data[2]; | ||
475 | sp[3] = (__u8) data[3]; | ||
476 | st = (__u8) data[4]; | ||
477 | } else { | ||
478 | sp[0] = (__u8) data[0]; | ||
479 | sp[1] = (__u8) data[2]; | ||
480 | /* sp[2]=(__u8)data[2]; */ | ||
481 | /* sp[3]=(__u8)data[3]; */ | ||
482 | st = (__u8) data[4]; | ||
483 | |||
484 | } | ||
485 | for (i = 0; i < serial->num_ports; i++) { | ||
486 | ATEN2011_port = usb_get_serial_port_data(serial->port[i]); | ||
487 | minor = serial->minor; | ||
488 | if (minor == SERIAL_TTY_NO_MINOR) | ||
489 | minor = 0; | ||
490 | if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) | ||
491 | && (i != 0)) | ||
492 | wval = | ||
493 | (((__u16) serial->port[i]->number - | ||
494 | (__u16) (minor)) + 2) << 8; | ||
495 | else | ||
496 | wval = | ||
497 | (((__u16) serial->port[i]->number - | ||
498 | (__u16) (minor)) + 1) << 8; | ||
499 | if (ATEN2011_port->open != 0) { | ||
500 | if (sp[i] & 0x01) { | ||
501 | dbg("SP%d No Interrupt !!!", i); | ||
502 | } else { | ||
503 | switch (sp[i] & 0x0f) { | ||
504 | case SERIAL_IIR_RLS: | ||
505 | dbg("Serial Port %d: Receiver status error or address bit detected in 9-bit mode", i); | ||
506 | ATEN2011_port->MsrLsr = 1; | ||
507 | ATEN2011_get_reg(ATEN2011_port, wval, | ||
508 | LINE_STATUS_REGISTER, | ||
509 | &Data); | ||
510 | break; | ||
511 | case SERIAL_IIR_MS: | ||
512 | dbg("Serial Port %d: Modem status change", i); | ||
513 | ATEN2011_port->MsrLsr = 0; | ||
514 | ATEN2011_get_reg(ATEN2011_port, wval, | ||
515 | MODEM_STATUS_REGISTER, | ||
516 | &Data); | ||
517 | break; | ||
518 | } | ||
519 | } | ||
520 | } | ||
521 | |||
522 | } | ||
523 | exit: | ||
524 | if (ATEN2011_serial->status_polling_started == 0) | ||
525 | return; | ||
526 | |||
527 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
528 | if (result) { | ||
529 | dev_err(&urb->dev->dev, | ||
530 | "%s - Error %d submitting interrupt urb\n", | ||
531 | __func__, result); | ||
532 | } | ||
533 | |||
534 | return; | ||
535 | } | ||
536 | |||
537 | static void ATEN2011_bulk_in_callback(struct urb *urb) | ||
538 | { | ||
539 | int status; | ||
540 | unsigned char *data; | ||
541 | struct usb_serial *serial; | ||
542 | struct usb_serial_port *port; | ||
543 | struct ATENINTL_serial *ATEN2011_serial; | ||
544 | struct ATENINTL_port *ATEN2011_port; | ||
545 | struct tty_struct *tty; | ||
546 | |||
547 | if (urb->status) { | ||
548 | dbg("nonzero read bulk status received: %d", urb->status); | ||
549 | return; | ||
550 | } | ||
551 | |||
552 | ATEN2011_port = (struct ATENINTL_port *)urb->context; | ||
553 | |||
554 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
555 | serial = port->serial; | ||
556 | |||
557 | dbg("%s", "Entering..."); | ||
558 | |||
559 | data = urb->transfer_buffer; | ||
560 | ATEN2011_serial = usb_get_serial_data(serial); | ||
561 | |||
562 | if (urb->actual_length) { | ||
563 | tty = tty_port_tty_get(&ATEN2011_port->port->port); | ||
564 | if (tty) { | ||
565 | tty_buffer_request_room(tty, urb->actual_length); | ||
566 | tty_insert_flip_string(tty, data, urb->actual_length); | ||
567 | tty_flip_buffer_push(tty); | ||
568 | tty_kref_put(tty); | ||
569 | } | ||
570 | |||
571 | ATEN2011_port->icount.rx += urb->actual_length; | ||
572 | dbg("ATEN2011_port->icount.rx is %d:", | ||
573 | ATEN2011_port->icount.rx); | ||
574 | } | ||
575 | |||
576 | if (!ATEN2011_port->read_urb) { | ||
577 | dbg("%s", "URB KILLED !!!"); | ||
578 | return; | ||
579 | } | ||
580 | |||
581 | if (ATEN2011_port->read_urb->status != -EINPROGRESS) { | ||
582 | ATEN2011_port->read_urb->dev = serial->dev; | ||
583 | |||
584 | status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); | ||
585 | if (status) | ||
586 | dbg("usb_submit_urb(read bulk) failed, status = %d", status); | ||
587 | } | ||
588 | } | ||
589 | |||
590 | static void ATEN2011_bulk_out_data_callback(struct urb *urb) | ||
591 | { | ||
592 | struct ATENINTL_port *ATEN2011_port; | ||
593 | struct tty_struct *tty; | ||
594 | |||
595 | if (urb->status) { | ||
596 | dbg("nonzero write bulk status received:%d", urb->status); | ||
597 | return; | ||
598 | } | ||
599 | |||
600 | ATEN2011_port = (struct ATENINTL_port *)urb->context; | ||
601 | |||
602 | dbg("%s", "Entering ........."); | ||
603 | |||
604 | tty = tty_port_tty_get(&ATEN2011_port->port->port); | ||
605 | |||
606 | if (tty && ATEN2011_port->open) | ||
607 | /* tell the tty driver that something has changed */ | ||
608 | tty_wakeup(tty); | ||
609 | |||
610 | /* schedule_work(&ATEN2011_port->port->work); */ | ||
611 | tty_kref_put(tty); | ||
612 | |||
613 | } | ||
614 | |||
615 | #ifdef ATENSerialProbe | ||
616 | static int ATEN2011_serial_probe(struct usb_serial *serial, | ||
617 | const struct usb_device_id *id) | ||
618 | { | ||
619 | |||
620 | /*need to implement the mode_reg reading and updating\ | ||
621 | structures usb_serial_ device_type\ | ||
622 | (i.e num_ports, num_bulkin,bulkout etc) */ | ||
623 | /* Also we can update the changes attach */ | ||
624 | return 1; | ||
625 | } | ||
626 | #endif | ||
627 | |||
628 | static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port, | ||
629 | struct file *filp) | ||
630 | { | ||
631 | int response; | ||
632 | int j; | ||
633 | struct usb_serial *serial; | ||
634 | struct urb *urb; | ||
635 | __u16 Data; | ||
636 | int status; | ||
637 | struct ATENINTL_serial *ATEN2011_serial; | ||
638 | struct ATENINTL_port *ATEN2011_port; | ||
639 | struct ktermios tmp_termios; | ||
640 | int minor; | ||
641 | |||
642 | serial = port->serial; | ||
643 | |||
644 | ATEN2011_port = usb_get_serial_port_data(port); | ||
645 | |||
646 | if (ATEN2011_port == NULL) | ||
647 | return -ENODEV; | ||
648 | |||
649 | ATEN2011_serial = usb_get_serial_data(serial); | ||
650 | if (ATEN2011_serial == NULL) | ||
651 | return -ENODEV; | ||
652 | |||
653 | /* increment the number of opened ports counter here */ | ||
654 | ATEN2011_serial->NoOfOpenPorts++; | ||
655 | |||
656 | usb_clear_halt(serial->dev, port->write_urb->pipe); | ||
657 | usb_clear_halt(serial->dev, port->read_urb->pipe); | ||
658 | |||
659 | /* Initialising the write urb pool */ | ||
660 | for (j = 0; j < NUM_URBS; ++j) { | ||
661 | urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
662 | ATEN2011_port->write_urb_pool[j] = urb; | ||
663 | |||
664 | if (urb == NULL) { | ||
665 | err("No more urbs???"); | ||
666 | continue; | ||
667 | } | ||
668 | |||
669 | urb->transfer_buffer = NULL; | ||
670 | urb->transfer_buffer = | ||
671 | kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); | ||
672 | if (!urb->transfer_buffer) { | ||
673 | err("%s-out of memory for urb buffers.", __func__); | ||
674 | continue; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | /***************************************************************************** | ||
679 | * Initialize ATEN2011 -- Write Init values to corresponding Registers | ||
680 | * | ||
681 | * Register Index | ||
682 | * 1 : IER | ||
683 | * 2 : FCR | ||
684 | * 3 : LCR | ||
685 | * 4 : MCR | ||
686 | * | ||
687 | * 0x08 : SP1/2 Control Reg | ||
688 | *****************************************************************************/ | ||
689 | |||
690 | /* NEED to check the fallowing Block */ | ||
691 | |||
692 | Data = 0x0; | ||
693 | status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); | ||
694 | if (status < 0) { | ||
695 | dbg("Reading Spreg failed"); | ||
696 | return -1; | ||
697 | } | ||
698 | Data |= 0x80; | ||
699 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
700 | if (status < 0) { | ||
701 | dbg("writing Spreg failed"); | ||
702 | return -1; | ||
703 | } | ||
704 | |||
705 | Data &= ~0x80; | ||
706 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
707 | if (status < 0) { | ||
708 | dbg("writing Spreg failed"); | ||
709 | return -1; | ||
710 | } | ||
711 | |||
712 | /* End of block to be checked */ | ||
713 | /**************************CHECK***************************/ | ||
714 | |||
715 | if (RS485mode == 0) | ||
716 | Data = 0xC0; | ||
717 | else | ||
718 | Data = 0x00; | ||
719 | status = set_uart_reg(port, SCRATCH_PAD_REGISTER, Data); | ||
720 | if (status < 0) { | ||
721 | dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x", status); | ||
722 | return -1; | ||
723 | } else | ||
724 | dbg("SCRATCH_PAD_REGISTER Writing success status%d", status); | ||
725 | |||
726 | /**************************CHECK***************************/ | ||
727 | |||
728 | Data = 0x0; | ||
729 | status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); | ||
730 | if (status < 0) { | ||
731 | dbg("Reading Controlreg failed"); | ||
732 | return -1; | ||
733 | } | ||
734 | Data |= 0x08; /* Driver done bit */ | ||
735 | Data |= 0x20; /* rx_disable */ | ||
736 | status = 0; | ||
737 | status = | ||
738 | set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | ||
739 | if (status < 0) { | ||
740 | dbg("writing Controlreg failed"); | ||
741 | return -1; | ||
742 | } | ||
743 | /* | ||
744 | * do register settings here | ||
745 | * Set all regs to the device default values. | ||
746 | * First Disable all interrupts. | ||
747 | */ | ||
748 | |||
749 | Data = 0x00; | ||
750 | status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
751 | if (status < 0) { | ||
752 | dbg("disableing interrupts failed"); | ||
753 | return -1; | ||
754 | } | ||
755 | /* Set FIFO_CONTROL_REGISTER to the default value */ | ||
756 | Data = 0x00; | ||
757 | status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
758 | if (status < 0) { | ||
759 | dbg("Writing FIFO_CONTROL_REGISTER failed"); | ||
760 | return -1; | ||
761 | } | ||
762 | |||
763 | Data = 0xcf; /* chk */ | ||
764 | status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
765 | if (status < 0) { | ||
766 | dbg("Writing FIFO_CONTROL_REGISTER failed"); | ||
767 | return -1; | ||
768 | } | ||
769 | |||
770 | Data = 0x03; /* LCR_BITS_8 */ | ||
771 | status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
772 | ATEN2011_port->shadowLCR = Data; | ||
773 | |||
774 | Data = 0x0b; /* MCR_DTR|MCR_RTS|MCR_MASTER_IE */ | ||
775 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
776 | ATEN2011_port->shadowMCR = Data; | ||
777 | |||
778 | #ifdef Check | ||
779 | Data = 0x00; | ||
780 | status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); | ||
781 | ATEN2011_port->shadowLCR = Data; | ||
782 | |||
783 | Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */ | ||
784 | status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
785 | |||
786 | Data = 0x0c; | ||
787 | status = set_uart_reg(port, DIVISOR_LATCH_LSB, Data); | ||
788 | |||
789 | Data = 0x0; | ||
790 | status = set_uart_reg(port, DIVISOR_LATCH_MSB, Data); | ||
791 | |||
792 | Data = 0x00; | ||
793 | status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); | ||
794 | |||
795 | /* Data = ATEN2011_port->shadowLCR; */ /* data latch disable */ | ||
796 | Data = Data & ~SERIAL_LCR_DLAB; | ||
797 | status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
798 | ATEN2011_port->shadowLCR = Data; | ||
799 | #endif | ||
800 | /* clearing Bulkin and Bulkout Fifo */ | ||
801 | Data = 0x0; | ||
802 | status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); | ||
803 | |||
804 | Data = Data | 0x0c; | ||
805 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
806 | |||
807 | Data = Data & ~0x0c; | ||
808 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
809 | /* Finally enable all interrupts */ | ||
810 | Data = 0x0; | ||
811 | Data = 0x0c; | ||
812 | status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
813 | |||
814 | /* clearing rx_disable */ | ||
815 | Data = 0x0; | ||
816 | status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); | ||
817 | Data = Data & ~0x20; | ||
818 | status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | ||
819 | |||
820 | /* rx_negate */ | ||
821 | Data = 0x0; | ||
822 | status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); | ||
823 | Data = Data | 0x10; | ||
824 | status = 0; | ||
825 | status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | ||
826 | |||
827 | /* | ||
828 | * Check to see if we've set up our endpoint info yet | ||
829 | * (can't set it up in ATEN2011_startup as the structures | ||
830 | * were not set up at that time.) | ||
831 | */ | ||
832 | if (ATEN2011_serial->NoOfOpenPorts == 1) { | ||
833 | /* start the status polling here */ | ||
834 | ATEN2011_serial->status_polling_started = 1; | ||
835 | /* If not yet set, Set here */ | ||
836 | ATEN2011_serial->interrupt_in_buffer = | ||
837 | serial->port[0]->interrupt_in_buffer; | ||
838 | ATEN2011_serial->interrupt_in_endpoint = | ||
839 | serial->port[0]->interrupt_in_endpointAddress; | ||
840 | ATEN2011_serial->interrupt_read_urb = | ||
841 | serial->port[0]->interrupt_in_urb; | ||
842 | |||
843 | /* set up interrupt urb */ | ||
844 | usb_fill_int_urb(ATEN2011_serial->interrupt_read_urb, | ||
845 | serial->dev, | ||
846 | usb_rcvintpipe(serial->dev, | ||
847 | ATEN2011_serial-> | ||
848 | interrupt_in_endpoint), | ||
849 | ATEN2011_serial->interrupt_in_buffer, | ||
850 | ATEN2011_serial->interrupt_read_urb-> | ||
851 | transfer_buffer_length, | ||
852 | ATEN2011_interrupt_callback, ATEN2011_serial, | ||
853 | ATEN2011_serial->interrupt_read_urb->interval); | ||
854 | |||
855 | /* start interrupt read for ATEN2011 * | ||
856 | * will continue as long as ATEN2011 is connected */ | ||
857 | |||
858 | response = | ||
859 | usb_submit_urb(ATEN2011_serial->interrupt_read_urb, | ||
860 | GFP_KERNEL); | ||
861 | if (response) { | ||
862 | dbg("%s - Error %d submitting interrupt urb", | ||
863 | __func__, response); | ||
864 | } | ||
865 | |||
866 | } | ||
867 | |||
868 | /* | ||
869 | * See if we've set up our endpoint info yet | ||
870 | * (can't set it up in ATEN2011_startup as the | ||
871 | * structures were not set up at that time.) | ||
872 | */ | ||
873 | |||
874 | dbg("port number is %d", port->number); | ||
875 | dbg("serial number is %d", port->serial->minor); | ||
876 | dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress); | ||
877 | dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress); | ||
878 | dbg("Interrupt endpoint is %d", | ||
879 | port->interrupt_in_endpointAddress); | ||
880 | dbg("port's number in the device is %d", ATEN2011_port->port_num); | ||
881 | ATEN2011_port->bulk_in_buffer = port->bulk_in_buffer; | ||
882 | ATEN2011_port->bulk_in_endpoint = port->bulk_in_endpointAddress; | ||
883 | ATEN2011_port->read_urb = port->read_urb; | ||
884 | ATEN2011_port->bulk_out_endpoint = port->bulk_out_endpointAddress; | ||
885 | |||
886 | minor = port->serial->minor; | ||
887 | if (minor == SERIAL_TTY_NO_MINOR) | ||
888 | minor = 0; | ||
889 | |||
890 | /* set up our bulk in urb */ | ||
891 | if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) | ||
892 | && (((__u16) port->number - (__u16) (minor)) != 0)) { | ||
893 | usb_fill_bulk_urb(ATEN2011_port->read_urb, serial->dev, | ||
894 | usb_rcvbulkpipe(serial->dev, | ||
895 | (port-> | ||
896 | bulk_in_endpointAddress + | ||
897 | 2)), port->bulk_in_buffer, | ||
898 | ATEN2011_port->read_urb-> | ||
899 | transfer_buffer_length, | ||
900 | ATEN2011_bulk_in_callback, ATEN2011_port); | ||
901 | } else | ||
902 | usb_fill_bulk_urb(ATEN2011_port->read_urb, | ||
903 | serial->dev, | ||
904 | usb_rcvbulkpipe(serial->dev, | ||
905 | port-> | ||
906 | bulk_in_endpointAddress), | ||
907 | port->bulk_in_buffer, | ||
908 | ATEN2011_port->read_urb-> | ||
909 | transfer_buffer_length, | ||
910 | ATEN2011_bulk_in_callback, ATEN2011_port); | ||
911 | |||
912 | dbg("ATEN2011_open: bulkin endpoint is %d", | ||
913 | port->bulk_in_endpointAddress); | ||
914 | response = usb_submit_urb(ATEN2011_port->read_urb, GFP_KERNEL); | ||
915 | if (response) { | ||
916 | err("%s - Error %d submitting control urb", __func__, | ||
917 | response); | ||
918 | } | ||
919 | |||
920 | /* initialize our wait queues */ | ||
921 | init_waitqueue_head(&ATEN2011_port->wait_chase); | ||
922 | init_waitqueue_head(&ATEN2011_port->wait_command); | ||
923 | |||
924 | /* initialize our icount structure */ | ||
925 | memset(&(ATEN2011_port->icount), 0x00, sizeof(ATEN2011_port->icount)); | ||
926 | |||
927 | /* initialize our port settings */ | ||
928 | ATEN2011_port->shadowMCR = MCR_MASTER_IE; /* Must set to enable ints! */ | ||
929 | ATEN2011_port->chaseResponsePending = 0; | ||
930 | /* send a open port command */ | ||
931 | ATEN2011_port->open = 1; | ||
932 | /* ATEN2011_change_port_settings(ATEN2011_port,old_termios); */ | ||
933 | /* Setup termios */ | ||
934 | ATEN2011_set_termios(tty, port, &tmp_termios); | ||
935 | ATEN2011_port->icount.tx = 0; | ||
936 | ATEN2011_port->icount.rx = 0; | ||
937 | |||
938 | dbg("usb_serial serial:%x ATEN2011_port:%x\nATEN2011_serial:%x usb_serial_port port:%x", | ||
939 | (unsigned int)serial, (unsigned int)ATEN2011_port, | ||
940 | (unsigned int)ATEN2011_serial, (unsigned int)port); | ||
941 | |||
942 | return 0; | ||
943 | |||
944 | } | ||
945 | |||
946 | static int ATEN2011_chars_in_buffer(struct tty_struct *tty) | ||
947 | { | ||
948 | struct usb_serial_port *port = tty->driver_data; | ||
949 | int i; | ||
950 | int chars = 0; | ||
951 | struct ATENINTL_port *ATEN2011_port; | ||
952 | |||
953 | /* dbg("%s"," ATEN2011_chars_in_buffer:entering ..........."); */ | ||
954 | |||
955 | ATEN2011_port = usb_get_serial_port_data(port); | ||
956 | if (ATEN2011_port == NULL) { | ||
957 | dbg("%s", "ATEN2011_break:leaving ..........."); | ||
958 | return -1; | ||
959 | } | ||
960 | |||
961 | for (i = 0; i < NUM_URBS; ++i) | ||
962 | if (ATEN2011_port->write_urb_pool[i]->status == -EINPROGRESS) | ||
963 | chars += URB_TRANSFER_BUFFER_SIZE; | ||
964 | |||
965 | dbg("%s - returns %d", __func__, chars); | ||
966 | return chars; | ||
967 | |||
968 | } | ||
969 | |||
970 | static void ATEN2011_block_until_tx_empty(struct tty_struct *tty, | ||
971 | struct ATENINTL_port *ATEN2011_port) | ||
972 | { | ||
973 | int timeout = HZ / 10; | ||
974 | int wait = 30; | ||
975 | int count; | ||
976 | |||
977 | while (1) { | ||
978 | count = ATEN2011_chars_in_buffer(tty); | ||
979 | |||
980 | /* Check for Buffer status */ | ||
981 | if (count <= 0) | ||
982 | return; | ||
983 | |||
984 | /* Block the thread for a while */ | ||
985 | interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase, | ||
986 | timeout); | ||
987 | |||
988 | /* No activity.. count down section */ | ||
989 | wait--; | ||
990 | if (wait == 0) { | ||
991 | dbg("%s - TIMEOUT", __func__); | ||
992 | return; | ||
993 | } else { | ||
994 | /* Reset timout value back to seconds */ | ||
995 | wait = 30; | ||
996 | } | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | static void ATEN2011_close(struct tty_struct *tty, struct usb_serial_port *port, | ||
1001 | struct file *filp) | ||
1002 | { | ||
1003 | struct usb_serial *serial; | ||
1004 | struct ATENINTL_serial *ATEN2011_serial; | ||
1005 | struct ATENINTL_port *ATEN2011_port; | ||
1006 | int no_urbs; | ||
1007 | __u16 Data; | ||
1008 | |||
1009 | dbg("%s", "ATEN2011_close:entering..."); | ||
1010 | serial = port->serial; | ||
1011 | |||
1012 | /* take the Adpater and port's private data */ | ||
1013 | ATEN2011_serial = usb_get_serial_data(serial); | ||
1014 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1015 | if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL)) | ||
1016 | return; | ||
1017 | |||
1018 | if (serial->dev) { | ||
1019 | /* flush and block(wait) until tx is empty */ | ||
1020 | ATEN2011_block_until_tx_empty(tty, ATEN2011_port); | ||
1021 | } | ||
1022 | /* kill the ports URB's */ | ||
1023 | for (no_urbs = 0; no_urbs < NUM_URBS; no_urbs++) | ||
1024 | usb_kill_urb(ATEN2011_port->write_urb_pool[no_urbs]); | ||
1025 | /* Freeing Write URBs */ | ||
1026 | for (no_urbs = 0; no_urbs < NUM_URBS; ++no_urbs) { | ||
1027 | kfree(ATEN2011_port->write_urb_pool[no_urbs]->transfer_buffer); | ||
1028 | usb_free_urb(ATEN2011_port->write_urb_pool[no_urbs]); | ||
1029 | } | ||
1030 | /* While closing port, shutdown all bulk read, write * | ||
1031 | * and interrupt read if they exists */ | ||
1032 | if (serial->dev) { | ||
1033 | if (ATEN2011_port->write_urb) { | ||
1034 | dbg("%s", "Shutdown bulk write"); | ||
1035 | usb_kill_urb(ATEN2011_port->write_urb); | ||
1036 | } | ||
1037 | if (ATEN2011_port->read_urb) { | ||
1038 | dbg("%s", "Shutdown bulk read"); | ||
1039 | usb_kill_urb(ATEN2011_port->read_urb); | ||
1040 | } | ||
1041 | if ((&ATEN2011_port->control_urb)) { | ||
1042 | dbg("%s", "Shutdown control read"); | ||
1043 | /* usb_kill_urb (ATEN2011_port->control_urb); */ | ||
1044 | |||
1045 | } | ||
1046 | } | ||
1047 | /* if(ATEN2011_port->ctrl_buf != NULL) */ | ||
1048 | /* kfree(ATEN2011_port->ctrl_buf); */ | ||
1049 | /* decrement the no.of open ports counter of an individual USB-serial adapter. */ | ||
1050 | ATEN2011_serial->NoOfOpenPorts--; | ||
1051 | dbg("NoOfOpenPorts in close%d:in port%d", | ||
1052 | ATEN2011_serial->NoOfOpenPorts, port->number); | ||
1053 | if (ATEN2011_serial->NoOfOpenPorts == 0) { | ||
1054 | /* stop the stus polling here */ | ||
1055 | ATEN2011_serial->status_polling_started = 0; | ||
1056 | if (ATEN2011_serial->interrupt_read_urb) { | ||
1057 | dbg("%s", "Shutdown interrupt_read_urb"); | ||
1058 | /* ATEN2011_serial->interrupt_in_buffer=NULL; */ | ||
1059 | /* usb_kill_urb (ATEN2011_serial->interrupt_read_urb); */ | ||
1060 | } | ||
1061 | } | ||
1062 | if (ATEN2011_port->write_urb) { | ||
1063 | /* if this urb had a transfer buffer already (old tx) free it */ | ||
1064 | kfree(ATEN2011_port->write_urb->transfer_buffer); | ||
1065 | usb_free_urb(ATEN2011_port->write_urb); | ||
1066 | } | ||
1067 | |||
1068 | /* clear the MCR & IER */ | ||
1069 | Data = 0x00; | ||
1070 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1071 | Data = 0x00; | ||
1072 | set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
1073 | |||
1074 | ATEN2011_port->open = 0; | ||
1075 | dbg("%s", "Leaving ............"); | ||
1076 | |||
1077 | } | ||
1078 | |||
1079 | static void ATEN2011_block_until_chase_response(struct tty_struct *tty, | ||
1080 | struct ATENINTL_port | ||
1081 | *ATEN2011_port) | ||
1082 | { | ||
1083 | int timeout = 1 * HZ; | ||
1084 | int wait = 10; | ||
1085 | int count; | ||
1086 | |||
1087 | while (1) { | ||
1088 | count = ATEN2011_chars_in_buffer(tty); | ||
1089 | |||
1090 | /* Check for Buffer status */ | ||
1091 | if (count <= 0) { | ||
1092 | ATEN2011_port->chaseResponsePending = 0; | ||
1093 | return; | ||
1094 | } | ||
1095 | |||
1096 | /* Block the thread for a while */ | ||
1097 | interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase, | ||
1098 | timeout); | ||
1099 | /* No activity.. count down section */ | ||
1100 | wait--; | ||
1101 | if (wait == 0) { | ||
1102 | dbg("%s - TIMEOUT", __func__); | ||
1103 | return; | ||
1104 | } else { | ||
1105 | /* Reset timout value back to seconds */ | ||
1106 | wait = 10; | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | } | ||
1111 | |||
1112 | static void ATEN2011_break(struct tty_struct *tty, int break_state) | ||
1113 | { | ||
1114 | struct usb_serial_port *port = tty->driver_data; | ||
1115 | unsigned char data; | ||
1116 | struct usb_serial *serial; | ||
1117 | struct ATENINTL_serial *ATEN2011_serial; | ||
1118 | struct ATENINTL_port *ATEN2011_port; | ||
1119 | |||
1120 | dbg("%s", "Entering ..........."); | ||
1121 | dbg("ATEN2011_break: Start"); | ||
1122 | |||
1123 | serial = port->serial; | ||
1124 | |||
1125 | ATEN2011_serial = usb_get_serial_data(serial); | ||
1126 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1127 | |||
1128 | if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL)) | ||
1129 | return; | ||
1130 | |||
1131 | /* flush and chase */ | ||
1132 | ATEN2011_port->chaseResponsePending = 1; | ||
1133 | |||
1134 | if (serial->dev) { | ||
1135 | /* flush and block until tx is empty */ | ||
1136 | ATEN2011_block_until_chase_response(tty, ATEN2011_port); | ||
1137 | } | ||
1138 | |||
1139 | if (break_state == -1) | ||
1140 | data = ATEN2011_port->shadowLCR | LCR_SET_BREAK; | ||
1141 | else | ||
1142 | data = ATEN2011_port->shadowLCR & ~LCR_SET_BREAK; | ||
1143 | |||
1144 | ATEN2011_port->shadowLCR = data; | ||
1145 | dbg("ATEN2011_break ATEN2011_port->shadowLCR is %x", | ||
1146 | ATEN2011_port->shadowLCR); | ||
1147 | set_uart_reg(port, LINE_CONTROL_REGISTER, ATEN2011_port->shadowLCR); | ||
1148 | |||
1149 | return; | ||
1150 | } | ||
1151 | |||
1152 | static int ATEN2011_write_room(struct tty_struct *tty) | ||
1153 | { | ||
1154 | struct usb_serial_port *port = tty->driver_data; | ||
1155 | int i; | ||
1156 | int room = 0; | ||
1157 | struct ATENINTL_port *ATEN2011_port; | ||
1158 | |||
1159 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1160 | if (ATEN2011_port == NULL) { | ||
1161 | dbg("%s", "ATEN2011_break:leaving ..........."); | ||
1162 | return -1; | ||
1163 | } | ||
1164 | |||
1165 | for (i = 0; i < NUM_URBS; ++i) | ||
1166 | if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) | ||
1167 | room += URB_TRANSFER_BUFFER_SIZE; | ||
1168 | |||
1169 | dbg("%s - returns %d", __func__, room); | ||
1170 | return room; | ||
1171 | |||
1172 | } | ||
1173 | |||
1174 | static int ATEN2011_write(struct tty_struct *tty, struct usb_serial_port *port, | ||
1175 | const unsigned char *data, int count) | ||
1176 | { | ||
1177 | int status; | ||
1178 | int i; | ||
1179 | int bytes_sent = 0; | ||
1180 | int transfer_size; | ||
1181 | int minor; | ||
1182 | |||
1183 | struct ATENINTL_port *ATEN2011_port; | ||
1184 | struct usb_serial *serial; | ||
1185 | struct ATENINTL_serial *ATEN2011_serial; | ||
1186 | struct urb *urb; | ||
1187 | const unsigned char *current_position = data; | ||
1188 | unsigned char *data1; | ||
1189 | dbg("%s", "entering ..........."); | ||
1190 | |||
1191 | serial = port->serial; | ||
1192 | |||
1193 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1194 | if (ATEN2011_port == NULL) { | ||
1195 | dbg("%s", "ATEN2011_port is NULL"); | ||
1196 | return -1; | ||
1197 | } | ||
1198 | |||
1199 | ATEN2011_serial = usb_get_serial_data(serial); | ||
1200 | if (ATEN2011_serial == NULL) { | ||
1201 | dbg("%s", "ATEN2011_serial is NULL"); | ||
1202 | return -1; | ||
1203 | } | ||
1204 | |||
1205 | /* try to find a free urb in the list */ | ||
1206 | urb = NULL; | ||
1207 | |||
1208 | for (i = 0; i < NUM_URBS; ++i) { | ||
1209 | if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) { | ||
1210 | urb = ATEN2011_port->write_urb_pool[i]; | ||
1211 | dbg("URB:%d", i); | ||
1212 | break; | ||
1213 | } | ||
1214 | } | ||
1215 | |||
1216 | if (urb == NULL) { | ||
1217 | dbg("%s - no more free urbs", __func__); | ||
1218 | goto exit; | ||
1219 | } | ||
1220 | |||
1221 | if (urb->transfer_buffer == NULL) { | ||
1222 | urb->transfer_buffer = | ||
1223 | kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); | ||
1224 | |||
1225 | if (urb->transfer_buffer == NULL) { | ||
1226 | err("%s no more kernel memory...", __func__); | ||
1227 | goto exit; | ||
1228 | } | ||
1229 | } | ||
1230 | transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); | ||
1231 | |||
1232 | memcpy(urb->transfer_buffer, current_position, transfer_size); | ||
1233 | /* usb_serial_debug_data (__FILE__, __func__, transfer_size, urb->transfer_buffer); */ | ||
1234 | |||
1235 | /* fill urb with data and submit */ | ||
1236 | minor = port->serial->minor; | ||
1237 | if (minor == SERIAL_TTY_NO_MINOR) | ||
1238 | minor = 0; | ||
1239 | if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) | ||
1240 | && (((__u16) port->number - (__u16) (minor)) != 0)) { | ||
1241 | usb_fill_bulk_urb(urb, ATEN2011_serial->serial->dev, | ||
1242 | usb_sndbulkpipe(ATEN2011_serial->serial->dev, | ||
1243 | (port-> | ||
1244 | bulk_out_endpointAddress) + | ||
1245 | 2), urb->transfer_buffer, | ||
1246 | transfer_size, | ||
1247 | ATEN2011_bulk_out_data_callback, | ||
1248 | ATEN2011_port); | ||
1249 | } else | ||
1250 | |||
1251 | usb_fill_bulk_urb(urb, | ||
1252 | ATEN2011_serial->serial->dev, | ||
1253 | usb_sndbulkpipe(ATEN2011_serial->serial->dev, | ||
1254 | port-> | ||
1255 | bulk_out_endpointAddress), | ||
1256 | urb->transfer_buffer, transfer_size, | ||
1257 | ATEN2011_bulk_out_data_callback, | ||
1258 | ATEN2011_port); | ||
1259 | |||
1260 | data1 = urb->transfer_buffer; | ||
1261 | dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress); | ||
1262 | /* for(i=0;i < urb->actual_length;i++) */ | ||
1263 | /* dbg("Data is %c ",data1[i]); */ | ||
1264 | |||
1265 | /* send it down the pipe */ | ||
1266 | status = usb_submit_urb(urb, GFP_ATOMIC); | ||
1267 | |||
1268 | if (status) { | ||
1269 | err("%s - usb_submit_urb(write bulk) failed with status = %d", | ||
1270 | __func__, status); | ||
1271 | bytes_sent = status; | ||
1272 | goto exit; | ||
1273 | } | ||
1274 | bytes_sent = transfer_size; | ||
1275 | ATEN2011_port->icount.tx += transfer_size; | ||
1276 | dbg("ATEN2011_port->icount.tx is %d:", ATEN2011_port->icount.tx); | ||
1277 | |||
1278 | exit: | ||
1279 | return bytes_sent; | ||
1280 | } | ||
1281 | |||
1282 | static void ATEN2011_throttle(struct tty_struct *tty) | ||
1283 | { | ||
1284 | struct usb_serial_port *port = tty->driver_data; | ||
1285 | struct ATENINTL_port *ATEN2011_port; | ||
1286 | int status; | ||
1287 | |||
1288 | dbg("- port %d", port->number); | ||
1289 | |||
1290 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1291 | |||
1292 | if (ATEN2011_port == NULL) | ||
1293 | return; | ||
1294 | |||
1295 | if (!ATEN2011_port->open) { | ||
1296 | dbg("%s", "port not opened"); | ||
1297 | return; | ||
1298 | } | ||
1299 | |||
1300 | dbg("%s", "Entering .......... "); | ||
1301 | |||
1302 | if (!tty) { | ||
1303 | dbg("%s - no tty available", __func__); | ||
1304 | return; | ||
1305 | } | ||
1306 | |||
1307 | /* if we are implementing XON/XOFF, send the stop character */ | ||
1308 | if (I_IXOFF(tty)) { | ||
1309 | unsigned char stop_char = STOP_CHAR(tty); | ||
1310 | status = ATEN2011_write(tty, port, &stop_char, 1); | ||
1311 | if (status <= 0) | ||
1312 | return; | ||
1313 | } | ||
1314 | |||
1315 | /* if we are implementing RTS/CTS, toggle that line */ | ||
1316 | if (tty->termios->c_cflag & CRTSCTS) { | ||
1317 | ATEN2011_port->shadowMCR &= ~MCR_RTS; | ||
1318 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, | ||
1319 | ATEN2011_port->shadowMCR); | ||
1320 | if (status < 0) | ||
1321 | return; | ||
1322 | } | ||
1323 | |||
1324 | return; | ||
1325 | } | ||
1326 | |||
1327 | static void ATEN2011_unthrottle(struct tty_struct *tty) | ||
1328 | { | ||
1329 | struct usb_serial_port *port = tty->driver_data; | ||
1330 | int status; | ||
1331 | struct ATENINTL_port *ATEN2011_port = usb_get_serial_port_data(port); | ||
1332 | |||
1333 | if (ATEN2011_port == NULL) | ||
1334 | return; | ||
1335 | |||
1336 | if (!ATEN2011_port->open) { | ||
1337 | dbg("%s - port not opened", __func__); | ||
1338 | return; | ||
1339 | } | ||
1340 | |||
1341 | dbg("%s", "Entering .......... "); | ||
1342 | |||
1343 | if (!tty) { | ||
1344 | dbg("%s - no tty available", __func__); | ||
1345 | return; | ||
1346 | } | ||
1347 | |||
1348 | /* if we are implementing XON/XOFF, send the start character */ | ||
1349 | if (I_IXOFF(tty)) { | ||
1350 | unsigned char start_char = START_CHAR(tty); | ||
1351 | status = ATEN2011_write(tty, port, &start_char, 1); | ||
1352 | if (status <= 0) | ||
1353 | return; | ||
1354 | } | ||
1355 | |||
1356 | /* if we are implementing RTS/CTS, toggle that line */ | ||
1357 | if (tty->termios->c_cflag & CRTSCTS) { | ||
1358 | ATEN2011_port->shadowMCR |= MCR_RTS; | ||
1359 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, | ||
1360 | ATEN2011_port->shadowMCR); | ||
1361 | if (status < 0) | ||
1362 | return; | ||
1363 | } | ||
1364 | |||
1365 | return; | ||
1366 | } | ||
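
The throttle/unthrottle pair above only acts when software (IXOFF) or hardware (CRTSCTS) flow control is enabled for the port. A minimal userspace sketch of enabling either mode follows; it uses only standard termios calls, and the /dev/ttyUSB0 node name is assumed.

    /* flowctl_demo.c - enable XON/XOFF and RTS/CTS flow control */
    #include <fcntl.h>
    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
            struct termios tio;
            int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

            if (fd < 0 || tcgetattr(fd, &tio) < 0) {
                    perror("open/tcgetattr");
                    return 1;
            }

            /* With IXOFF set, throttle() sends STOP_CHAR (XOFF) and
             * unthrottle() sends START_CHAR (XON). */
            tio.c_iflag |= IXOFF;

            /* With CRTSCTS set, throttle() drops RTS and unthrottle()
             * raises it again via MODEM_CONTROL_REGISTER. */
            tio.c_cflag |= CRTSCTS;

            if (tcsetattr(fd, TCSANOW, &tio) < 0)
                    perror("tcsetattr");
            close(fd);
            return 0;
    }
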
1367 | |||
1368 | static int ATEN2011_tiocmget(struct tty_struct *tty, struct file *file) | ||
1369 | { | ||
1370 | struct usb_serial_port *port = tty->driver_data; | ||
1371 | struct ATENINTL_port *ATEN2011_port; | ||
1372 | unsigned int result; | ||
1373 | __u16 msr; | ||
1374 | __u16 mcr; | ||
1375 | /* unsigned int mcr; */ | ||
1376 | int status = 0; | ||
1377 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1378 | |||
1379 | dbg("%s - port %d", __func__, port->number); | ||
1380 | |||
1381 | if (ATEN2011_port == NULL) | ||
1382 | return -ENODEV; | ||
1383 | |||
1384 | status = get_uart_reg(port, MODEM_STATUS_REGISTER, &msr); | ||
1385 | status = get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr); | ||
1386 | /* mcr = ATEN2011_port->shadowMCR; */ | ||
1387 | /* COMMENT2: the following three lines are commented out so that only the MSR values are updated */ | ||
1388 | result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) | ||
1389 | | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) | ||
1390 | | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0) | ||
1391 | | ((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) | ||
1392 | | ((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) | ||
1393 | | ((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) | ||
1394 | | ((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); | ||
1395 | |||
1396 | dbg("%s - 0x%04X", __func__, result); | ||
1397 | |||
1398 | return result; | ||
1399 | } | ||
1400 | |||
1401 | static int ATEN2011_tiocmset(struct tty_struct *tty, struct file *file, | ||
1402 | unsigned int set, unsigned int clear) | ||
1403 | { | ||
1404 | struct usb_serial_port *port = tty->driver_data; | ||
1405 | struct ATENINTL_port *ATEN2011_port; | ||
1406 | unsigned int mcr; | ||
1407 | unsigned int status; | ||
1408 | |||
1409 | dbg("%s - port %d", __func__, port->number); | ||
1410 | |||
1411 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1412 | |||
1413 | if (ATEN2011_port == NULL) | ||
1414 | return -ENODEV; | ||
1415 | |||
1416 | mcr = ATEN2011_port->shadowMCR; | ||
1417 | if (clear & TIOCM_RTS) | ||
1418 | mcr &= ~MCR_RTS; | ||
1419 | if (clear & TIOCM_DTR) | ||
1420 | mcr &= ~MCR_DTR; | ||
1421 | if (clear & TIOCM_LOOP) | ||
1422 | mcr &= ~MCR_LOOPBACK; | ||
1423 | |||
1424 | if (set & TIOCM_RTS) | ||
1425 | mcr |= MCR_RTS; | ||
1426 | if (set & TIOCM_DTR) | ||
1427 | mcr |= MCR_DTR; | ||
1428 | if (set & TIOCM_LOOP) | ||
1429 | mcr |= MCR_LOOPBACK; | ||
1430 | |||
1431 | ATEN2011_port->shadowMCR = mcr; | ||
1432 | |||
1433 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); | ||
1434 | if (status < 0) { | ||
1435 | dbg("setting MODEM_CONTROL_REGISTER Failed"); | ||
1436 | return -1; | ||
1437 | } | ||
1438 | |||
1439 | return 0; | ||
1440 | } | ||
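
tiocmget()/tiocmset() above back the standard TIOCMGET/TIOCMBIS/TIOCMBIC ioctls (the driver's own ioctl handler carries duplicate TIOCMBIS/TIOCMBIC/TIOCMSET cases for the same job). A short userspace sketch that reads the modem lines and pulses RTS/DTR; the device node name is assumed.

    /* modemlines_demo.c - read and toggle modem control lines */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);
            int lines, bits = TIOCM_RTS | TIOCM_DTR;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* TIOCMGET -> tiocmget(): MSR/MCR state mapped to TIOCM_* bits */
            if (ioctl(fd, TIOCMGET, &lines) == 0)
                    printf("CTS=%d DSR=%d CD=%d RI=%d\n",
                           !!(lines & TIOCM_CTS), !!(lines & TIOCM_DSR),
                           !!(lines & TIOCM_CAR), !!(lines & TIOCM_RI));

            /* TIOCMBIS/TIOCMBIC -> tiocmset(): assert, then drop, RTS and DTR */
            ioctl(fd, TIOCMBIS, &bits);
            sleep(1);
            ioctl(fd, TIOCMBIC, &bits);

            close(fd);
            return 0;
    }
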
1441 | |||
1442 | static void ATEN2011_set_termios(struct tty_struct *tty, | ||
1443 | struct usb_serial_port *port, | ||
1444 | struct ktermios *old_termios) | ||
1445 | { | ||
1446 | int status; | ||
1447 | unsigned int cflag; | ||
1448 | struct usb_serial *serial; | ||
1449 | struct ATENINTL_port *ATEN2011_port; | ||
1450 | |||
1451 | dbg("ATEN2011_set_termios: START"); | ||
1452 | |||
1453 | serial = port->serial; | ||
1454 | |||
1455 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1456 | |||
1457 | if (ATEN2011_port == NULL) | ||
1458 | return; | ||
1459 | |||
1460 | if (!ATEN2011_port->open) { | ||
1461 | dbg("%s - port not opened", __func__); | ||
1462 | return; | ||
1463 | } | ||
1464 | |||
1465 | dbg("%s", "setting termios - "); | ||
1466 | |||
1467 | cflag = tty->termios->c_cflag; | ||
1468 | |||
1469 | dbg("%s - cflag %08x iflag %08x", __func__, | ||
1470 | tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); | ||
1471 | |||
1472 | if (old_termios) { | ||
1473 | dbg("%s - old clfag %08x old iflag %08x", __func__, | ||
1474 | old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); | ||
1475 | } | ||
1476 | |||
1477 | dbg("%s - port %d", __func__, port->number); | ||
1478 | |||
1479 | /* change the port settings to the new ones specified */ | ||
1480 | |||
1481 | ATEN2011_change_port_settings(tty, ATEN2011_port, old_termios); | ||
1482 | |||
1483 | if (!ATEN2011_port->read_urb) { | ||
1484 | dbg("%s", "URB KILLED !!!!!"); | ||
1485 | return; | ||
1486 | } | ||
1487 | |||
1488 | if (ATEN2011_port->read_urb->status != -EINPROGRESS) { | ||
1489 | ATEN2011_port->read_urb->dev = serial->dev; | ||
1490 | status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); | ||
1491 | if (status) { | ||
1492 | dbg | ||
1493 | (" usb_submit_urb(read bulk) failed, status = %d", | ||
1494 | status); | ||
1495 | } | ||
1496 | } | ||
1497 | return; | ||
1498 | } | ||
1499 | |||
1500 | static int get_lsr_info(struct tty_struct *tty, | ||
1501 | struct ATENINTL_port *ATEN2011_port, | ||
1502 | unsigned int __user *value) | ||
1503 | { | ||
1504 | int count; | ||
1505 | unsigned int result = 0; | ||
1506 | |||
1507 | count = ATEN2011_chars_in_buffer(tty); | ||
1508 | if (count == 0) { | ||
1509 | dbg("%s -- Empty", __func__); | ||
1510 | result = TIOCSER_TEMT; | ||
1511 | } | ||
1512 | |||
1513 | if (copy_to_user(value, &result, sizeof(int))) | ||
1514 | return -EFAULT; | ||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static int get_number_bytes_avail(struct tty_struct *tty, | ||
1519 | struct ATENINTL_port *ATEN2011_port, | ||
1520 | unsigned int __user *value) | ||
1521 | { | ||
1522 | unsigned int result = 0; | ||
1523 | |||
1524 | if (!tty) | ||
1525 | return -ENOIOCTLCMD; | ||
1526 | |||
1527 | result = tty->read_cnt; | ||
1528 | |||
1529 | dbg("%s(%d) = %d", __func__, ATEN2011_port->port->number, result); | ||
1530 | if (copy_to_user(value, &result, sizeof(int))) | ||
1531 | return -EFAULT; | ||
1532 | |||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | static int set_modem_info(struct ATENINTL_port *ATEN2011_port, unsigned int cmd, | ||
1537 | unsigned int __user *value) | ||
1538 | { | ||
1539 | unsigned int mcr; | ||
1540 | unsigned int arg; | ||
1541 | __u16 Data; | ||
1542 | int status; | ||
1543 | struct usb_serial_port *port; | ||
1544 | |||
1545 | if (ATEN2011_port == NULL) | ||
1546 | return -1; | ||
1547 | |||
1548 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
1549 | |||
1550 | mcr = ATEN2011_port->shadowMCR; | ||
1551 | |||
1552 | if (copy_from_user(&arg, value, sizeof(int))) | ||
1553 | return -EFAULT; | ||
1554 | |||
1555 | switch (cmd) { | ||
1556 | case TIOCMBIS: | ||
1557 | if (arg & TIOCM_RTS) | ||
1558 | mcr |= MCR_RTS; | ||
1559 | if (arg & TIOCM_DTR) | ||
1560 | mcr |= MCR_DTR; | ||
1561 | if (arg & TIOCM_LOOP) | ||
1562 | mcr |= MCR_LOOPBACK; | ||
1563 | break; | ||
1564 | |||
1565 | case TIOCMBIC: | ||
1566 | if (arg & TIOCM_RTS) | ||
1567 | mcr &= ~MCR_RTS; | ||
1568 | if (arg & TIOCM_DTR) | ||
1569 | mcr &= ~MCR_RTS; | ||
1570 | if (arg & TIOCM_LOOP) | ||
1571 | mcr &= ~MCR_LOOPBACK; | ||
1572 | break; | ||
1573 | |||
1574 | case TIOCMSET: | ||
1575 | /* turn off the RTS and DTR and LOOPBACK | ||
1576 | * and then only turn on what was asked to */ | ||
1577 | mcr &= ~(MCR_RTS | MCR_DTR | MCR_LOOPBACK); | ||
1578 | mcr |= ((arg & TIOCM_RTS) ? MCR_RTS : 0); | ||
1579 | mcr |= ((arg & TIOCM_DTR) ? MCR_DTR : 0); | ||
1580 | mcr |= ((arg & TIOCM_LOOP) ? MCR_LOOPBACK : 0); | ||
1581 | break; | ||
1582 | } | ||
1583 | |||
1584 | ATEN2011_port->shadowMCR = mcr; | ||
1585 | |||
1586 | Data = ATEN2011_port->shadowMCR; | ||
1587 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1588 | if (status < 0) { | ||
1589 | dbg("setting MODEM_CONTROL_REGISTER Failed"); | ||
1590 | return -1; | ||
1591 | } | ||
1592 | |||
1593 | return 0; | ||
1594 | } | ||
1595 | |||
1596 | static int get_modem_info(struct ATENINTL_port *ATEN2011_port, | ||
1597 | unsigned int __user *value) | ||
1598 | { | ||
1599 | unsigned int result = 0; | ||
1600 | __u16 msr; | ||
1601 | unsigned int mcr = ATEN2011_port->shadowMCR; | ||
1602 | int status; | ||
1603 | |||
1604 | status = get_uart_reg(ATEN2011_port->port, MODEM_STATUS_REGISTER, &msr); | ||
1605 | result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) /* 0x002 */ | ||
1606 | |((mcr & MCR_RTS) ? TIOCM_RTS : 0) /* 0x004 */ | ||
1607 | |((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) /* 0x020 */ | ||
1608 | |((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) /* 0x040 */ | ||
1609 | |((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) /* 0x080 */ | ||
1610 | |((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); /* 0x100 */ | ||
1611 | |||
1612 | dbg("%s -- %x", __func__, result); | ||
1613 | |||
1614 | if (copy_to_user(value, &result, sizeof(int))) | ||
1615 | return -EFAULT; | ||
1616 | return 0; | ||
1617 | } | ||
1618 | |||
1619 | static int get_serial_info(struct ATENINTL_port *ATEN2011_port, | ||
1620 | struct serial_struct __user *retinfo) | ||
1621 | { | ||
1622 | struct serial_struct tmp; | ||
1623 | |||
1624 | if (ATEN2011_port == NULL) | ||
1625 | return -1; | ||
1626 | |||
1627 | if (!retinfo) | ||
1628 | return -EFAULT; | ||
1629 | |||
1630 | memset(&tmp, 0, sizeof(tmp)); | ||
1631 | |||
1632 | tmp.type = PORT_16550A; | ||
1633 | tmp.line = ATEN2011_port->port->serial->minor; | ||
1634 | if (tmp.line == SERIAL_TTY_NO_MINOR) | ||
1635 | tmp.line = 0; | ||
1636 | tmp.port = ATEN2011_port->port->number; | ||
1637 | tmp.irq = 0; | ||
1638 | tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; | ||
1639 | tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE; | ||
1640 | tmp.baud_base = 9600; | ||
1641 | tmp.close_delay = 5 * HZ; | ||
1642 | tmp.closing_wait = 30 * HZ; | ||
1643 | |||
1644 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) | ||
1645 | return -EFAULT; | ||
1646 | return 0; | ||
1647 | } | ||
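
get_serial_info() answers TIOCGSERIAL with fixed values (16550A type, 9600 baud_base, NUM_URBS * URB_TRANSFER_BUFFER_SIZE as the FIFO size) rather than anything read back from the hardware. A minimal sketch of querying it from userspace, with the node name assumed:

    /* serinfo_demo.c - query TIOCGSERIAL (assumed node: /dev/ttyUSB0) */
    #include <fcntl.h>
    #include <linux/serial.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct serial_struct ss;
            int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, TIOCGSERIAL, &ss) == 0)
                    printf("type=%d line=%d baud_base=%d xmit_fifo_size=%d\n",
                           ss.type, ss.line, ss.baud_base, ss.xmit_fifo_size);
            else
                    perror("TIOCGSERIAL");
            close(fd);
            return 0;
    }
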
1648 | |||
1649 | static int ATEN2011_ioctl(struct tty_struct *tty, struct file *file, | ||
1650 | unsigned int cmd, unsigned long arg) | ||
1651 | { | ||
1652 | struct usb_serial_port *port = tty->driver_data; | ||
1653 | struct ATENINTL_port *ATEN2011_port; | ||
1654 | struct async_icount cnow; | ||
1655 | struct async_icount cprev; | ||
1656 | struct serial_icounter_struct icount; | ||
1657 | int ATENret = 0; | ||
1658 | unsigned int __user *user_arg = (unsigned int __user *)arg; | ||
1659 | |||
1660 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1661 | |||
1662 | if (ATEN2011_port == NULL) | ||
1663 | return -1; | ||
1664 | |||
1665 | dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd); | ||
1666 | |||
1667 | switch (cmd) { | ||
1668 | /* return number of bytes available */ | ||
1669 | |||
1670 | case TIOCINQ: | ||
1671 | dbg("%s (%d) TIOCINQ", __func__, port->number); | ||
1672 | return get_number_bytes_avail(tty, ATEN2011_port, user_arg); | ||
1673 | break; | ||
1674 | |||
1675 | case TIOCOUTQ: | ||
1676 | dbg("%s (%d) TIOCOUTQ", __func__, port->number); | ||
1677 | return put_user(ATEN2011_chars_in_buffer(tty), user_arg); | ||
1678 | break; | ||
1679 | |||
1680 | case TIOCSERGETLSR: | ||
1681 | dbg("%s (%d) TIOCSERGETLSR", __func__, port->number); | ||
1682 | return get_lsr_info(tty, ATEN2011_port, user_arg); | ||
1683 | return 0; | ||
1684 | |||
1685 | case TIOCMBIS: | ||
1686 | case TIOCMBIC: | ||
1687 | case TIOCMSET: | ||
1688 | dbg("%s (%d) TIOCMSET/TIOCMBIC/TIOCMSET", __func__, | ||
1689 | port->number); | ||
1690 | ATENret = set_modem_info(ATEN2011_port, cmd, user_arg); | ||
1691 | return ATENret; | ||
1692 | |||
1693 | case TIOCMGET: | ||
1694 | dbg("%s (%d) TIOCMGET", __func__, port->number); | ||
1695 | return get_modem_info(ATEN2011_port, user_arg); | ||
1696 | |||
1697 | case TIOCGSERIAL: | ||
1698 | dbg("%s (%d) TIOCGSERIAL", __func__, port->number); | ||
1699 | return get_serial_info(ATEN2011_port, | ||
1700 | (struct serial_struct __user *)arg); | ||
1701 | |||
1702 | case TIOCSSERIAL: | ||
1703 | dbg("%s (%d) TIOCSSERIAL", __func__, port->number); | ||
1704 | break; | ||
1705 | |||
1706 | case TIOCMIWAIT: | ||
1707 | dbg("%s (%d) TIOCMIWAIT", __func__, port->number); | ||
1708 | cprev = ATEN2011_port->icount; | ||
1709 | while (1) { | ||
1710 | /* see if a signal did it */ | ||
1711 | if (signal_pending(current)) | ||
1712 | return -ERESTARTSYS; | ||
1713 | cnow = ATEN2011_port->icount; | ||
1714 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | ||
1715 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | ||
1716 | return -EIO; /* no change => error */ | ||
1717 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || | ||
1718 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | ||
1719 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || | ||
1720 | ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { | ||
1721 | return 0; | ||
1722 | } | ||
1723 | cprev = cnow; | ||
1724 | } | ||
1725 | /* NOTREACHED */ | ||
1726 | break; | ||
1727 | |||
1728 | case TIOCGICOUNT: | ||
1729 | cnow = ATEN2011_port->icount; | ||
1730 | icount.cts = cnow.cts; | ||
1731 | icount.dsr = cnow.dsr; | ||
1732 | icount.rng = cnow.rng; | ||
1733 | icount.dcd = cnow.dcd; | ||
1734 | icount.rx = cnow.rx; | ||
1735 | icount.tx = cnow.tx; | ||
1736 | icount.frame = cnow.frame; | ||
1737 | icount.overrun = cnow.overrun; | ||
1738 | icount.parity = cnow.parity; | ||
1739 | icount.brk = cnow.brk; | ||
1740 | icount.buf_overrun = cnow.buf_overrun; | ||
1741 | |||
1742 | dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__, | ||
1743 | port->number, icount.rx, icount.tx); | ||
1744 | if (copy_to_user((void __user *)arg, &icount, sizeof(icount))) | ||
1745 | return -EFAULT; | ||
1746 | return 0; | ||
1747 | |||
1748 | default: | ||
1749 | break; | ||
1750 | } | ||
1751 | |||
1752 | return -ENOIOCTLCMD; | ||
1753 | } | ||
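
The TIOCMIWAIT and TIOCGICOUNT cases work off the icount structure that the status/interrupt path updates. A hedged userspace sketch follows; note that the TIOCMIWAIT loop above has no wait queue, so with this driver it returns -EIO as soon as it sees no change rather than sleeping, which is why the sketch treats that call as best effort. The device node name is assumed.

    /* icount_demo.c - wait for a DCD change (best effort), then dump counters */
    #include <fcntl.h>
    #include <linux/serial.h>       /* struct serial_icounter_struct */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct serial_icounter_struct ic;
            int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Ask to be told when carrier detect changes. */
            if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) < 0)
                    perror("TIOCMIWAIT");

            /* Snapshot of the per-port event counters kept in icount. */
            if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
                    printf("rx=%d tx=%d dcd=%d cts=%d dsr=%d rng=%d\n",
                           ic.rx, ic.tx, ic.dcd, ic.cts, ic.dsr, ic.rng);

            close(fd);
            return 0;
    }
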
1754 | |||
1755 | static int ATEN2011_calc_baud_rate_divisor(int baudRate, int *divisor, | ||
1756 | __u16 *clk_sel_val) | ||
1757 | { | ||
1758 | dbg("%s - %d", __func__, baudRate); | ||
1759 | |||
1760 | if (baudRate <= 115200) { | ||
1761 | *divisor = 115200 / baudRate; | ||
1762 | *clk_sel_val = 0x0; | ||
1763 | } | ||
1764 | if ((baudRate > 115200) && (baudRate <= 230400)) { | ||
1765 | *divisor = 230400 / baudRate; | ||
1766 | *clk_sel_val = 0x10; | ||
1767 | } else if ((baudRate > 230400) && (baudRate <= 403200)) { | ||
1768 | *divisor = 403200 / baudRate; | ||
1769 | *clk_sel_val = 0x20; | ||
1770 | } else if ((baudRate > 403200) && (baudRate <= 460800)) { | ||
1771 | *divisor = 460800 / baudRate; | ||
1772 | *clk_sel_val = 0x30; | ||
1773 | } else if ((baudRate > 460800) && (baudRate <= 806400)) { | ||
1774 | *divisor = 806400 / baudRate; | ||
1775 | *clk_sel_val = 0x40; | ||
1776 | } else if ((baudRate > 806400) && (baudRate <= 921600)) { | ||
1777 | *divisor = 921600 / baudRate; | ||
1778 | *clk_sel_val = 0x50; | ||
1779 | } else if ((baudRate > 921600) && (baudRate <= 1572864)) { | ||
1780 | *divisor = 1572864 / baudRate; | ||
1781 | *clk_sel_val = 0x60; | ||
1782 | } else if ((baudRate > 1572864) && (baudRate <= 3145728)) { | ||
1783 | *divisor = 3145728 / baudRate; | ||
1784 | *clk_sel_val = 0x70; | ||
1785 | } | ||
1786 | return 0; | ||
1787 | } | ||
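
The routine above picks a base clock (reported through clk_sel_val) and an integer divisor of it. For reference, a small standalone sketch of the same mapping with a few sample rates; it collapses the open-coded ranges into an else-if chain and, unlike the original, also clamps rates above 3145728, so treat it as illustrative rather than a drop-in replacement.

    /* divisor_demo.c - reproduce the baud -> (clk_sel, divisor) mapping */
    #include <stdio.h>

    static void calc(int baud, int *divisor, unsigned int *clk_sel)
    {
            if (baud <= 115200)       { *divisor = 115200 / baud;  *clk_sel = 0x00; }
            else if (baud <= 230400)  { *divisor = 230400 / baud;  *clk_sel = 0x10; }
            else if (baud <= 403200)  { *divisor = 403200 / baud;  *clk_sel = 0x20; }
            else if (baud <= 460800)  { *divisor = 460800 / baud;  *clk_sel = 0x30; }
            else if (baud <= 806400)  { *divisor = 806400 / baud;  *clk_sel = 0x40; }
            else if (baud <= 921600)  { *divisor = 921600 / baud;  *clk_sel = 0x50; }
            else if (baud <= 1572864) { *divisor = 1572864 / baud; *clk_sel = 0x60; }
            else                      { *divisor = 3145728 / baud; *clk_sel = 0x70; }
    }

    int main(void)
    {
            int rates[] = { 9600, 115200, 230400, 921600 };
            unsigned int sel;
            int i, div;

            for (i = 0; i < 4; i++) {
                    calc(rates[i], &div, &sel);
                    /* e.g. 9600 -> divisor 12 (DLL 0x0c, DLM 0x00), clk_sel 0x00;
                     * 230400 -> divisor 1, clk_sel 0x10.
                     * send_cmd_write_baud_rate() then writes DLL = div & 0xff
                     * and DLM = div >> 8 with DLAB set. */
                    printf("%7d baud: divisor=%d clk_sel=0x%02x\n",
                           rates[i], div, sel);
            }
            return 0;
    }
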
1788 | |||
1789 | static int ATEN2011_send_cmd_write_baud_rate(struct ATENINTL_port | ||
1790 | *ATEN2011_port, int baudRate) | ||
1791 | { | ||
1792 | int divisor = 0; | ||
1793 | int status; | ||
1794 | __u16 Data; | ||
1795 | unsigned char number; | ||
1796 | __u16 clk_sel_val; | ||
1797 | struct usb_serial_port *port; | ||
1798 | int minor; | ||
1799 | |||
1800 | if (ATEN2011_port == NULL) | ||
1801 | return -1; | ||
1802 | |||
1803 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
1804 | |||
1805 | dbg("%s", "Entering .......... "); | ||
1806 | |||
1807 | minor = ATEN2011_port->port->serial->minor; | ||
1808 | if (minor == SERIAL_TTY_NO_MINOR) | ||
1809 | minor = 0; | ||
1810 | number = ATEN2011_port->port->number - minor; | ||
1811 | |||
1812 | dbg("%s - port = %d, baud = %d", __func__, | ||
1813 | ATEN2011_port->port->number, baudRate); | ||
1814 | /* reset clk_uart_sel in spregOffset */ | ||
1815 | if (baudRate > 115200) { | ||
1816 | #ifdef HW_flow_control | ||
1817 | /* | ||
1818 | * NOTE: need to see the other register to modify | ||
1819 | * setting h/w flow control bit to 1; | ||
1820 | */ | ||
1821 | /* Data = ATEN2011_port->shadowMCR; */ | ||
1822 | Data = 0x2b; | ||
1823 | ATEN2011_port->shadowMCR = Data; | ||
1824 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1825 | if (status < 0) { | ||
1826 | dbg("Writing spreg failed in set_serial_baud"); | ||
1827 | return -1; | ||
1828 | } | ||
1829 | #endif | ||
1830 | |||
1831 | } else { | ||
1832 | #ifdef HW_flow_control | ||
1833 | /* setting h/w flow control bit to 0; */ | ||
1834 | /* Data = ATEN2011_port->shadowMCR; */ | ||
1835 | Data = 0xb; | ||
1836 | ATEN2011_port->shadowMCR = Data; | ||
1837 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1838 | if (status < 0) { | ||
1839 | dbg("Writing spreg failed in set_serial_baud"); | ||
1840 | return -1; | ||
1841 | } | ||
1842 | #endif | ||
1843 | |||
1844 | } | ||
1845 | |||
1846 | if (1) /* baudRate <= 115200) */ { | ||
1847 | clk_sel_val = 0x0; | ||
1848 | Data = 0x0; | ||
1849 | status = | ||
1850 | ATEN2011_calc_baud_rate_divisor(baudRate, &divisor, | ||
1851 | &clk_sel_val); | ||
1852 | status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); | ||
1853 | if (status < 0) { | ||
1854 | dbg("reading spreg failed in set_serial_baud"); | ||
1855 | return -1; | ||
1856 | } | ||
1857 | Data = (Data & 0x8f) | clk_sel_val; | ||
1858 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
1859 | if (status < 0) { | ||
1860 | dbg("Writing spreg failed in set_serial_baud"); | ||
1861 | return -1; | ||
1862 | } | ||
1863 | /* Calculate the Divisor */ | ||
1864 | |||
1865 | if (status) { | ||
1866 | err("%s - bad baud rate", __func__); | ||
1867 | dbg("%s", "bad baud rate"); | ||
1868 | return status; | ||
1869 | } | ||
1870 | /* Enable access to divisor latch */ | ||
1871 | Data = ATEN2011_port->shadowLCR | SERIAL_LCR_DLAB; | ||
1872 | ATEN2011_port->shadowLCR = Data; | ||
1873 | set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
1874 | |||
1875 | /* Write the divisor */ | ||
1876 | Data = (unsigned char)(divisor & 0xff); | ||
1877 | dbg("set_serial_baud Value to write DLL is %x", Data); | ||
1878 | set_uart_reg(port, DIVISOR_LATCH_LSB, Data); | ||
1879 | |||
1880 | Data = (unsigned char)((divisor & 0xff00) >> 8); | ||
1881 | dbg("set_serial_baud Value to write DLM is %x", Data); | ||
1882 | set_uart_reg(port, DIVISOR_LATCH_MSB, Data); | ||
1883 | |||
1884 | /* Disable access to divisor latch */ | ||
1885 | Data = ATEN2011_port->shadowLCR & ~SERIAL_LCR_DLAB; | ||
1886 | ATEN2011_port->shadowLCR = Data; | ||
1887 | set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
1888 | |||
1889 | } | ||
1890 | |||
1891 | return status; | ||
1892 | } | ||
1893 | |||
1894 | static void ATEN2011_change_port_settings(struct tty_struct *tty, | ||
1895 | struct ATENINTL_port *ATEN2011_port, | ||
1896 | struct ktermios *old_termios) | ||
1897 | { | ||
1898 | int baud; | ||
1899 | unsigned cflag; | ||
1900 | unsigned iflag; | ||
1901 | __u8 lData; | ||
1902 | __u8 lParity; | ||
1903 | __u8 lStop; | ||
1904 | int status; | ||
1905 | __u16 Data; | ||
1906 | struct usb_serial_port *port; | ||
1907 | struct usb_serial *serial; | ||
1908 | |||
1909 | if (ATEN2011_port == NULL) | ||
1910 | return; | ||
1911 | |||
1912 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
1913 | |||
1914 | serial = port->serial; | ||
1915 | |||
1916 | dbg("%s - port %d", __func__, ATEN2011_port->port->number); | ||
1917 | |||
1918 | if (!ATEN2011_port->open) { | ||
1919 | dbg("%s - port not opened", __func__); | ||
1920 | return; | ||
1921 | } | ||
1922 | |||
1923 | if ((!tty) || (!tty->termios)) { | ||
1924 | dbg("%s - no tty structures", __func__); | ||
1925 | return; | ||
1926 | } | ||
1927 | |||
1928 | dbg("%s", "Entering .......... "); | ||
1929 | |||
1930 | lData = LCR_BITS_8; | ||
1931 | lStop = LCR_STOP_1; | ||
1932 | lParity = LCR_PAR_NONE; | ||
1933 | |||
1934 | cflag = tty->termios->c_cflag; | ||
1935 | iflag = tty->termios->c_iflag; | ||
1936 | |||
1937 | /* Change the number of bits */ | ||
1938 | |||
1939 | /* COMMENT1: the "if (cflag & CSIZE)" check below was added because of the errors seen in the serial loopback data test, i.e. serial_loopback.pl -v */ | ||
1940 | /* if(cflag & CSIZE) */ | ||
1941 | { | ||
1942 | switch (cflag & CSIZE) { | ||
1943 | case CS5: | ||
1944 | lData = LCR_BITS_5; | ||
1945 | break; | ||
1946 | |||
1947 | case CS6: | ||
1948 | lData = LCR_BITS_6; | ||
1949 | break; | ||
1950 | |||
1951 | case CS7: | ||
1952 | lData = LCR_BITS_7; | ||
1953 | break; | ||
1954 | default: | ||
1955 | case CS8: | ||
1956 | lData = LCR_BITS_8; | ||
1957 | break; | ||
1958 | } | ||
1959 | } | ||
1960 | /* Change the Parity bit */ | ||
1961 | if (cflag & PARENB) { | ||
1962 | if (cflag & PARODD) { | ||
1963 | lParity = LCR_PAR_ODD; | ||
1964 | dbg("%s - parity = odd", __func__); | ||
1965 | } else { | ||
1966 | lParity = LCR_PAR_EVEN; | ||
1967 | dbg("%s - parity = even", __func__); | ||
1968 | } | ||
1969 | |||
1970 | } else { | ||
1971 | dbg("%s - parity = none", __func__); | ||
1972 | } | ||
1973 | |||
1974 | if (cflag & CMSPAR) | ||
1975 | lParity = lParity | 0x20; | ||
1976 | |||
1977 | /* Change the Stop bit */ | ||
1978 | if (cflag & CSTOPB) { | ||
1979 | lStop = LCR_STOP_2; | ||
1980 | dbg("%s - stop bits = 2", __func__); | ||
1981 | } else { | ||
1982 | lStop = LCR_STOP_1; | ||
1983 | dbg("%s - stop bits = 1", __func__); | ||
1984 | } | ||
1985 | |||
1986 | /* Update the LCR with the correct value */ | ||
1987 | ATEN2011_port->shadowLCR &= | ||
1988 | ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); | ||
1989 | ATEN2011_port->shadowLCR |= (lData | lParity | lStop); | ||
1990 | |||
1991 | dbg | ||
1992 | ("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is %x", | ||
1993 | ATEN2011_port->shadowLCR); | ||
1994 | /* Disable Interrupts */ | ||
1995 | Data = 0x00; | ||
1996 | set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
1997 | |||
1998 | Data = 0x00; | ||
1999 | set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
2000 | |||
2001 | Data = 0xcf; | ||
2002 | set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
2003 | |||
2004 | /* Send the updated LCR value to the ATEN2011 */ | ||
2005 | Data = ATEN2011_port->shadowLCR; | ||
2006 | |||
2007 | set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
2008 | |||
2009 | Data = 0x00b; | ||
2010 | ATEN2011_port->shadowMCR = Data; | ||
2011 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
2012 | Data = 0x00b; | ||
2013 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
2014 | |||
2015 | /* set up the MCR register and send it to the ATEN2011 */ | ||
2016 | |||
2017 | ATEN2011_port->shadowMCR = MCR_MASTER_IE; | ||
2018 | if (cflag & CBAUD) | ||
2019 | ATEN2011_port->shadowMCR |= (MCR_DTR | MCR_RTS); | ||
2020 | |||
2021 | if (cflag & CRTSCTS) | ||
2022 | ATEN2011_port->shadowMCR |= (MCR_XON_ANY); | ||
2023 | else | ||
2024 | ATEN2011_port->shadowMCR &= ~(MCR_XON_ANY); | ||
2025 | |||
2026 | Data = ATEN2011_port->shadowMCR; | ||
2027 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
2028 | |||
2029 | /* Determine divisor based on baud rate */ | ||
2030 | baud = tty_get_baud_rate(tty); | ||
2031 | |||
2032 | if (!baud) { | ||
2033 | /* pick a default, any default... */ | ||
2034 | dbg("%s", "Picked default baud..."); | ||
2035 | baud = 9600; | ||
2036 | } | ||
2037 | |||
2038 | dbg("%s - baud rate = %d", __func__, baud); | ||
2039 | status = ATEN2011_send_cmd_write_baud_rate(ATEN2011_port, baud); | ||
2040 | |||
2041 | /* Enable Interrupts */ | ||
2042 | Data = 0x0c; | ||
2043 | set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
2044 | |||
2045 | if (ATEN2011_port->read_urb->status != -EINPROGRESS) { | ||
2046 | ATEN2011_port->read_urb->dev = serial->dev; | ||
2047 | |||
2048 | status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); | ||
2049 | |||
2050 | if (status) { | ||
2051 | dbg | ||
2052 | (" usb_submit_urb(read bulk) failed, status = %d", | ||
2053 | status); | ||
2054 | } | ||
2055 | } | ||
2056 | dbg | ||
2057 | ("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is End %x", | ||
2058 | ATEN2011_port->shadowLCR); | ||
2059 | |||
2060 | return; | ||
2061 | } | ||
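
change_port_settings() is reached through the set_termios handler, i.e. through ordinary termios changes from userspace. A minimal sketch configuring 8N1 at 9600 baud, which walks the CSIZE/parity/stop-bit selection and the baud-rate path above; the device node name is an assumption.

    /* termios_demo.c - configure 8N1 at 9600 baud (assumed node) */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
            struct termios tio;
            int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            memset(&tio, 0, sizeof(tio));
            tio.c_cflag = CS8 | CREAD | CLOCAL;  /* 8 data bits, no parity, 1 stop */
            tio.c_cc[VMIN] = 1;
            tio.c_cc[VTIME] = 0;
            cfsetispeed(&tio, B9600);
            cfsetospeed(&tio, B9600);

            /* tcsetattr() ends up in ATEN2011_set_termios() ->
             * ATEN2011_change_port_settings(), which rewrites LCR, MCR and
             * the divisor registers. */
            if (tcsetattr(fd, TCSANOW, &tio) < 0)
                    perror("tcsetattr");

            write(fd, "hello\r\n", 7);
            close(fd);
            return 0;
    }
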
2062 | |||
2063 | static int ATEN2011_calc_num_ports(struct usb_serial *serial) | ||
2064 | { | ||
2065 | |||
2066 | __u16 Data = 0x00; | ||
2067 | int ret = 0; | ||
2068 | int ATEN2011_2or4ports; | ||
2069 | ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), | ||
2070 | ATEN_RDREQ, ATEN_RD_RTYPE, 0, GPIO_REGISTER, | ||
2071 | &Data, VENDOR_READ_LENGTH, ATEN_WDR_TIMEOUT); | ||
2072 | |||
2073 | /* ghostgum: here is where the problem appears to be */ | ||
2074 | /* Which of the following are needed? */ | ||
2075 | /* Greg used the serial->type->num_ports=2 */ | ||
2076 | /* But the code in the ATEN2011_open relies on serial->num_ports=2 */ | ||
2077 | if ((Data & 0x01) == 0) { | ||
2078 | ATEN2011_2or4ports = 2; | ||
2079 | serial->type->num_ports = 2; | ||
2080 | serial->num_ports = 2; | ||
2081 | } | ||
2082 | /* else if(serial->interface->cur_altsetting->desc.bNumEndpoints == 9) */ | ||
2083 | else { | ||
2084 | ATEN2011_2or4ports = 4; | ||
2085 | serial->type->num_ports = 4; | ||
2086 | serial->num_ports = 4; | ||
2087 | |||
2088 | } | ||
2089 | |||
2090 | return ATEN2011_2or4ports; | ||
2091 | } | ||
2092 | |||
2093 | static int ATEN2011_startup(struct usb_serial *serial) | ||
2094 | { | ||
2095 | struct ATENINTL_serial *ATEN2011_serial; | ||
2096 | struct ATENINTL_port *ATEN2011_port; | ||
2097 | struct usb_device *dev; | ||
2098 | int i, status; | ||
2099 | int minor; | ||
2100 | |||
2101 | __u16 Data; | ||
2102 | dbg("%s", " ATEN2011_startup :entering.........."); | ||
2103 | |||
2104 | if (!serial) { | ||
2105 | dbg("%s", "Invalid Handler"); | ||
2106 | return -1; | ||
2107 | } | ||
2108 | |||
2109 | dev = serial->dev; | ||
2110 | |||
2111 | dbg("%s", "Entering..."); | ||
2112 | |||
2113 | /* create our private serial structure */ | ||
2114 | ATEN2011_serial = kzalloc(sizeof(struct ATENINTL_serial), GFP_KERNEL); | ||
2115 | if (ATEN2011_serial == NULL) { | ||
2116 | err("%s - Out of memory", __func__); | ||
2117 | return -ENOMEM; | ||
2118 | } | ||
2119 | |||
2120 | /* resetting the private structure field values to zero */ | ||
2121 | memset(ATEN2011_serial, 0, sizeof(struct ATENINTL_serial)); | ||
2122 | |||
2123 | ATEN2011_serial->serial = serial; | ||
2124 | /* initialize status polling flag to 0 */ | ||
2125 | ATEN2011_serial->status_polling_started = 0; | ||
2126 | |||
2127 | usb_set_serial_data(serial, ATEN2011_serial); | ||
2128 | ATEN2011_serial->ATEN2011_spectrum_2or4ports = | ||
2129 | ATEN2011_calc_num_ports(serial); | ||
2130 | /* we set up the pointers to the endpoints in the ATEN2011_open * | ||
2131 | * function, as the structures aren't created yet. */ | ||
2132 | |||
2133 | /* set up port private structures */ | ||
2134 | for (i = 0; i < serial->num_ports; ++i) { | ||
2135 | ATEN2011_port = | ||
2136 | kmalloc(sizeof(struct ATENINTL_port), GFP_KERNEL); | ||
2137 | if (ATEN2011_port == NULL) { | ||
2138 | err("%s - Out of memory", __func__); | ||
2139 | usb_set_serial_data(serial, NULL); | ||
2140 | kfree(ATEN2011_serial); | ||
2141 | return -ENOMEM; | ||
2142 | } | ||
2143 | memset(ATEN2011_port, 0, sizeof(struct ATENINTL_port)); | ||
2144 | |||
2145 | /* | ||
2146 | * Initialize all port interrupt endpoints to the port 0 | ||
2147 | * interrupt endpoint. Our device has only one interrupt | ||
2148 | * endpoint common to all ports | ||
2149 | */ | ||
2150 | /* serial->port[i]->interrupt_in_endpointAddress = serial->port[0]->interrupt_in_endpointAddress; */ | ||
2151 | |||
2152 | ATEN2011_port->port = serial->port[i]; | ||
2153 | usb_set_serial_port_data(serial->port[i], ATEN2011_port); | ||
2154 | |||
2155 | minor = serial->port[i]->serial->minor; | ||
2156 | if (minor == SERIAL_TTY_NO_MINOR) | ||
2157 | minor = 0; | ||
2158 | ATEN2011_port->port_num = | ||
2159 | ((serial->port[i]->number - minor) + 1); | ||
2160 | |||
2161 | if (ATEN2011_port->port_num == 1) { | ||
2162 | ATEN2011_port->SpRegOffset = 0x0; | ||
2163 | ATEN2011_port->ControlRegOffset = 0x1; | ||
2164 | ATEN2011_port->DcrRegOffset = 0x4; | ||
2165 | } else if ((ATEN2011_port->port_num == 2) | ||
2166 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2167 | 4)) { | ||
2168 | ATEN2011_port->SpRegOffset = 0x8; | ||
2169 | ATEN2011_port->ControlRegOffset = 0x9; | ||
2170 | ATEN2011_port->DcrRegOffset = 0x16; | ||
2171 | } else if ((ATEN2011_port->port_num == 2) | ||
2172 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2173 | 2)) { | ||
2174 | ATEN2011_port->SpRegOffset = 0xa; | ||
2175 | ATEN2011_port->ControlRegOffset = 0xb; | ||
2176 | ATEN2011_port->DcrRegOffset = 0x19; | ||
2177 | } else if ((ATEN2011_port->port_num == 3) | ||
2178 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2179 | 4)) { | ||
2180 | ATEN2011_port->SpRegOffset = 0xa; | ||
2181 | ATEN2011_port->ControlRegOffset = 0xb; | ||
2182 | ATEN2011_port->DcrRegOffset = 0x19; | ||
2183 | } else if ((ATEN2011_port->port_num == 4) | ||
2184 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2185 | 4)) { | ||
2186 | ATEN2011_port->SpRegOffset = 0xc; | ||
2187 | ATEN2011_port->ControlRegOffset = 0xd; | ||
2188 | ATEN2011_port->DcrRegOffset = 0x1c; | ||
2189 | } | ||
2190 | |||
2191 | usb_set_serial_port_data(serial->port[i], ATEN2011_port); | ||
2192 | |||
2193 | /* set the driver-done and sp1 bits in the control register (rx_disable is left clear) */ | ||
2194 | |||
2195 | status = get_reg_sync(serial->port[i], | ||
2196 | ATEN2011_port->ControlRegOffset, &Data); | ||
2197 | if (status < 0) { | ||
2198 | dbg("Reading ControlReg failed status-0x%x", | ||
2199 | status); | ||
2200 | break; | ||
2201 | } else | ||
2202 | dbg | ||
2203 | ("ControlReg Reading success val is %x, status%d", | ||
2204 | Data, status); | ||
2205 | Data |= 0x08; /* setting driver done bit */ | ||
2206 | Data |= 0x04; /* sp1_bit so that CTS changes are reflected in the modem status reg */ | ||
2207 | |||
2208 | /* Data |= 0x20; */ /* rx_disable bit */ | ||
2209 | status = set_reg_sync(serial->port[i], | ||
2210 | ATEN2011_port->ControlRegOffset, Data); | ||
2211 | if (status < 0) { | ||
2212 | dbg | ||
2213 | ("Writing ControlReg failed(rx_disable) status-0x%x", | ||
2214 | status); | ||
2215 | break; | ||
2216 | } else | ||
2217 | dbg | ||
2218 | ("ControlReg Writing success(rx_disable) status%d", | ||
2219 | status); | ||
2220 | |||
2221 | /* | ||
2222 | * Write default values to the DCRs (i.e. 0x01 to DCR0, 0x05 to DCR1 | ||
2223 | * and 0x24 to DCR2) | ||
2224 | */ | ||
2225 | Data = 0x01; | ||
2226 | status = set_reg_sync(serial->port[i], | ||
2227 | (__u16)(ATEN2011_port->DcrRegOffset + 0), | ||
2228 | Data); | ||
2229 | if (status < 0) { | ||
2230 | dbg("Writing DCR0 failed status-0x%x", status); | ||
2231 | break; | ||
2232 | } else | ||
2233 | dbg("DCR0 Writing success status%d", status); | ||
2234 | |||
2235 | Data = 0x05; | ||
2236 | status = set_reg_sync(serial->port[i], | ||
2237 | (__u16)(ATEN2011_port->DcrRegOffset + 1), | ||
2238 | Data); | ||
2239 | if (status < 0) { | ||
2240 | dbg("Writing DCR1 failed status-0x%x", status); | ||
2241 | break; | ||
2242 | } else | ||
2243 | dbg("DCR1 Writing success status%d", status); | ||
2244 | |||
2245 | Data = 0x24; | ||
2246 | status = set_reg_sync(serial->port[i], | ||
2247 | (__u16)(ATEN2011_port->DcrRegOffset + 2), | ||
2248 | Data); | ||
2249 | if (status < 0) { | ||
2250 | dbg("Writing DCR2 failed status-0x%x", status); | ||
2251 | break; | ||
2252 | } else | ||
2253 | dbg("DCR2 Writing success status%d", status); | ||
2254 | |||
2255 | /* write values to clkstart (0x0) and clkmulti (0x20) */ | ||
2256 | Data = 0x0; | ||
2257 | status = set_reg_sync(serial->port[i], CLK_START_VALUE_REGISTER, | ||
2258 | Data); | ||
2259 | if (status < 0) { | ||
2260 | dbg | ||
2261 | ("Writing CLK_START_VALUE_REGISTER failed status-0x%x", | ||
2262 | status); | ||
2263 | break; | ||
2264 | } else | ||
2265 | dbg | ||
2266 | ("CLK_START_VALUE_REGISTER Writing success status%d", | ||
2267 | status); | ||
2268 | |||
2269 | Data = 0x20; | ||
2270 | status = set_reg_sync(serial->port[i], CLK_MULTI_REGISTER, | ||
2271 | Data); | ||
2272 | if (status < 0) { | ||
2273 | dbg | ||
2274 | ("Writing CLK_MULTI_REGISTER failed status-0x%x", | ||
2275 | status); | ||
2276 | break; | ||
2277 | } else | ||
2278 | dbg("CLK_MULTI_REGISTER Writing success status%d", | ||
2279 | status); | ||
2280 | |||
2281 | /* Zero Length flag register */ | ||
2282 | if ((ATEN2011_port->port_num != 1) | ||
2283 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)) { | ||
2284 | |||
2285 | Data = 0xff; | ||
2286 | status = set_reg_sync(serial->port[i], | ||
2287 | (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num)), | ||
2288 | Data); | ||
2289 | dbg("ZLIP offset%x", | ||
2290 | (__u16) (ZLP_REG1 + | ||
2291 | ((__u16) ATEN2011_port->port_num))); | ||
2292 | if (status < 0) { | ||
2293 | dbg | ||
2294 | ("Writing ZLP_REG%d failed status-0x%x", | ||
2295 | i + 2, status); | ||
2296 | break; | ||
2297 | } else | ||
2298 | dbg("ZLP_REG%d Writing success status%d", | ||
2299 | i + 2, status); | ||
2300 | } else { | ||
2301 | Data = 0xff; | ||
2302 | status = set_reg_sync(serial->port[i], | ||
2303 | (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num) - 0x1), | ||
2304 | Data); | ||
2305 | dbg("ZLIP offset%x", | ||
2306 | (__u16) (ZLP_REG1 + | ||
2307 | ((__u16) ATEN2011_port->port_num) - | ||
2308 | 0x1)); | ||
2309 | if (status < 0) { | ||
2310 | dbg | ||
2311 | ("Writing ZLP_REG%d failed status-0x%x", | ||
2312 | i + 1, status); | ||
2313 | break; | ||
2314 | } else | ||
2315 | dbg("ZLP_REG%d Writing success status%d", | ||
2316 | i + 1, status); | ||
2317 | |||
2318 | } | ||
2319 | ATEN2011_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
2320 | ATEN2011_port->ctrl_buf = kmalloc(16, GFP_KERNEL); | ||
2321 | |||
2322 | } | ||
2323 | |||
2324 | /* Zero Length flag enable */ | ||
2325 | Data = 0x0f; | ||
2326 | status = set_reg_sync(serial->port[0], ZLP_REG5, Data); | ||
2327 | if (status < 0) { | ||
2328 | dbg("Writing ZLP_REG5 failed status-0x%x", status); | ||
2329 | return -1; | ||
2330 | } else | ||
2331 | dbg("ZLP_REG5 Writing success status%d", status); | ||
2332 | |||
2333 | /* setting configuration feature to one */ | ||
2334 | usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), | ||
2335 | (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); | ||
2336 | return 0; | ||
2337 | } | ||
2338 | |||
2339 | static void ATEN2011_release(struct usb_serial *serial) | ||
2340 | { | ||
2341 | int i; | ||
2342 | struct ATENINTL_port *ATEN2011_port; | ||
2343 | |||
2344 | /* check for the ports to be closed, close the ports and disconnect */ | ||
2345 | |||
2346 | /* free private structure allocated for serial port * | ||
2347 | * stop reads and writes on all ports */ | ||
2348 | |||
2349 | for (i = 0; i < serial->num_ports; ++i) { | ||
2350 | ATEN2011_port = usb_get_serial_port_data(serial->port[i]); | ||
2351 | kfree(ATEN2011_port->ctrl_buf); | ||
2352 | usb_kill_urb(ATEN2011_port->control_urb); | ||
2353 | kfree(ATEN2011_port); | ||
2354 | usb_set_serial_port_data(serial->port[i], NULL); | ||
2355 | } | ||
2356 | |||
2357 | /* free private structure allocated for serial device */ | ||
2358 | |||
2359 | kfree(usb_get_serial_data(serial)); | ||
2360 | usb_set_serial_data(serial, NULL); | ||
2361 | } | ||
2362 | |||
2363 | static struct usb_serial_driver aten_serial_driver = { | ||
2364 | .driver = { | ||
2365 | .owner = THIS_MODULE, | ||
2366 | .name = "aten2011", | ||
2367 | }, | ||
2368 | .description = DRIVER_DESC, | ||
2369 | .id_table = id_table, | ||
2370 | .open = ATEN2011_open, | ||
2371 | .close = ATEN2011_close, | ||
2372 | .write = ATEN2011_write, | ||
2373 | .write_room = ATEN2011_write_room, | ||
2374 | .chars_in_buffer = ATEN2011_chars_in_buffer, | ||
2375 | .throttle = ATEN2011_throttle, | ||
2376 | .unthrottle = ATEN2011_unthrottle, | ||
2377 | .calc_num_ports = ATEN2011_calc_num_ports, | ||
2378 | |||
2379 | .ioctl = ATEN2011_ioctl, | ||
2380 | .set_termios = ATEN2011_set_termios, | ||
2381 | .break_ctl = ATEN2011_break, | ||
2382 | .tiocmget = ATEN2011_tiocmget, | ||
2383 | .tiocmset = ATEN2011_tiocmset, | ||
2384 | .attach = ATEN2011_startup, | ||
2385 | .release = ATEN2011_release, | ||
2386 | .read_bulk_callback = ATEN2011_bulk_in_callback, | ||
2387 | .read_int_callback = ATEN2011_interrupt_callback, | ||
2388 | }; | ||
2389 | |||
2390 | static struct usb_driver aten_driver = { | ||
2391 | .name = "aten2011", | ||
2392 | .probe = usb_serial_probe, | ||
2393 | .disconnect = usb_serial_disconnect, | ||
2394 | .id_table = id_table, | ||
2395 | }; | ||
2396 | |||
2397 | static int __init aten_init(void) | ||
2398 | { | ||
2399 | int retval; | ||
2400 | |||
2401 | /* Register with the usb serial */ | ||
2402 | retval = usb_serial_register(&aten_serial_driver); | ||
2403 | if (retval) | ||
2404 | return retval; | ||
2405 | |||
2406 | printk(KERN_INFO KBUILD_MODNAME ":" | ||
2407 | DRIVER_DESC " " DRIVER_VERSION "\n"); | ||
2408 | |||
2409 | /* Register with the usb */ | ||
2410 | retval = usb_register(&aten_driver); | ||
2411 | if (retval) | ||
2412 | usb_serial_deregister(&aten_serial_driver); | ||
2413 | |||
2414 | return retval; | ||
2415 | } | ||
2416 | |||
2417 | static void __exit aten_exit(void) | ||
2418 | { | ||
2419 | usb_deregister(&aten_driver); | ||
2420 | usb_serial_deregister(&aten_serial_driver); | ||
2421 | } | ||
2422 | |||
2423 | module_init(aten_init); | ||
2424 | module_exit(aten_exit); | ||
2425 | |||
2426 | /* Module information */ | ||
2427 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
2428 | MODULE_LICENSE("GPL"); | ||
2429 | |||
2430 | MODULE_PARM_DESC(debug, "Debug enabled or not"); | ||
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index 0ab9d15f3439..f5416af1e902 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/fb.h> | 22 | #include <linux/fb.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/vmalloc.h> | ||
24 | 25 | ||
25 | #include "udlfb.h" | 26 | #include "udlfb.h" |
26 | 27 | ||
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 24dfb33f90cb..a16c538d0132 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
80 | int max_tx; | 80 | int max_tx; |
81 | int i; | 81 | int i; |
82 | 82 | ||
83 | /* Allocate space for the SS endpoint companion descriptor */ | ||
84 | ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp), | ||
85 | GFP_KERNEL); | ||
86 | if (!ep->ss_ep_comp) | ||
87 | return -ENOMEM; | ||
88 | desc = (struct usb_ss_ep_comp_descriptor *) buffer; | 83 | desc = (struct usb_ss_ep_comp_descriptor *) buffer; |
89 | if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { | 84 | if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { |
90 | dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " | 85 | dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " |
91 | " interface %d altsetting %d ep %d: " | 86 | " interface %d altsetting %d ep %d: " |
92 | "using minimum values\n", | 87 | "using minimum values\n", |
93 | cfgno, inum, asnum, ep->desc.bEndpointAddress); | 88 | cfgno, inum, asnum, ep->desc.bEndpointAddress); |
94 | ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE; | ||
95 | ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; | ||
96 | ep->ss_ep_comp->desc.bMaxBurst = 0; | ||
97 | /* | ||
98 | * Leave bmAttributes as zero, which will mean no streams for | ||
99 | * bulk, and isoc won't support multiple bursts of packets. | ||
100 | * With bursts of only one packet, and a Mult of 1, the max | ||
101 | * amount of data moved per endpoint service interval is one | ||
102 | * packet. | ||
103 | */ | ||
104 | if (usb_endpoint_xfer_isoc(&ep->desc) || | ||
105 | usb_endpoint_xfer_int(&ep->desc)) | ||
106 | ep->ss_ep_comp->desc.wBytesPerInterval = | ||
107 | ep->desc.wMaxPacketSize; | ||
108 | /* | 89 | /* |
109 | * The next descriptor is for an Endpoint or Interface, | 90 | * The next descriptor is for an Endpoint or Interface, |
110 | * no extra descriptors to copy into the companion structure, | 91 | * no extra descriptors to copy into the companion structure, |
111 | * and we didn't eat up any of the buffer. | 92 | * and we didn't eat up any of the buffer. |
112 | */ | 93 | */ |
113 | retval = 0; | 94 | return 0; |
114 | goto valid; | ||
115 | } | 95 | } |
116 | memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE); | 96 | memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE); |
117 | desc = &ep->ss_ep_comp->desc; | 97 | desc = &ep->ss_ep_comp->desc; |
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
320 | buffer += i; | 300 | buffer += i; |
321 | size -= i; | 301 | size -= i; |
322 | 302 | ||
303 | /* Allocate space for the SS endpoint companion descriptor */ | ||
304 | endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp), | ||
305 | GFP_KERNEL); | ||
306 | if (!endpoint->ss_ep_comp) | ||
307 | return -ENOMEM; | ||
308 | |||
309 | /* Fill in some default values (may be overwritten later) */ | ||
310 | endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE; | ||
311 | endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; | ||
312 | endpoint->ss_ep_comp->desc.bMaxBurst = 0; | ||
313 | /* | ||
314 | * Leave bmAttributes as zero, which will mean no streams for | ||
315 | * bulk, and isoc won't support multiple bursts of packets. | ||
316 | * With bursts of only one packet, and a Mult of 1, the max | ||
317 | * amount of data moved per endpoint service interval is one | ||
318 | * packet. | ||
319 | */ | ||
320 | if (usb_endpoint_xfer_isoc(&endpoint->desc) || | ||
321 | usb_endpoint_xfer_int(&endpoint->desc)) | ||
322 | endpoint->ss_ep_comp->desc.wBytesPerInterval = | ||
323 | endpoint->desc.wMaxPacketSize; | ||
324 | |||
323 | if (size > 0) { | 325 | if (size > 0) { |
324 | retval = usb_parse_ss_endpoint_companion(ddev, cfgno, | 326 | retval = usb_parse_ss_endpoint_companion(ddev, cfgno, |
325 | inum, asnum, endpoint, num_ep, buffer, | 327 | inum, asnum, endpoint, num_ep, buffer, |
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
329 | retval = buffer - buffer0; | 331 | retval = buffer - buffer0; |
330 | } | 332 | } |
331 | } else { | 333 | } else { |
334 | dev_warn(ddev, "config %d interface %d altsetting %d " | ||
335 | "endpoint 0x%X has no " | ||
336 | "SuperSpeed companion descriptor\n", | ||
337 | cfgno, inum, asnum, d->bEndpointAddress); | ||
332 | retval = buffer - buffer0; | 338 | retval = buffer - buffer0; |
333 | } | 339 | } |
334 | } else { | 340 | } else { |
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index dc2ac613a9d1..1d283e1b2b8d 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c | |||
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd) | |||
105 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 105 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
106 | int retval; | 106 | int retval; |
107 | 107 | ||
108 | ehci_reset(ehci); | ||
108 | retval = ehci_halt(ehci); | 109 | retval = ehci_halt(ehci); |
109 | if (retval) | 110 | if (retval) |
110 | return retval; | 111 | return retval; |
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd) | |||
118 | 119 | ||
119 | hcd->has_tt = 1; | 120 | hcd->has_tt = 1; |
120 | 121 | ||
121 | ehci_reset(ehci); | ||
122 | ehci_port_power(ehci, 0); | 122 | ehci_port_power(ehci, 0); |
123 | 123 | ||
124 | return retval; | 124 | return retval; |
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index f3aaba35e912..83cbecd2a1ed 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c | |||
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
282 | static void ohci_omap_stop(struct usb_hcd *hcd) | 282 | static void ohci_omap_stop(struct usb_hcd *hcd) |
283 | { | 283 | { |
284 | dev_dbg(hcd->self.controller, "stopping USB Controller\n"); | 284 | dev_dbg(hcd->self.controller, "stopping USB Controller\n"); |
285 | ohci_stop(hcd); | ||
285 | omap_ohci_clock_power(0); | 286 | omap_ohci_clock_power(0); |
286 | } | 287 | } |
287 | 288 | ||
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 2501c571f855..705e34324156 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int | |||
173 | { | 173 | { |
174 | void *addr; | 174 | void *addr; |
175 | u32 temp; | 175 | u32 temp; |
176 | u64 temp_64; | ||
176 | 177 | ||
177 | addr = &ir_set->irq_pending; | 178 | addr = &ir_set->irq_pending; |
178 | temp = xhci_readl(xhci, addr); | 179 | temp = xhci_readl(xhci, addr); |
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int | |||
200 | xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", | 201 | xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", |
201 | addr, (unsigned int)temp); | 202 | addr, (unsigned int)temp); |
202 | 203 | ||
203 | addr = &ir_set->erst_base[0]; | 204 | addr = &ir_set->erst_base; |
204 | temp = xhci_readl(xhci, addr); | 205 | temp_64 = xhci_read_64(xhci, addr); |
205 | xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n", | 206 | xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", |
206 | addr, (unsigned int) temp); | 207 | addr, temp_64); |
207 | |||
208 | addr = &ir_set->erst_base[1]; | ||
209 | temp = xhci_readl(xhci, addr); | ||
210 | xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n", | ||
211 | addr, (unsigned int) temp); | ||
212 | 208 | ||
213 | addr = &ir_set->erst_dequeue[0]; | 209 | addr = &ir_set->erst_dequeue; |
214 | temp = xhci_readl(xhci, addr); | 210 | temp_64 = xhci_read_64(xhci, addr); |
215 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n", | 211 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", |
216 | addr, (unsigned int) temp); | 212 | addr, temp_64); |
217 | |||
218 | addr = &ir_set->erst_dequeue[1]; | ||
219 | temp = xhci_readl(xhci, addr); | ||
220 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n", | ||
221 | addr, (unsigned int) temp); | ||
222 | } | 213 | } |
223 | 214 | ||
224 | void xhci_print_run_regs(struct xhci_hcd *xhci) | 215 | void xhci_print_run_regs(struct xhci_hcd *xhci) |
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
268 | xhci_dbg(xhci, "Link TRB:\n"); | 259 | xhci_dbg(xhci, "Link TRB:\n"); |
269 | xhci_print_trb_offsets(xhci, trb); | 260 | xhci_print_trb_offsets(xhci, trb); |
270 | 261 | ||
271 | address = trb->link.segment_ptr[0] + | 262 | address = trb->link.segment_ptr; |
272 | (((u64) trb->link.segment_ptr[1]) << 32); | ||
273 | xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); | 263 | xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); |
274 | 264 | ||
275 | xhci_dbg(xhci, "Interrupter target = 0x%x\n", | 265 | xhci_dbg(xhci, "Interrupter target = 0x%x\n", |
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
282 | (unsigned int) (trb->link.control & TRB_NO_SNOOP)); | 272 | (unsigned int) (trb->link.control & TRB_NO_SNOOP)); |
283 | break; | 273 | break; |
284 | case TRB_TYPE(TRB_TRANSFER): | 274 | case TRB_TYPE(TRB_TRANSFER): |
285 | address = trb->trans_event.buffer[0] + | 275 | address = trb->trans_event.buffer; |
286 | (((u64) trb->trans_event.buffer[1]) << 32); | ||
287 | /* | 276 | /* |
288 | * FIXME: look at flags to figure out if it's an address or if | 277 | * FIXME: look at flags to figure out if it's an address or if |
289 | * the data is directly in the buffer field. | 278 | * the data is directly in the buffer field. |
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
291 | xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); | 280 | xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); |
292 | break; | 281 | break; |
293 | case TRB_TYPE(TRB_COMPLETION): | 282 | case TRB_TYPE(TRB_COMPLETION): |
294 | address = trb->event_cmd.cmd_trb[0] + | 283 | address = trb->event_cmd.cmd_trb; |
295 | (((u64) trb->event_cmd.cmd_trb[1]) << 32); | ||
296 | xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); | 284 | xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); |
297 | xhci_dbg(xhci, "Completion status = %u\n", | 285 | xhci_dbg(xhci, "Completion status = %u\n", |
298 | (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); | 286 | (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); |
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) | |||
328 | for (i = 0; i < TRBS_PER_SEGMENT; ++i) { | 316 | for (i = 0; i < TRBS_PER_SEGMENT; ++i) { |
329 | trb = &seg->trbs[i]; | 317 | trb = &seg->trbs[i]; |
330 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, | 318 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, |
331 | (unsigned int) trb->link.segment_ptr[0], | 319 | lower_32_bits(trb->link.segment_ptr), |
332 | (unsigned int) trb->link.segment_ptr[1], | 320 | upper_32_bits(trb->link.segment_ptr), |
333 | (unsigned int) trb->link.intr_target, | 321 | (unsigned int) trb->link.intr_target, |
334 | (unsigned int) trb->link.control); | 322 | (unsigned int) trb->link.control); |
335 | addr += sizeof(*trb); | 323 | addr += sizeof(*trb); |
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) | |||
386 | entry = &erst->entries[i]; | 374 | entry = &erst->entries[i]; |
387 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", | 375 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", |
388 | (unsigned int) addr, | 376 | (unsigned int) addr, |
389 | (unsigned int) entry->seg_addr[0], | 377 | lower_32_bits(entry->seg_addr), |
390 | (unsigned int) entry->seg_addr[1], | 378 | upper_32_bits(entry->seg_addr), |
391 | (unsigned int) entry->seg_size, | 379 | (unsigned int) entry->seg_size, |
392 | (unsigned int) entry->rsvd); | 380 | (unsigned int) entry->rsvd); |
393 | addr += sizeof(*entry); | 381 | addr += sizeof(*entry); |
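Context for the two dump hunks above: segment_ptr and seg_addr are now single 64-bit fields, so the debug output is produced with the kernel's generic lower_32_bits()/upper_32_bits() helpers instead of indexing a two-element array. A minimal sketch of that split (the dump_dma64() wrapper is hypothetical; xhci_dbg() and struct xhci_hcd are the driver's own):

#include <linux/kernel.h>	/* lower_32_bits(), upper_32_bits() */

/* Hypothetical helper: print a 64-bit DMA value as the two 32-bit words
 * the register dump format above expects.
 */
static void dump_dma64(struct xhci_hcd *xhci, const char *label, u64 val)
{
	xhci_dbg(xhci, "%s: hi %08x lo %08x\n", label,
			upper_32_bits(val),	/* bits 63:32 */
			lower_32_bits(val));	/* bits 31:0 */
}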
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) | |||
396 | 384 | ||
397 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) | 385 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) |
398 | { | 386 | { |
399 | u32 val; | 387 | u64 val; |
400 | 388 | ||
401 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | 389 | val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
402 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val); | 390 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", |
403 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); | 391 | lower_32_bits(val)); |
404 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); | 392 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", |
393 | upper_32_bits(val)); | ||
405 | } | 394 | } |
406 | 395 | ||
407 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) | 396 | /* Print the last 32 bytes for 64-byte contexts */ |
397 | static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) | ||
398 | { | ||
399 | int i; | ||
400 | for (i = 0; i < 4; ++i) { | ||
401 | xhci_dbg(xhci, "@%p (virt) @%08llx " | ||
402 | "(dma) %#08llx - rsvd64[%d]\n", | ||
403 | &ctx[4 + i], (unsigned long long)dma, | ||
404 | ctx[4 + i], i); | ||
405 | dma += 8; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | ||
408 | { | 410 | { |
409 | int i, j; | ||
410 | int last_ep_ctx = 31; | ||
411 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | 411 | /* Fields are 32 bits wide, DMA addresses are in bytes */ |
412 | int field_size = 32 / 8; | 412 | int field_size = 32 / 8; |
413 | int i; | ||
413 | 414 | ||
414 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", | 415 | struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); |
415 | &ctx->drop_flags, (unsigned long long)dma, | 416 | dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx); |
416 | ctx->drop_flags); | 417 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); |
417 | dma += field_size; | ||
418 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", | ||
419 | &ctx->add_flags, (unsigned long long)dma, | ||
420 | ctx->add_flags); | ||
421 | dma += field_size; | ||
422 | for (i = 0; i > 6; ++i) { | ||
423 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", | ||
424 | &ctx->rsvd[i], (unsigned long long)dma, | ||
425 | ctx->rsvd[i], i); | ||
426 | dma += field_size; | ||
427 | } | ||
428 | 418 | ||
429 | xhci_dbg(xhci, "Slot Context:\n"); | 419 | xhci_dbg(xhci, "Slot Context:\n"); |
430 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", | 420 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", |
431 | &ctx->slot.dev_info, | 421 | &slot_ctx->dev_info, |
432 | (unsigned long long)dma, ctx->slot.dev_info); | 422 | (unsigned long long)dma, slot_ctx->dev_info); |
433 | dma += field_size; | 423 | dma += field_size; |
434 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", | 424 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", |
435 | &ctx->slot.dev_info2, | 425 | &slot_ctx->dev_info2, |
436 | (unsigned long long)dma, ctx->slot.dev_info2); | 426 | (unsigned long long)dma, slot_ctx->dev_info2); |
437 | dma += field_size; | 427 | dma += field_size; |
438 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", | 428 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", |
439 | &ctx->slot.tt_info, | 429 | &slot_ctx->tt_info, |
440 | (unsigned long long)dma, ctx->slot.tt_info); | 430 | (unsigned long long)dma, slot_ctx->tt_info); |
441 | dma += field_size; | 431 | dma += field_size; |
442 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", | 432 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", |
443 | &ctx->slot.dev_state, | 433 | &slot_ctx->dev_state, |
444 | (unsigned long long)dma, ctx->slot.dev_state); | 434 | (unsigned long long)dma, slot_ctx->dev_state); |
445 | dma += field_size; | 435 | dma += field_size; |
446 | for (i = 0; i > 4; ++i) { | 436 | for (i = 0; i < 4; ++i) { |
447 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", | 437 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", |
448 | &ctx->slot.reserved[i], (unsigned long long)dma, | 438 | &slot_ctx->reserved[i], (unsigned long long)dma, |
449 | ctx->slot.reserved[i], i); | 439 | slot_ctx->reserved[i], i); |
450 | dma += field_size; | 440 | dma += field_size; |
451 | } | 441 | } |
452 | 442 | ||
443 | if (csz) | ||
444 | dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); | ||
445 | } | ||
446 | |||
447 | void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, | ||
448 | struct xhci_container_ctx *ctx, | ||
449 | unsigned int last_ep) | ||
450 | { | ||
451 | int i, j; | ||
452 | int last_ep_ctx = 31; | ||
453 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | ||
454 | int field_size = 32 / 8; | ||
455 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); | ||
456 | |||
453 | if (last_ep < 31) | 457 | if (last_ep < 31) |
454 | last_ep_ctx = last_ep + 1; | 458 | last_ep_ctx = last_ep + 1; |
455 | for (i = 0; i < last_ep_ctx; ++i) { | 459 | for (i = 0; i < last_ep_ctx; ++i) { |
460 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); | ||
461 | dma_addr_t dma = ctx->dma + | ||
462 | ((unsigned long)ep_ctx - (unsigned long)ctx); | ||
463 | |||
456 | xhci_dbg(xhci, "Endpoint %02d Context:\n", i); | 464 | xhci_dbg(xhci, "Endpoint %02d Context:\n", i); |
457 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", | 465 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", |
458 | &ctx->ep[i].ep_info, | 466 | &ep_ctx->ep_info, |
459 | (unsigned long long)dma, ctx->ep[i].ep_info); | 467 | (unsigned long long)dma, ep_ctx->ep_info); |
460 | dma += field_size; | 468 | dma += field_size; |
461 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", | 469 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", |
462 | &ctx->ep[i].ep_info2, | 470 | &ep_ctx->ep_info2, |
463 | (unsigned long long)dma, ctx->ep[i].ep_info2); | 471 | (unsigned long long)dma, ep_ctx->ep_info2); |
464 | dma += field_size; | ||
465 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n", | ||
466 | &ctx->ep[i].deq[0], | ||
467 | (unsigned long long)dma, ctx->ep[i].deq[0]); | ||
468 | dma += field_size; | ||
469 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n", | ||
470 | &ctx->ep[i].deq[1], | ||
471 | (unsigned long long)dma, ctx->ep[i].deq[1]); | ||
472 | dma += field_size; | 472 | dma += field_size; |
473 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n", | ||
474 | &ep_ctx->deq, | ||
475 | (unsigned long long)dma, ep_ctx->deq); | ||
476 | dma += 2*field_size; | ||
473 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", | 477 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", |
474 | &ctx->ep[i].tx_info, | 478 | &ep_ctx->tx_info, |
475 | (unsigned long long)dma, ctx->ep[i].tx_info); | 479 | (unsigned long long)dma, ep_ctx->tx_info); |
476 | dma += field_size; | 480 | dma += field_size; |
477 | for (j = 0; j < 3; ++j) { | 481 | for (j = 0; j < 3; ++j) { |
478 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", | 482 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", |
479 | &ctx->ep[i].reserved[j], | 483 | &ep_ctx->reserved[j], |
480 | (unsigned long long)dma, | 484 | (unsigned long long)dma, |
481 | ctx->ep[i].reserved[j], j); | 485 | ep_ctx->reserved[j], j); |
486 | dma += field_size; | ||
487 | } | ||
488 | |||
489 | if (csz) | ||
490 | dbg_rsvd64(xhci, (u64 *)ep_ctx, dma); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | void xhci_dbg_ctx(struct xhci_hcd *xhci, | ||
495 | struct xhci_container_ctx *ctx, | ||
496 | unsigned int last_ep) | ||
497 | { | ||
498 | int i; | ||
499 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | ||
500 | int field_size = 32 / 8; | ||
501 | struct xhci_slot_ctx *slot_ctx; | ||
502 | dma_addr_t dma = ctx->dma; | ||
503 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); | ||
504 | |||
505 | if (ctx->type == XHCI_CTX_TYPE_INPUT) { | ||
506 | struct xhci_input_control_ctx *ctrl_ctx = | ||
507 | xhci_get_input_control_ctx(xhci, ctx); | ||
508 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", | ||
509 | &ctrl_ctx->drop_flags, (unsigned long long)dma, | ||
510 | ctrl_ctx->drop_flags); | ||
511 | dma += field_size; | ||
512 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", | ||
513 | &ctrl_ctx->add_flags, (unsigned long long)dma, | ||
514 | ctrl_ctx->add_flags); | ||
515 | dma += field_size; | ||
516 | for (i = 0; i < 6; ++i) { | ||
517 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n", | ||
518 | &ctrl_ctx->rsvd2[i], (unsigned long long)dma, | ||
519 | ctrl_ctx->rsvd2[i], i); | ||
482 | dma += field_size; | 520 | dma += field_size; |
483 | } | 521 | } |
522 | |||
523 | if (csz) | ||
524 | dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma); | ||
484 | } | 525 | } |
526 | |||
527 | slot_ctx = xhci_get_slot_ctx(xhci, ctx); | ||
528 | xhci_dbg_slot_ctx(xhci, ctx); | ||
529 | xhci_dbg_ep_ctx(xhci, ctx, last_ep); | ||
485 | } | 530 | } |
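End of the xhci-dbg.c changes: xhci_dbg_ctx() now takes the opaque container plus a last-endpoint index and derives the DMA address of each sub-context itself. A hedged usage sketch (virt_dev, udev and the value 2 are illustrative, not taken from this hunk):

struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

/* Dumps the input control context (input containers only), the slot
 * context, and endpoint contexts 0..2 of this device's input context.
 */
xhci_dbg(xhci, "Slot %d input context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);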
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index dba3e07ccd09..816c39caca1c 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c | |||
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci) | |||
103 | u32 state; | 103 | u32 state; |
104 | 104 | ||
105 | state = xhci_readl(xhci, &xhci->op_regs->status); | 105 | state = xhci_readl(xhci, &xhci->op_regs->status); |
106 | BUG_ON((state & STS_HALT) == 0); | 106 | if ((state & STS_HALT) == 0) { |
107 | xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); | ||
108 | return 0; | ||
109 | } | ||
107 | 110 | ||
108 | xhci_dbg(xhci, "// Reset the HC\n"); | 111 | xhci_dbg(xhci, "// Reset the HC\n"); |
109 | command = xhci_readl(xhci, &xhci->op_regs->command); | 112 | command = xhci_readl(xhci, &xhci->op_regs->command); |
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd) | |||
226 | static void xhci_work(struct xhci_hcd *xhci) | 229 | static void xhci_work(struct xhci_hcd *xhci) |
227 | { | 230 | { |
228 | u32 temp; | 231 | u32 temp; |
232 | u64 temp_64; | ||
229 | 233 | ||
230 | /* | 234 | /* |
231 | * Clear the op reg interrupt status first, | 235 | * Clear the op reg interrupt status first, |
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci) | |||
248 | /* FIXME this should be a delayed service routine that clears the EHB */ | 252 | /* FIXME this should be a delayed service routine that clears the EHB */ |
249 | xhci_handle_event(xhci); | 253 | xhci_handle_event(xhci); |
250 | 254 | ||
251 | /* Clear the event handler busy flag; the event ring should be empty. */ | 255 | /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ |
252 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 256 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
253 | xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]); | 257 | xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); |
254 | /* Flush posted writes -- FIXME is this necessary? */ | 258 | /* Flush posted writes -- FIXME is this necessary? */ |
255 | xhci_readl(xhci, &xhci->ir_set->irq_pending); | 259 | xhci_readl(xhci, &xhci->ir_set->irq_pending); |
256 | } | 260 | } |
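The EHB handling is the subtle part of this hunk: the Event Handler Busy bit in erst_dequeue is write-1-to-clear (RW1C), so the 64-bit write sets the bit to acknowledge it instead of masking it off as the old 32-bit code tried to. A generic sketch of the RW1C pattern using the xhci_read_64()/xhci_write_64() accessors this series introduces (EXAMPLE_BUSY and the register pointer type are assumptions of the sketch):

#define EXAMPLE_BUSY	(1ULL << 3)	/* hypothetical RW1C status bit */

static void ack_busy(struct xhci_hcd *xhci, __u64 __iomem *reg)
{
	u64 val = xhci_read_64(xhci, reg);

	/* Writing 1 clears an RW1C bit; writing 0 leaves it untouched,
	 * so OR the flag in rather than ANDing it out.
	 */
	xhci_write_64(xhci, val | EXAMPLE_BUSY, reg);
}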
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
266 | { | 270 | { |
267 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 271 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
268 | u32 temp, temp2; | 272 | u32 temp, temp2; |
273 | union xhci_trb *trb; | ||
269 | 274 | ||
270 | spin_lock(&xhci->lock); | 275 | spin_lock(&xhci->lock); |
276 | trb = xhci->event_ring->dequeue; | ||
271 | /* Check if the xHC generated the interrupt, or the irq is shared */ | 277 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
272 | temp = xhci_readl(xhci, &xhci->op_regs->status); | 278 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
273 | temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); | 279 | temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
280 | if (temp == 0xffffffff && temp2 == 0xffffffff) | ||
281 | goto hw_died; | ||
282 | |||
274 | if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { | 283 | if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { |
275 | spin_unlock(&xhci->lock); | 284 | spin_unlock(&xhci->lock); |
276 | return IRQ_NONE; | 285 | return IRQ_NONE; |
277 | } | 286 | } |
287 | xhci_dbg(xhci, "op reg status = %08x\n", temp); | ||
288 | xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2); | ||
289 | xhci_dbg(xhci, "Event ring dequeue ptr:\n"); | ||
290 | xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", | ||
291 | (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), | ||
292 | lower_32_bits(trb->link.segment_ptr), | ||
293 | upper_32_bits(trb->link.segment_ptr), | ||
294 | (unsigned int) trb->link.intr_target, | ||
295 | (unsigned int) trb->link.control); | ||
278 | 296 | ||
279 | if (temp & STS_FATAL) { | 297 | if (temp & STS_FATAL) { |
280 | xhci_warn(xhci, "WARNING: Host System Error\n"); | 298 | xhci_warn(xhci, "WARNING: Host System Error\n"); |
281 | xhci_halt(xhci); | 299 | xhci_halt(xhci); |
300 | hw_died: | ||
282 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; | 301 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; |
283 | spin_unlock(&xhci->lock); | 302 | spin_unlock(&xhci->lock); |
284 | return -ESHUTDOWN; | 303 | return -ESHUTDOWN; |
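The new hw_died path leans on a PCI property: MMIO reads from a dead or surprise-removed device return all ones, so two unrelated registers both reading 0xffffffff is a cheap liveness check before the (possibly shared) interrupt is handled any further. A standalone sketch of that test, built only from registers this diff already touches:

/* Sketch: treat all-ones reads from two distinct registers as "HC died". */
static bool xhci_hc_died(struct xhci_hcd *xhci)
{
	u32 status  = xhci_readl(xhci, &xhci->op_regs->status);
	u32 pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);

	return status == 0xffffffff && pending == 0xffffffff;
}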
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg) | |||
295 | { | 314 | { |
296 | unsigned long flags; | 315 | unsigned long flags; |
297 | int temp; | 316 | int temp; |
317 | u64 temp_64; | ||
298 | struct xhci_hcd *xhci = (struct xhci_hcd *) arg; | 318 | struct xhci_hcd *xhci = (struct xhci_hcd *) arg; |
299 | int i, j; | 319 | int i, j; |
300 | 320 | ||
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg) | |||
311 | xhci_dbg(xhci, "Event ring:\n"); | 331 | xhci_dbg(xhci, "Event ring:\n"); |
312 | xhci_debug_segment(xhci, xhci->event_ring->deq_seg); | 332 | xhci_debug_segment(xhci, xhci->event_ring->deq_seg); |
313 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | 333 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); |
314 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 334 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
315 | temp &= ERST_PTR_MASK; | 335 | temp_64 &= ~ERST_PTR_MASK; |
316 | xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); | 336 | xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); |
317 | xhci_dbg(xhci, "Command ring:\n"); | 337 | xhci_dbg(xhci, "Command ring:\n"); |
318 | xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); | 338 | xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); |
319 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | 339 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); |
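Note that the mask flips sense with the register width: the old 32-bit read kept the bits selected by ERST_PTR_MASK, whereas on the 64-bit erst_dequeue register ERST_PTR_MASK now names the low-order flag bits (dequeue ERST segment index plus EHB), so the pointer is everything except the mask. A hedged sketch of splitting the register, assuming that low-bit definition:

static void dbg_erst_deq(struct xhci_hcd *xhci)
{
	u64 deq = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);

	xhci_dbg(xhci, "ERST deq ptr = 0x%llx, flag bits = 0x%x\n",
			(unsigned long long) (deq & ~ERST_PTR_MASK),
			(unsigned int) (deq & ERST_PTR_MASK));
}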
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg) | |||
356 | int xhci_run(struct usb_hcd *hcd) | 376 | int xhci_run(struct usb_hcd *hcd) |
357 | { | 377 | { |
358 | u32 temp; | 378 | u32 temp; |
379 | u64 temp_64; | ||
359 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 380 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
360 | void (*doorbell)(struct xhci_hcd *) = NULL; | 381 | void (*doorbell)(struct xhci_hcd *) = NULL; |
361 | 382 | ||
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd) | |||
382 | add_timer(&xhci->event_ring_timer); | 403 | add_timer(&xhci->event_ring_timer); |
383 | #endif | 404 | #endif |
384 | 405 | ||
406 | xhci_dbg(xhci, "Command ring memory map follows:\n"); | ||
407 | xhci_debug_ring(xhci, xhci->cmd_ring); | ||
408 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | ||
409 | xhci_dbg_cmd_ptrs(xhci); | ||
410 | |||
411 | xhci_dbg(xhci, "ERST memory map follows:\n"); | ||
412 | xhci_dbg_erst(xhci, &xhci->erst); | ||
413 | xhci_dbg(xhci, "Event ring:\n"); | ||
414 | xhci_debug_ring(xhci, xhci->event_ring); | ||
415 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | ||
416 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); | ||
417 | temp_64 &= ~ERST_PTR_MASK; | ||
418 | xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); | ||
419 | |||
385 | xhci_dbg(xhci, "// Set the interrupt modulation register\n"); | 420 | xhci_dbg(xhci, "// Set the interrupt modulation register\n"); |
386 | temp = xhci_readl(xhci, &xhci->ir_set->irq_control); | 421 | temp = xhci_readl(xhci, &xhci->ir_set->irq_control); |
387 | temp &= ~ER_IRQ_INTERVAL_MASK; | 422 | temp &= ~ER_IRQ_INTERVAL_MASK; |
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd) | |||
406 | if (NUM_TEST_NOOPS > 0) | 441 | if (NUM_TEST_NOOPS > 0) |
407 | doorbell = xhci_setup_one_noop(xhci); | 442 | doorbell = xhci_setup_one_noop(xhci); |
408 | 443 | ||
409 | xhci_dbg(xhci, "Command ring memory map follows:\n"); | ||
410 | xhci_debug_ring(xhci, xhci->cmd_ring); | ||
411 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | ||
412 | xhci_dbg_cmd_ptrs(xhci); | ||
413 | |||
414 | xhci_dbg(xhci, "ERST memory map follows:\n"); | ||
415 | xhci_dbg_erst(xhci, &xhci->erst); | ||
416 | xhci_dbg(xhci, "Event ring:\n"); | ||
417 | xhci_debug_ring(xhci, xhci->event_ring); | ||
418 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | ||
419 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | ||
420 | temp &= ERST_PTR_MASK; | ||
421 | xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); | ||
422 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]); | ||
423 | xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp); | ||
424 | |||
425 | temp = xhci_readl(xhci, &xhci->op_regs->command); | 444 | temp = xhci_readl(xhci, &xhci->op_regs->command); |
426 | temp |= (CMD_RUN); | 445 | temp |= (CMD_RUN); |
427 | xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", | 446 | xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", |
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
601 | goto exit; | 620 | goto exit; |
602 | } | 621 | } |
603 | if (usb_endpoint_xfer_control(&urb->ep->desc)) | 622 | if (usb_endpoint_xfer_control(&urb->ep->desc)) |
604 | ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb, | 623 | /* We have a spinlock and interrupts disabled, so we must pass |
624 | * atomic context to this function, which may allocate memory. | ||
625 | */ | ||
626 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, | ||
605 | slot_id, ep_index); | 627 | slot_id, ep_index); |
606 | else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) | 628 | else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) |
607 | ret = xhci_queue_bulk_tx(xhci, mem_flags, urb, | 629 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
608 | slot_id, ep_index); | 630 | slot_id, ep_index); |
609 | else | 631 | else |
610 | ret = -EINVAL; | 632 | ret = -EINVAL; |
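The mem_flags to GFP_ATOMIC change is about calling context rather than taste: xhci_urb_enqueue() holds xhci->lock with interrupts disabled at this point, and an allocation that may sleep (GFP_KERNEL and friends) can deadlock or trigger a scheduling-while-atomic splat there. A generic sketch of the constraint (the kzalloc() target and surrounding variables are illustrative, not this function's code):

struct xhci_td *td;
unsigned long flags;
int ret = 0;

spin_lock_irqsave(&xhci->lock, flags);

/* Sleeping allocations are forbidden while the lock is held with IRQs
 * off; GFP_ATOMIC never sleeps and simply fails under memory pressure,
 * which the caller must be prepared to handle.
 */
td = kzalloc(sizeof(*td), GFP_ATOMIC);
if (!td)
	ret = -ENOMEM;

spin_unlock_irqrestore(&xhci->lock, flags);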
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
661 | goto done; | 683 | goto done; |
662 | 684 | ||
663 | xhci_dbg(xhci, "Cancel URB %p\n", urb); | 685 | xhci_dbg(xhci, "Cancel URB %p\n", urb); |
686 | xhci_dbg(xhci, "Event ring:\n"); | ||
687 | xhci_debug_ring(xhci, xhci->event_ring); | ||
664 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | 688 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
665 | ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; | 689 | ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; |
690 | xhci_dbg(xhci, "Endpoint ring:\n"); | ||
691 | xhci_debug_ring(xhci, ep_ring); | ||
666 | td = (struct xhci_td *) urb->hcpriv; | 692 | td = (struct xhci_td *) urb->hcpriv; |
667 | 693 | ||
668 | ep_ring->cancels_pending++; | 694 | ep_ring->cancels_pending++; |
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
696 | struct usb_host_endpoint *ep) | 722 | struct usb_host_endpoint *ep) |
697 | { | 723 | { |
698 | struct xhci_hcd *xhci; | 724 | struct xhci_hcd *xhci; |
699 | struct xhci_device_control *in_ctx; | 725 | struct xhci_container_ctx *in_ctx, *out_ctx; |
726 | struct xhci_input_control_ctx *ctrl_ctx; | ||
727 | struct xhci_slot_ctx *slot_ctx; | ||
700 | unsigned int last_ctx; | 728 | unsigned int last_ctx; |
701 | unsigned int ep_index; | 729 | unsigned int ep_index; |
702 | struct xhci_ep_ctx *ep_ctx; | 730 | struct xhci_ep_ctx *ep_ctx; |
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
724 | } | 752 | } |
725 | 753 | ||
726 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | 754 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
755 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; | ||
756 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
727 | ep_index = xhci_get_endpoint_index(&ep->desc); | 757 | ep_index = xhci_get_endpoint_index(&ep->desc); |
728 | ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; | 758 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
729 | /* If the HC already knows the endpoint is disabled, | 759 | /* If the HC already knows the endpoint is disabled, |
730 | * or the HCD has noted it is disabled, ignore this request | 760 | * or the HCD has noted it is disabled, ignore this request |
731 | */ | 761 | */ |
732 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || | 762 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || |
733 | in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { | 763 | ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { |
734 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", | 764 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
735 | __func__, ep); | 765 | __func__, ep); |
736 | return 0; | 766 | return 0; |
737 | } | 767 | } |
738 | 768 | ||
739 | in_ctx->drop_flags |= drop_flag; | 769 | ctrl_ctx->drop_flags |= drop_flag; |
740 | new_drop_flags = in_ctx->drop_flags; | 770 | new_drop_flags = ctrl_ctx->drop_flags; |
741 | 771 | ||
742 | in_ctx->add_flags = ~drop_flag; | 772 | ctrl_ctx->add_flags = ~drop_flag; |
743 | new_add_flags = in_ctx->add_flags; | 773 | new_add_flags = ctrl_ctx->add_flags; |
744 | 774 | ||
745 | last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags); | 775 | last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); |
776 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | ||
746 | /* Update the last valid endpoint context, if we deleted the last one */ | 777 | /* Update the last valid endpoint context, if we deleted the last one */ |
747 | if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { | 778 | if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { |
748 | in_ctx->slot.dev_info &= ~LAST_CTX_MASK; | 779 | slot_ctx->dev_info &= ~LAST_CTX_MASK; |
749 | in_ctx->slot.dev_info |= LAST_CTX(last_ctx); | 780 | slot_ctx->dev_info |= LAST_CTX(last_ctx); |
750 | } | 781 | } |
751 | new_slot_info = in_ctx->slot.dev_info; | 782 | new_slot_info = slot_ctx->dev_info; |
752 | 783 | ||
753 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); | 784 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
754 | 785 | ||
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
778 | struct usb_host_endpoint *ep) | 809 | struct usb_host_endpoint *ep) |
779 | { | 810 | { |
780 | struct xhci_hcd *xhci; | 811 | struct xhci_hcd *xhci; |
781 | struct xhci_device_control *in_ctx; | 812 | struct xhci_container_ctx *in_ctx, *out_ctx; |
782 | unsigned int ep_index; | 813 | unsigned int ep_index; |
783 | struct xhci_ep_ctx *ep_ctx; | 814 | struct xhci_ep_ctx *ep_ctx; |
815 | struct xhci_slot_ctx *slot_ctx; | ||
816 | struct xhci_input_control_ctx *ctrl_ctx; | ||
784 | u32 added_ctxs; | 817 | u32 added_ctxs; |
785 | unsigned int last_ctx; | 818 | unsigned int last_ctx; |
786 | u32 new_add_flags, new_drop_flags, new_slot_info; | 819 | u32 new_add_flags, new_drop_flags, new_slot_info; |
787 | int ret = 0; | 820 | int ret = 0; |
788 | 821 | ||
789 | ret = xhci_check_args(hcd, udev, ep, 1, __func__); | 822 | ret = xhci_check_args(hcd, udev, ep, 1, __func__); |
790 | if (ret <= 0) | 823 | if (ret <= 0) { |
824 | /* So we won't queue a reset ep command for a root hub */ | ||
825 | ep->hcpriv = NULL; | ||
791 | return ret; | 826 | return ret; |
827 | } | ||
792 | xhci = hcd_to_xhci(hcd); | 828 | xhci = hcd_to_xhci(hcd); |
793 | 829 | ||
794 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); | 830 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); |
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
810 | } | 846 | } |
811 | 847 | ||
812 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | 848 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
849 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; | ||
850 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
813 | ep_index = xhci_get_endpoint_index(&ep->desc); | 851 | ep_index = xhci_get_endpoint_index(&ep->desc); |
814 | ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; | 852 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
815 | /* If the HCD has already noted the endpoint is enabled, | 853 | /* If the HCD has already noted the endpoint is enabled, |
816 | * ignore this request. | 854 | * ignore this request. |
817 | */ | 855 | */ |
818 | if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { | 856 | if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { |
819 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", | 857 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
820 | __func__, ep); | 858 | __func__, ep); |
821 | return 0; | 859 | return 0; |
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
833 | return -ENOMEM; | 871 | return -ENOMEM; |
834 | } | 872 | } |
835 | 873 | ||
836 | in_ctx->add_flags |= added_ctxs; | 874 | ctrl_ctx->add_flags |= added_ctxs; |
837 | new_add_flags = in_ctx->add_flags; | 875 | new_add_flags = ctrl_ctx->add_flags; |
838 | 876 | ||
839 | /* If xhci_endpoint_disable() was called for this endpoint, but the | 877 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
840 | * xHC hasn't been notified yet through the check_bandwidth() call, | 878 | * xHC hasn't been notified yet through the check_bandwidth() call, |
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
842 | * descriptors. We must drop and re-add this endpoint, so we leave the | 880 | * descriptors. We must drop and re-add this endpoint, so we leave the |
843 | * drop flags alone. | 881 | * drop flags alone. |
844 | */ | 882 | */ |
845 | new_drop_flags = in_ctx->drop_flags; | 883 | new_drop_flags = ctrl_ctx->drop_flags; |
846 | 884 | ||
885 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | ||
847 | /* Update the last valid endpoint context, if we just added one past */ | 886 | /* Update the last valid endpoint context, if we just added one past */ |
848 | if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { | 887 | if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { |
849 | in_ctx->slot.dev_info &= ~LAST_CTX_MASK; | 888 | slot_ctx->dev_info &= ~LAST_CTX_MASK; |
850 | in_ctx->slot.dev_info |= LAST_CTX(last_ctx); | 889 | slot_ctx->dev_info |= LAST_CTX(last_ctx); |
851 | } | 890 | } |
852 | new_slot_info = in_ctx->slot.dev_info; | 891 | new_slot_info = slot_ctx->dev_info; |
892 | |||
893 | /* Store the usb_device pointer for later use */ | ||
894 | ep->hcpriv = udev; | ||
853 | 895 | ||
854 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", | 896 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
855 | (unsigned int) ep->desc.bEndpointAddress, | 897 | (unsigned int) ep->desc.bEndpointAddress, |
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
860 | return 0; | 902 | return 0; |
861 | } | 903 | } |
862 | 904 | ||
863 | static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) | 905 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
864 | { | 906 | { |
907 | struct xhci_input_control_ctx *ctrl_ctx; | ||
865 | struct xhci_ep_ctx *ep_ctx; | 908 | struct xhci_ep_ctx *ep_ctx; |
909 | struct xhci_slot_ctx *slot_ctx; | ||
866 | int i; | 910 | int i; |
867 | 911 | ||
868 | /* When a device's add flag and drop flag are zero, any subsequent | 912 | /* When a device's add flag and drop flag are zero, any subsequent |
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) | |||
870 | * untouched. Make sure we don't leave any old state in the input | 914 | * untouched. Make sure we don't leave any old state in the input |
871 | * endpoint contexts. | 915 | * endpoint contexts. |
872 | */ | 916 | */ |
873 | virt_dev->in_ctx->drop_flags = 0; | 917 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
874 | virt_dev->in_ctx->add_flags = 0; | 918 | ctrl_ctx->drop_flags = 0; |
875 | virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; | 919 | ctrl_ctx->add_flags = 0; |
920 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | ||
921 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | ||
876 | /* Endpoint 0 is always valid */ | 922 | /* Endpoint 0 is always valid */ |
877 | virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); | 923 | slot_ctx->dev_info |= LAST_CTX(1); |
878 | for (i = 1; i < 31; ++i) { | 924 | for (i = 1; i < 31; ++i) { |
879 | ep_ctx = &virt_dev->in_ctx->ep[i]; | 925 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
880 | ep_ctx->ep_info = 0; | 926 | ep_ctx->ep_info = 0; |
881 | ep_ctx->ep_info2 = 0; | 927 | ep_ctx->ep_info2 = 0; |
882 | ep_ctx->deq[0] = 0; | 928 | ep_ctx->deq = 0; |
883 | ep_ctx->deq[1] = 0; | ||
884 | ep_ctx->tx_info = 0; | 929 | ep_ctx->tx_info = 0; |
885 | } | 930 | } |
886 | } | 931 | } |
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
903 | unsigned long flags; | 948 | unsigned long flags; |
904 | struct xhci_hcd *xhci; | 949 | struct xhci_hcd *xhci; |
905 | struct xhci_virt_device *virt_dev; | 950 | struct xhci_virt_device *virt_dev; |
951 | struct xhci_input_control_ctx *ctrl_ctx; | ||
952 | struct xhci_slot_ctx *slot_ctx; | ||
906 | 953 | ||
907 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | 954 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); |
908 | if (ret <= 0) | 955 | if (ret <= 0) |
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
918 | virt_dev = xhci->devs[udev->slot_id]; | 965 | virt_dev = xhci->devs[udev->slot_id]; |
919 | 966 | ||
920 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | 967 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
921 | virt_dev->in_ctx->add_flags |= SLOT_FLAG; | 968 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
922 | virt_dev->in_ctx->add_flags &= ~EP0_FLAG; | 969 | ctrl_ctx->add_flags |= SLOT_FLAG; |
923 | virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; | 970 | ctrl_ctx->add_flags &= ~EP0_FLAG; |
924 | virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; | 971 | ctrl_ctx->drop_flags &= ~SLOT_FLAG; |
972 | ctrl_ctx->drop_flags &= ~EP0_FLAG; | ||
925 | xhci_dbg(xhci, "New Input Control Context:\n"); | 973 | xhci_dbg(xhci, "New Input Control Context:\n"); |
926 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, | 974 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
927 | LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); | 975 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
976 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | ||
928 | 977 | ||
929 | spin_lock_irqsave(&xhci->lock, flags); | 978 | spin_lock_irqsave(&xhci->lock, flags); |
930 | ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, | 979 | ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma, |
931 | udev->slot_id); | 980 | udev->slot_id); |
932 | if (ret < 0) { | 981 | if (ret < 0) { |
933 | spin_unlock_irqrestore(&xhci->lock, flags); | 982 | spin_unlock_irqrestore(&xhci->lock, flags); |
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
982 | } | 1031 | } |
983 | 1032 | ||
984 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | 1033 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
985 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, | 1034 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
986 | LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); | 1035 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); |
987 | 1036 | ||
988 | xhci_zero_in_ctx(virt_dev); | 1037 | xhci_zero_in_ctx(xhci, virt_dev); |
989 | /* Free any old rings */ | 1038 | /* Free any old rings */ |
990 | for (i = 1; i < 31; ++i) { | 1039 | for (i = 1; i < 31; ++i) { |
991 | if (virt_dev->new_ep_rings[i]) { | 1040 | if (virt_dev->new_ep_rings[i]) { |
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1023 | virt_dev->new_ep_rings[i] = NULL; | 1072 | virt_dev->new_ep_rings[i] = NULL; |
1024 | } | 1073 | } |
1025 | } | 1074 | } |
1026 | xhci_zero_in_ctx(virt_dev); | 1075 | xhci_zero_in_ctx(xhci, virt_dev); |
1076 | } | ||
1077 | |||
1078 | /* Deal with stalled endpoints. The core should have sent the control message | ||
1079 | * to clear the halt condition. However, we need to make the xHCI hardware | ||
1080 | * reset its sequence number, since a device will expect a sequence number of | ||
1081 | * zero after the halt condition is cleared. | ||
1082 | * Context: in_interrupt | ||
1083 | */ | ||
1084 | void xhci_endpoint_reset(struct usb_hcd *hcd, | ||
1085 | struct usb_host_endpoint *ep) | ||
1086 | { | ||
1087 | struct xhci_hcd *xhci; | ||
1088 | struct usb_device *udev; | ||
1089 | unsigned int ep_index; | ||
1090 | unsigned long flags; | ||
1091 | int ret; | ||
1092 | struct xhci_dequeue_state deq_state; | ||
1093 | struct xhci_ring *ep_ring; | ||
1094 | |||
1095 | xhci = hcd_to_xhci(hcd); | ||
1096 | udev = (struct usb_device *) ep->hcpriv; | ||
1097 | /* Called with a root hub endpoint (or an endpoint that wasn't added | ||
1098 | * with xhci_add_endpoint() | ||
1098 | * with xhci_add_endpoint()). | ||
1099 | */ | ||
1100 | if (!ep->hcpriv) | ||
1101 | return; | ||
1102 | ep_index = xhci_get_endpoint_index(&ep->desc); | ||
1103 | ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; | ||
1104 | if (!ep_ring->stopped_td) { | ||
1105 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", | ||
1106 | ep->desc.bEndpointAddress); | ||
1107 | return; | ||
1108 | } | ||
1109 | |||
1110 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); | ||
1111 | spin_lock_irqsave(&xhci->lock, flags); | ||
1112 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); | ||
1113 | /* | ||
1114 | * Can't change the ring dequeue pointer until it's transitioned to the | ||
1115 | * stopped state, which is only upon a successful reset endpoint | ||
1116 | * command. Better hope that last command worked! | ||
1117 | */ | ||
1118 | if (!ret) { | ||
1119 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | ||
1120 | /* We need to move the HW's dequeue pointer past this TD, | ||
1121 | * or it will attempt to resend it on the next doorbell ring. | ||
1122 | */ | ||
1123 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | ||
1124 | ep_index, ep_ring->stopped_td, &deq_state); | ||
1125 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | ||
1126 | xhci_queue_new_dequeue_state(xhci, ep_ring, | ||
1127 | udev->slot_id, | ||
1128 | ep_index, &deq_state); | ||
1129 | kfree(ep_ring->stopped_td); | ||
1130 | xhci_ring_cmd_db(xhci); | ||
1131 | } | ||
1132 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1133 | |||
1134 | if (ret) | ||
1135 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); | ||
1027 | } | 1136 | } |
1028 | 1137 | ||
1029 | /* | 1138 | /* |
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1120 | struct xhci_virt_device *virt_dev; | 1229 | struct xhci_virt_device *virt_dev; |
1121 | int ret = 0; | 1230 | int ret = 0; |
1122 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 1231 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1123 | u32 temp; | 1232 | struct xhci_slot_ctx *slot_ctx; |
1233 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1234 | u64 temp_64; | ||
1124 | 1235 | ||
1125 | if (!udev->slot_id) { | 1236 | if (!udev->slot_id) { |
1126 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); | 1237 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); |
@@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1133 | if (!udev->config) | 1244 | if (!udev->config) |
1134 | xhci_setup_addressable_virt_dev(xhci, udev); | 1245 | xhci_setup_addressable_virt_dev(xhci, udev); |
1135 | /* Otherwise, assume the core has the device configured how it wants */ | 1246 | /* Otherwise, assume the core has the device configured how it wants */ |
1247 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | ||
1248 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | ||
1136 | 1249 | ||
1137 | spin_lock_irqsave(&xhci->lock, flags); | 1250 | spin_lock_irqsave(&xhci->lock, flags); |
1138 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, | 1251 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
1139 | udev->slot_id); | 1252 | udev->slot_id); |
1140 | if (ret) { | 1253 | if (ret) { |
1141 | spin_unlock_irqrestore(&xhci->lock, flags); | 1254 | spin_unlock_irqrestore(&xhci->lock, flags); |
1142 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | 1255 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1176 | default: | 1289 | default: |
1177 | xhci_err(xhci, "ERROR: unexpected command completion " | 1290 | xhci_err(xhci, "ERROR: unexpected command completion " |
1178 | "code 0x%x.\n", virt_dev->cmd_status); | 1291 | "code 0x%x.\n", virt_dev->cmd_status); |
1292 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | ||
1293 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); | ||
1179 | ret = -EINVAL; | 1294 | ret = -EINVAL; |
1180 | break; | 1295 | break; |
1181 | } | 1296 | } |
1182 | if (ret) { | 1297 | if (ret) { |
1183 | return ret; | 1298 | return ret; |
1184 | } | 1299 | } |
1185 | temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); | 1300 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
1186 | xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); | 1301 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); |
1187 | temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); | 1302 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", |
1188 | xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp); | ||
1189 | xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n", | ||
1190 | udev->slot_id, | ||
1191 | &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id], | ||
1192 | xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]); | ||
1193 | xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n", | ||
1194 | udev->slot_id, | 1303 | udev->slot_id, |
1195 | &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], | 1304 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
1196 | xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); | 1305 | (unsigned long long) |
1306 | xhci->dcbaa->dev_context_ptrs[udev->slot_id]); | ||
1197 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", | 1307 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
1198 | (unsigned long long)virt_dev->out_ctx_dma); | 1308 | (unsigned long long)virt_dev->out_ctx->dma); |
1199 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 1309 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
1200 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); | 1310 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
1201 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | 1311 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
1202 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); | 1312 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
1203 | /* | 1313 | /* |
1204 | * USB core uses address 1 for the roothubs, so we add one to the | 1314 | * USB core uses address 1 for the roothubs, so we add one to the |
1205 | * address given back to us by the HC. | 1315 | * address given back to us by the HC. |
1206 | */ | 1316 | */ |
1207 | udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; | 1317 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
1318 | udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; | ||
1208 | /* Zero the input context control for later use */ | 1319 | /* Zero the input context control for later use */ |
1209 | virt_dev->in_ctx->add_flags = 0; | 1320 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1210 | virt_dev->in_ctx->drop_flags = 0; | 1321 | ctrl_ctx->add_flags = 0; |
1211 | /* Mirror flags in the output context for future ep enable/disable */ | 1322 | ctrl_ctx->drop_flags = 0; |
1212 | virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | ||
1213 | virt_dev->out_ctx->drop_flags = 0; | ||
1214 | 1323 | ||
1215 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); | 1324 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); |
1216 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ | 1325 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ |
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void) | |||
1252 | /* xhci_device_control has eight fields, and also | 1361 | /* xhci_device_control has eight fields, and also |
1253 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | 1362 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
1254 | */ | 1363 | */ |
1255 | BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8); | ||
1256 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); | 1364 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
1257 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | 1365 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
1258 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | 1366 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
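End of the xhci-hcd.c changes. The dropped BUILD_BUG_ON is a consequence of the refactor: xhci_device_control no longer mirrors a fixed hardware layout once contexts live inside an opaque, HCC-parameter-sized container. The hardware-defined pieces can still be pinned to their spec sizes at compile time; a hedged sketch of equivalent checks (sizes per the 32-byte context layout, not lines from this patch):

/* Slot, endpoint and input control contexts are each eight 32-bit fields
 * in the 32-byte-context layout; 64-byte contexts only append reserved
 * space at the end of each entry.
 */
BUILD_BUG_ON(sizeof(struct xhci_input_control_ctx) != 8*32/8);
BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);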
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c8a72de1c508..e6b9a1c6002d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |||
88 | return; | 88 | return; |
89 | prev->next = next; | 89 | prev->next = next; |
90 | if (link_trbs) { | 90 | if (link_trbs) { |
91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; | 91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; |
92 | 92 | ||
93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | 93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ |
94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | 94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; |
@@ -189,6 +189,63 @@ fail: | |||
189 | return 0; | 189 | return 0; |
190 | } | 190 | } |
191 | 191 | ||
192 | #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) | ||
193 | |||
194 | struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, | ||
195 | int type, gfp_t flags) | ||
196 | { | ||
197 | struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); | ||
198 | if (!ctx) | ||
199 | return NULL; | ||
200 | |||
201 | BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); | ||
202 | ctx->type = type; | ||
203 | ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; | ||
204 | if (type == XHCI_CTX_TYPE_INPUT) | ||
205 | ctx->size += CTX_SIZE(xhci->hcc_params); | ||
206 | |||
207 | ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); | ||
208 | memset(ctx->bytes, 0, ctx->size); | ||
209 | return ctx; | ||
210 | } | ||
211 | |||
212 | void xhci_free_container_ctx(struct xhci_hcd *xhci, | ||
213 | struct xhci_container_ctx *ctx) | ||
214 | { | ||
215 | dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); | ||
216 | kfree(ctx); | ||
217 | } | ||
218 | |||
219 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, | ||
220 | struct xhci_container_ctx *ctx) | ||
221 | { | ||
222 | BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); | ||
223 | return (struct xhci_input_control_ctx *)ctx->bytes; | ||
224 | } | ||
225 | |||
226 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, | ||
227 | struct xhci_container_ctx *ctx) | ||
228 | { | ||
229 | if (ctx->type == XHCI_CTX_TYPE_DEVICE) | ||
230 | return (struct xhci_slot_ctx *)ctx->bytes; | ||
231 | |||
232 | return (struct xhci_slot_ctx *) | ||
233 | (ctx->bytes + CTX_SIZE(xhci->hcc_params)); | ||
234 | } | ||
235 | |||
236 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | ||
237 | struct xhci_container_ctx *ctx, | ||
238 | unsigned int ep_index) | ||
239 | { | ||
240 | /* increment ep index by offset of start of ep ctx array */ | ||
241 | ep_index++; | ||
242 | if (ctx->type == XHCI_CTX_TYPE_INPUT) | ||
243 | ep_index++; | ||
244 | |||
245 | return (struct xhci_ep_ctx *) | ||
246 | (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); | ||
247 | } | ||
248 | |||
192 | /* All the xhci_tds in the ring's TD list should be freed at this point */ | 249 | /* All the xhci_tds in the ring's TD list should be freed at this point */ |
193 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | 250 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
194 | { | 251 | { |
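The accessor trio above fixes the container layout: the input control context (input containers only) sits at offset 0, the slot context comes next, and endpoint contexts start one (device) or two (input) CTX_SIZE slots in. A worked example, assuming 64-byte contexts, i.e. HCC_64BYTE_CONTEXT set so CTX_SIZE == 64:

/* Byte offsets inside an *input* container with 64-byte contexts:
 *
 *   ctx->bytes +   0 : input control context
 *   ctx->bytes +  64 : slot context
 *   ctx->bytes + 128 : endpoint 0 context   (ep_index 0 -> array slot 2)
 *   ctx->bytes + 192 : endpoint 1 context   (ep_index 1 -> array slot 3)
 *
 * ctx->size is 2048 + 64 = 2112 bytes: 32 contexts as in a device
 * container, plus one more for the input control context.
 */
struct xhci_ep_ctx *ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
/* ep0_ctx == (struct xhci_ep_ctx *)(virt_dev->in_ctx->bytes + 2 * 64) */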
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
200 | return; | 257 | return; |
201 | 258 | ||
202 | dev = xhci->devs[slot_id]; | 259 | dev = xhci->devs[slot_id]; |
203 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; | 260 | xhci->dcbaa->dev_context_ptrs[slot_id] = 0; |
204 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | ||
205 | if (!dev) | 261 | if (!dev) |
206 | return; | 262 | return; |
207 | 263 | ||
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
210 | xhci_ring_free(xhci, dev->ep_rings[i]); | 266 | xhci_ring_free(xhci, dev->ep_rings[i]); |
211 | 267 | ||
212 | if (dev->in_ctx) | 268 | if (dev->in_ctx) |
213 | dma_pool_free(xhci->device_pool, | 269 | xhci_free_container_ctx(xhci, dev->in_ctx); |
214 | dev->in_ctx, dev->in_ctx_dma); | ||
215 | if (dev->out_ctx) | 270 | if (dev->out_ctx) |
216 | dma_pool_free(xhci->device_pool, | 271 | xhci_free_container_ctx(xhci, dev->out_ctx); |
217 | dev->out_ctx, dev->out_ctx_dma); | 272 | |
218 | kfree(xhci->devs[slot_id]); | 273 | kfree(xhci->devs[slot_id]); |
219 | xhci->devs[slot_id] = 0; | 274 | xhci->devs[slot_id] = 0; |
220 | } | 275 | } |
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
222 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | 277 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, |
223 | struct usb_device *udev, gfp_t flags) | 278 | struct usb_device *udev, gfp_t flags) |
224 | { | 279 | { |
225 | dma_addr_t dma; | ||
226 | struct xhci_virt_device *dev; | 280 | struct xhci_virt_device *dev; |
227 | 281 | ||
228 | /* Slot ID 0 is reserved */ | 282 | /* Slot ID 0 is reserved */ |
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
236 | return 0; | 290 | return 0; |
237 | dev = xhci->devs[slot_id]; | 291 | dev = xhci->devs[slot_id]; |
238 | 292 | ||
239 | /* Allocate the (output) device context that will be used in the HC */ | 293 | /* Allocate the (output) device context that will be used in the HC. */ |
240 | dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | 294 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); |
241 | if (!dev->out_ctx) | 295 | if (!dev->out_ctx) |
242 | goto fail; | 296 | goto fail; |
243 | dev->out_ctx_dma = dma; | 297 | |
244 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, | 298 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, |
245 | (unsigned long long)dma); | 299 | (unsigned long long)dev->out_ctx->dma); |
246 | memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); | ||
247 | 300 | ||
248 | /* Allocate the (input) device context for address device command */ | 301 | /* Allocate the (input) device context for address device command */ |
249 | dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | 302 | dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); |
250 | if (!dev->in_ctx) | 303 | if (!dev->in_ctx) |
251 | goto fail; | 304 | goto fail; |
252 | dev->in_ctx_dma = dma; | 305 | |
253 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, | 306 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, |
254 | (unsigned long long)dma); | 307 | (unsigned long long)dev->in_ctx->dma); |
255 | memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); | ||
256 | 308 | ||
257 | /* Allocate endpoint 0 ring */ | 309 | /* Allocate endpoint 0 ring */ |
258 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); | 310 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); |
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
261 | 313 | ||
262 | init_completion(&dev->cmd_completion); | 314 | init_completion(&dev->cmd_completion); |
263 | 315 | ||
264 | /* | 316 | /* Point to output device context in dcbaa. */ |
265 | * Point to output device context in dcbaa; skip the output control | 317 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; |
266 | * context, which is eight 32 bit fields (or 32 bytes long) | ||
267 | */ | ||
268 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = | ||
269 | (u32) dev->out_ctx_dma + (32); | ||
270 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", | 318 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", |
271 | slot_id, | 319 | slot_id, |
272 | &xhci->dcbaa->dev_context_ptrs[2*slot_id], | 320 | &xhci->dcbaa->dev_context_ptrs[slot_id], |
273 | (unsigned long long)dev->out_ctx_dma); | 321 | (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); |
274 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | ||
275 | 322 | ||
276 | return 1; | 323 | return 1; |
277 | fail: | 324 | fail: |
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
285 | struct xhci_virt_device *dev; | 332 | struct xhci_virt_device *dev; |
286 | struct xhci_ep_ctx *ep0_ctx; | 333 | struct xhci_ep_ctx *ep0_ctx; |
287 | struct usb_device *top_dev; | 334 | struct usb_device *top_dev; |
335 | struct xhci_slot_ctx *slot_ctx; | ||
336 | struct xhci_input_control_ctx *ctrl_ctx; | ||
288 | 337 | ||
289 | dev = xhci->devs[udev->slot_id]; | 338 | dev = xhci->devs[udev->slot_id]; |
290 | /* Slot ID 0 is reserved */ | 339 | /* Slot ID 0 is reserved */ |
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
293 | udev->slot_id); | 342 | udev->slot_id); |
294 | return -EINVAL; | 343 | return -EINVAL; |
295 | } | 344 | } |
296 | ep0_ctx = &dev->in_ctx->ep[0]; | 345 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); |
346 | ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); | ||
347 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | ||
297 | 348 | ||
298 | /* 2) New slot context and endpoint 0 context are valid*/ | 349 | /* 2) New slot context and endpoint 0 context are valid*/ |
299 | dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | 350 | ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; |
300 | 351 | ||
301 | /* 3) Only the control endpoint is valid - one endpoint context */ | 352 | /* 3) Only the control endpoint is valid - one endpoint context */ |
302 | dev->in_ctx->slot.dev_info |= LAST_CTX(1); | 353 | slot_ctx->dev_info |= LAST_CTX(1); |
303 | 354 | ||
304 | switch (udev->speed) { | 355 | switch (udev->speed) { |
305 | case USB_SPEED_SUPER: | 356 | case USB_SPEED_SUPER: |
306 | dev->in_ctx->slot.dev_info |= (u32) udev->route; | 357 | slot_ctx->dev_info |= (u32) udev->route; |
307 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; | 358 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; |
308 | break; | 359 | break; |
309 | case USB_SPEED_HIGH: | 360 | case USB_SPEED_HIGH: |
310 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; | 361 | slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; |
311 | break; | 362 | break; |
312 | case USB_SPEED_FULL: | 363 | case USB_SPEED_FULL: |
313 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; | 364 | slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; |
314 | break; | 365 | break; |
315 | case USB_SPEED_LOW: | 366 | case USB_SPEED_LOW: |
316 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; | 367 | slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; |
317 | break; | 368 | break; |
318 | case USB_SPEED_VARIABLE: | 369 | case USB_SPEED_VARIABLE: |
319 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | 370 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); |
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
327 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | 378 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; |
328 | top_dev = top_dev->parent) | 379 | top_dev = top_dev->parent) |
329 | /* Found device below root hub */; | 380 | /* Found device below root hub */; |
330 | dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); | 381 | slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); |
331 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); | 382 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); |
332 | 383 | ||
333 | /* Is this a LS/FS device under a HS hub? */ | 384 | /* Is this a LS/FS device under a HS hub? */ |
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
337 | */ | 388 | */ |
338 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | 389 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && |
339 | udev->tt) { | 390 | udev->tt) { |
340 | dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; | 391 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
341 | dev->in_ctx->slot.tt_info |= udev->ttport << 8; | 392 | slot_ctx->tt_info |= udev->ttport << 8; |
342 | } | 393 | } |
343 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); | 394 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
344 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | 395 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
360 | ep0_ctx->ep_info2 |= MAX_BURST(0); | 411 | ep0_ctx->ep_info2 |= MAX_BURST(0); |
361 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | 412 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); |
362 | 413 | ||
363 | ep0_ctx->deq[0] = | 414 | ep0_ctx->deq = |
364 | dev->ep_rings[0]->first_seg->dma; | 415 | dev->ep_rings[0]->first_seg->dma; |
365 | ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; | 416 | ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; |
366 | ep0_ctx->deq[1] = 0; | ||
367 | 417 | ||
368 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | 418 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ |
369 | 419 | ||
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
470 | unsigned int max_burst; | 520 | unsigned int max_burst; |
471 | 521 | ||
472 | ep_index = xhci_get_endpoint_index(&ep->desc); | 522 | ep_index = xhci_get_endpoint_index(&ep->desc); |
473 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | 523 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
474 | 524 | ||
475 | /* Set up the endpoint ring */ | 525 | /* Set up the endpoint ring */ |
476 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); | 526 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); |
477 | if (!virt_dev->new_ep_rings[ep_index]) | 527 | if (!virt_dev->new_ep_rings[ep_index]) |
478 | return -ENOMEM; | 528 | return -ENOMEM; |
479 | ep_ring = virt_dev->new_ep_rings[ep_index]; | 529 | ep_ring = virt_dev->new_ep_rings[ep_index]; |
480 | ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; | 530 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; |
481 | ep_ctx->deq[1] = 0; | ||
482 | 531 | ||
483 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | 532 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); |
484 | 533 | ||
485 | /* FIXME dig Mult and streams info out of ep companion desc */ | 534 | /* FIXME dig Mult and streams info out of ep companion desc */ |
486 | 535 | ||
487 | /* Allow 3 retries for everything but isoc */ | 536 | /* Allow 3 retries for everything but isoc; |
537 | * error count = 0 means infinite retries. | ||
538 | */ | ||
488 | if (!usb_endpoint_xfer_isoc(&ep->desc)) | 539 | if (!usb_endpoint_xfer_isoc(&ep->desc)) |
489 | ep_ctx->ep_info2 = ERROR_COUNT(3); | 540 | ep_ctx->ep_info2 = ERROR_COUNT(3); |
490 | else | 541 | else |
491 | ep_ctx->ep_info2 = ERROR_COUNT(0); | 542 | ep_ctx->ep_info2 = ERROR_COUNT(1); |
492 | 543 | ||
493 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | 544 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); |
494 | 545 | ||
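For orientation, ep_info2 above is the second dword of the endpoint context, and the macros pack their arguments into fixed bit ranges. A rough sketch of the assumed layout follows; xhci.h holds the authoritative definitions, so treat the bit positions as the editor's reading of the xHCI endpoint context rather than a quote from the patch:

    /* ep_info2 (endpoint context, dword 1), assumed layout:
     *   bits  2:1   CErr            - ERROR_COUNT(); 0 = retry forever
     *   bits  5:3   Endpoint Type   - via xhci_get_endpoint_type()
     *   bits 15:8   Max Burst Size  - MAX_BURST()
     *   bits 31:16  Max Packet Size - MAX_PACKET()
     */
    ep_ctx->ep_info2 = ERROR_COUNT(3)
                       | xhci_get_endpoint_type(udev, ep)
                       | MAX_PACKET(max_packet)
                       | MAX_BURST(max_burst);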
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
498 | max_packet = ep->desc.wMaxPacketSize; | 549 | max_packet = ep->desc.wMaxPacketSize; |
499 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 550 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); |
500 | /* dig out max burst from ep companion desc */ | 551 | /* dig out max burst from ep companion desc */ |
501 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | 552 | if (!ep->ss_ep_comp) { |
553 | xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); | ||
554 | max_packet = 0; | ||
555 | } else { | ||
556 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | ||
557 | } | ||
502 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); | 558 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); |
503 | break; | 559 | break; |
504 | case USB_SPEED_HIGH: | 560 | case USB_SPEED_HIGH: |
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, | |||
531 | struct xhci_ep_ctx *ep_ctx; | 587 | struct xhci_ep_ctx *ep_ctx; |
532 | 588 | ||
533 | ep_index = xhci_get_endpoint_index(&ep->desc); | 589 | ep_index = xhci_get_endpoint_index(&ep->desc); |
534 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | 590 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
535 | 591 | ||
536 | ep_ctx->ep_info = 0; | 592 | ep_ctx->ep_info = 0; |
537 | ep_ctx->ep_info2 = 0; | 593 | ep_ctx->ep_info2 = 0; |
538 | ep_ctx->deq[0] = 0; | 594 | ep_ctx->deq = 0; |
539 | ep_ctx->deq[1] = 0; | ||
540 | ep_ctx->tx_info = 0; | 595 | ep_ctx->tx_info = 0; |
541 | /* Don't free the endpoint ring until the set interface or configuration | 596 | /* Don't free the endpoint ring until the set interface or configuration |
542 | * request succeeds. | 597 | * request succeeds. |
543 | */ | 598 | */ |
544 | } | 599 | } |
545 | 600 | ||
601 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ | ||
602 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | ||
603 | { | ||
604 | int i; | ||
605 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | ||
606 | int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | ||
607 | |||
608 | xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); | ||
609 | |||
610 | if (!num_sp) | ||
611 | return 0; | ||
612 | |||
613 | xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); | ||
614 | if (!xhci->scratchpad) | ||
615 | goto fail_sp; | ||
616 | |||
617 | xhci->scratchpad->sp_array = | ||
618 | pci_alloc_consistent(to_pci_dev(dev), | ||
619 | num_sp * sizeof(u64), | ||
620 | &xhci->scratchpad->sp_dma); | ||
621 | if (!xhci->scratchpad->sp_array) | ||
622 | goto fail_sp2; | ||
623 | |||
624 | xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); | ||
625 | if (!xhci->scratchpad->sp_buffers) | ||
626 | goto fail_sp3; | ||
627 | |||
628 | xhci->scratchpad->sp_dma_buffers = | ||
629 | kzalloc(sizeof(dma_addr_t) * num_sp, flags); | ||
630 | |||
631 | if (!xhci->scratchpad->sp_dma_buffers) | ||
632 | goto fail_sp4; | ||
633 | |||
634 | xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; | ||
635 | for (i = 0; i < num_sp; i++) { | ||
636 | dma_addr_t dma; | ||
637 | void *buf = pci_alloc_consistent(to_pci_dev(dev), | ||
638 | xhci->page_size, &dma); | ||
639 | if (!buf) | ||
640 | goto fail_sp5; | ||
641 | |||
642 | xhci->scratchpad->sp_array[i] = dma; | ||
643 | xhci->scratchpad->sp_buffers[i] = buf; | ||
644 | xhci->scratchpad->sp_dma_buffers[i] = dma; | ||
645 | } | ||
646 | |||
647 | return 0; | ||
648 | |||
649 | fail_sp5: | ||
650 | for (i = i - 1; i >= 0; i--) { | ||
651 | pci_free_consistent(to_pci_dev(dev), xhci->page_size, | ||
652 | xhci->scratchpad->sp_buffers[i], | ||
653 | xhci->scratchpad->sp_dma_buffers[i]); | ||
654 | } | ||
655 | kfree(xhci->scratchpad->sp_dma_buffers); | ||
656 | |||
657 | fail_sp4: | ||
658 | kfree(xhci->scratchpad->sp_buffers); | ||
659 | |||
660 | fail_sp3: | ||
661 | pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), | ||
662 | xhci->scratchpad->sp_array, | ||
663 | xhci->scratchpad->sp_dma); | ||
664 | |||
665 | fail_sp2: | ||
666 | kfree(xhci->scratchpad); | ||
667 | xhci->scratchpad = NULL; | ||
668 | |||
669 | fail_sp: | ||
670 | return -ENOMEM; | ||
671 | } | ||
672 | |||
673 | static void scratchpad_free(struct xhci_hcd *xhci) | ||
674 | { | ||
675 | int num_sp; | ||
676 | int i; | ||
677 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
678 | |||
679 | if (!xhci->scratchpad) | ||
680 | return; | ||
681 | |||
682 | num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | ||
683 | |||
684 | for (i = 0; i < num_sp; i++) { | ||
685 | pci_free_consistent(pdev, xhci->page_size, | ||
686 | xhci->scratchpad->sp_buffers[i], | ||
687 | xhci->scratchpad->sp_dma_buffers[i]); | ||
688 | } | ||
689 | kfree(xhci->scratchpad->sp_dma_buffers); | ||
690 | kfree(xhci->scratchpad->sp_buffers); | ||
691 | pci_free_consistent(pdev, num_sp * sizeof(u64), | ||
692 | xhci->scratchpad->sp_array, | ||
693 | xhci->scratchpad->sp_dma); | ||
694 | kfree(xhci->scratchpad); | ||
695 | xhci->scratchpad = NULL; | ||
696 | } | ||
697 | |||
546 | void xhci_mem_cleanup(struct xhci_hcd *xhci) | 698 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
547 | { | 699 | { |
548 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 700 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
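A short note on the scratchpad code added above: it is a two-level structure. Slot 0 of the device context base address array holds the DMA address of sp_array, an array of num_sp 64-bit pointers, and each entry in turn points at one page-sized buffer reserved for the controller's private use; sp_buffers and sp_dma_buffers only exist so scratchpad_free() can give everything back. For example, with num_sp = 4 and 4 KiB pages the driver hands the hardware a 32-byte pointer array plus 16 KiB of scratch memory.

    /* Sketch of the resulting layout:
     *   dcbaa->dev_context_ptrs[0]  -->  sp_array[num_sp]   (u64 DMA addresses)
     *   sp_array[i]                 -->  one xhci->page_size buffer each
     */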
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
551 | 703 | ||
552 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 704 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
553 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | 705 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); |
554 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); | 706 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); |
555 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | 707 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); |
556 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); | ||
557 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | ||
558 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 708 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
559 | if (xhci->erst.entries) | 709 | if (xhci->erst.entries) |
560 | pci_free_consistent(pdev, size, | 710 | pci_free_consistent(pdev, size, |
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
566 | xhci->event_ring = NULL; | 716 | xhci->event_ring = NULL; |
567 | xhci_dbg(xhci, "Freed event ring\n"); | 717 | xhci_dbg(xhci, "Freed event ring\n"); |
568 | 718 | ||
569 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); | 719 | xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); |
570 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); | ||
571 | if (xhci->cmd_ring) | 720 | if (xhci->cmd_ring) |
572 | xhci_ring_free(xhci, xhci->cmd_ring); | 721 | xhci_ring_free(xhci, xhci->cmd_ring); |
573 | xhci->cmd_ring = NULL; | 722 | xhci->cmd_ring = NULL; |
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
586 | xhci->device_pool = NULL; | 735 | xhci->device_pool = NULL; |
587 | xhci_dbg(xhci, "Freed device context pool\n"); | 736 | xhci_dbg(xhci, "Freed device context pool\n"); |
588 | 737 | ||
589 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); | 738 | xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); |
590 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); | ||
591 | if (xhci->dcbaa) | 739 | if (xhci->dcbaa) |
592 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | 740 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), |
593 | xhci->dcbaa, xhci->dcbaa->dma); | 741 | xhci->dcbaa, xhci->dcbaa->dma); |
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
595 | 743 | ||
596 | xhci->page_size = 0; | 744 | xhci->page_size = 0; |
597 | xhci->page_shift = 0; | 745 | xhci->page_shift = 0; |
746 | scratchpad_free(xhci); | ||
598 | } | 747 | } |
599 | 748 | ||
600 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | 749 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
602 | dma_addr_t dma; | 751 | dma_addr_t dma; |
603 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | 752 | struct device *dev = xhci_to_hcd(xhci)->self.controller; |
604 | unsigned int val, val2; | 753 | unsigned int val, val2; |
754 | u64 val_64; | ||
605 | struct xhci_segment *seg; | 755 | struct xhci_segment *seg; |
606 | u32 page_size; | 756 | u32 page_size; |
607 | int i; | 757 | int i; |
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
647 | xhci->dcbaa->dma = dma; | 797 | xhci->dcbaa->dma = dma; |
648 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", | 798 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", |
649 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | 799 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); |
650 | xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); | 800 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
651 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); | ||
652 | 801 | ||
653 | /* | 802 | /* |
654 | * Initialize the ring segment pool. The ring must be a contiguous | 803 | * Initialize the ring segment pool. The ring must be a contiguous |
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
658 | */ | 807 | */ |
659 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | 808 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, |
660 | SEGMENT_SIZE, 64, xhci->page_size); | 809 | SEGMENT_SIZE, 64, xhci->page_size); |
810 | |||
661 | /* See Table 46 and Note on Figure 55 */ | 811 | /* See Table 46 and Note on Figure 55 */ |
662 | /* FIXME support 64-byte contexts */ | ||
663 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, | 812 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, |
664 | sizeof(struct xhci_device_control), | 813 | 2112, 64, xhci->page_size); |
665 | 64, xhci->page_size); | ||
666 | if (!xhci->segment_pool || !xhci->device_pool) | 814 | if (!xhci->segment_pool || !xhci->device_pool) |
667 | goto fail; | 815 | goto fail; |
668 | 816 | ||
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
675 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | 823 | (unsigned long long)xhci->cmd_ring->first_seg->dma); |
676 | 824 | ||
677 | /* Set the address in the Command Ring Control register */ | 825 | /* Set the address in the Command Ring Control register */ |
678 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | 826 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
679 | val = (val & ~CMD_RING_ADDR_MASK) | | 827 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
680 | (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | | 828 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | |
681 | xhci->cmd_ring->cycle_state; | 829 | xhci->cmd_ring->cycle_state; |
682 | xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); | 830 | xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); |
683 | xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); | 831 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
684 | xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); | ||
685 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); | ||
686 | xhci_dbg_cmd_ptrs(xhci); | 832 | xhci_dbg_cmd_ptrs(xhci); |
687 | 833 | ||
688 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | 834 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); |
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
722 | /* set ring base address and size for each segment table entry */ | 868 | /* set ring base address and size for each segment table entry */ |
723 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | 869 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { |
724 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | 870 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; |
725 | entry->seg_addr[0] = seg->dma; | 871 | entry->seg_addr = seg->dma; |
726 | entry->seg_addr[1] = 0; | ||
727 | entry->seg_size = TRBS_PER_SEGMENT; | 872 | entry->seg_size = TRBS_PER_SEGMENT; |
728 | entry->rsvd = 0; | 873 | entry->rsvd = 0; |
729 | seg = seg->next; | 874 | seg = seg->next; |
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
741 | /* set the segment table base address */ | 886 | /* set the segment table base address */ |
742 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", | 887 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", |
743 | (unsigned long long)xhci->erst.erst_dma_addr); | 888 | (unsigned long long)xhci->erst.erst_dma_addr); |
744 | val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); | 889 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
745 | val &= ERST_PTR_MASK; | 890 | val_64 &= ERST_PTR_MASK; |
746 | val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); | 891 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); |
747 | xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); | 892 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); |
748 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | ||
749 | 893 | ||
750 | /* Set the event ring dequeue address */ | 894 | /* Set the event ring dequeue address */ |
751 | xhci_set_hc_event_deq(xhci); | 895 | xhci_set_hc_event_deq(xhci); |
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
761 | for (i = 0; i < MAX_HC_SLOTS; ++i) | 905 | for (i = 0; i < MAX_HC_SLOTS; ++i) |
762 | xhci->devs[i] = 0; | 906 | xhci->devs[i] = 0; |
763 | 907 | ||
908 | if (scratchpad_alloc(xhci, flags)) | ||
909 | goto fail; | ||
910 | |||
764 | return 0; | 911 | return 0; |
912 | |||
765 | fail: | 913 | fail: |
766 | xhci_warn(xhci, "Couldn't initialize memory\n"); | 914 | xhci_warn(xhci, "Couldn't initialize memory\n"); |
767 | xhci_mem_cleanup(xhci); | 915 | xhci_mem_cleanup(xhci); |
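Throughout the xhci-mem.c changes above, the old pairs of 32-bit writes to 64-bit registers are replaced by xhci_read_64()/xhci_write_64(). Those helpers are defined in xhci.h, outside the hunks shown here; one plausible shape, assuming thin wrappers around readl()/writel() that touch the low dword first, is:

    static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
                                   __u64 __iomem *regs)
    {
            __u32 __iomem *ptr = (__u32 __iomem *) regs;
            u64 val_lo = readl(ptr);
            u64 val_hi = readl(ptr + 1);

            return val_lo + (val_hi << 32);
    }

    static inline void xhci_write_64(struct xhci_hcd *xhci,
                                     const u64 val, __u64 __iomem *regs)
    {
            __u32 __iomem *ptr = (__u32 __iomem *) regs;

            writel(lower_32_bits(val), ptr);        /* low dword first */
            writel(upper_32_bits(val), ptr + 1);
    }

lower_32_bits()/upper_32_bits() come from linux/kernel.h, which is the include the xhci.h hunk at the end of this section adds.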
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1462709e26c0..592fe7e623f7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = { | |||
117 | .free_dev = xhci_free_dev, | 117 | .free_dev = xhci_free_dev, |
118 | .add_endpoint = xhci_add_endpoint, | 118 | .add_endpoint = xhci_add_endpoint, |
119 | .drop_endpoint = xhci_drop_endpoint, | 119 | .drop_endpoint = xhci_drop_endpoint, |
120 | .endpoint_reset = xhci_endpoint_reset, | ||
120 | .check_bandwidth = xhci_check_bandwidth, | 121 | .check_bandwidth = xhci_check_bandwidth, |
121 | .reset_bandwidth = xhci_reset_bandwidth, | 122 | .reset_bandwidth = xhci_reset_bandwidth, |
122 | .address_device = xhci_address_device, | 123 | .address_device = xhci_address_device, |
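The new .endpoint_reset entry is how usbcore hands halted-endpoint recovery to this driver, for instance when a class driver clears a stall. The callback itself lives in xhci.c, outside the hunks shown; only its prototype is sketched here:

    /* Prototype as the hc_driver interface expects it; the body in this
     * series is expected to issue a Reset Endpoint command and then move
     * the ring's dequeue pointer past the stalled TD (see the sketch
     * after xhci_queue_new_dequeue_state() below).
     */
    void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);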
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 02d81985c454..aa88a067148b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci, | |||
135 | static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) | 135 | static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) |
136 | { | 136 | { |
137 | union xhci_trb *next = ++(ring->dequeue); | 137 | union xhci_trb *next = ++(ring->dequeue); |
138 | unsigned long long addr; | ||
138 | 139 | ||
139 | ring->deq_updates++; | 140 | ring->deq_updates++; |
140 | /* Update the dequeue pointer further if that was a link TRB or we're at | 141 | /* Update the dequeue pointer further if that was a link TRB or we're at |
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
152 | ring->dequeue = ring->deq_seg->trbs; | 153 | ring->dequeue = ring->deq_seg->trbs; |
153 | next = ring->dequeue; | 154 | next = ring->dequeue; |
154 | } | 155 | } |
156 | addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); | ||
157 | if (ring == xhci->event_ring) | ||
158 | xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr); | ||
159 | else if (ring == xhci->cmd_ring) | ||
160 | xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr); | ||
161 | else | ||
162 | xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr); | ||
155 | } | 163 | } |
156 | 164 | ||
157 | /* | 165 | /* |
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
171 | { | 179 | { |
172 | u32 chain; | 180 | u32 chain; |
173 | union xhci_trb *next; | 181 | union xhci_trb *next; |
182 | unsigned long long addr; | ||
174 | 183 | ||
175 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; | 184 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; |
176 | next = ++(ring->enqueue); | 185 | next = ++(ring->enqueue); |
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
204 | ring->enqueue = ring->enq_seg->trbs; | 213 | ring->enqueue = ring->enq_seg->trbs; |
205 | next = ring->enqueue; | 214 | next = ring->enqueue; |
206 | } | 215 | } |
216 | addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); | ||
217 | if (ring == xhci->event_ring) | ||
218 | xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr); | ||
219 | else if (ring == xhci->cmd_ring) | ||
220 | xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr); | ||
221 | else | ||
222 | xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr); | ||
207 | } | 223 | } |
208 | 224 | ||
209 | /* | 225 | /* |
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
237 | 253 | ||
238 | void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | 254 | void xhci_set_hc_event_deq(struct xhci_hcd *xhci) |
239 | { | 255 | { |
240 | u32 temp; | 256 | u64 temp; |
241 | dma_addr_t deq; | 257 | dma_addr_t deq; |
242 | 258 | ||
243 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, | 259 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, |
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
246 | xhci_warn(xhci, "WARN something wrong with SW event ring " | 262 | xhci_warn(xhci, "WARN something wrong with SW event ring " |
247 | "dequeue ptr.\n"); | 263 | "dequeue ptr.\n"); |
248 | /* Update HC event ring dequeue pointer */ | 264 | /* Update HC event ring dequeue pointer */ |
249 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 265 | temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
250 | temp &= ERST_PTR_MASK; | 266 | temp &= ERST_PTR_MASK; |
251 | if (!in_interrupt()) | 267 | /* Don't clear the EHB bit (which is RW1C) because |
252 | xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); | 268 | * there might be more events to service. |
253 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | 269 | */ |
254 | xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, | 270 | temp &= ~ERST_EHB; |
255 | &xhci->ir_set->erst_dequeue[0]); | 271 | xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n"); |
272 | xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, | ||
273 | &xhci->ir_set->erst_dequeue); | ||
256 | } | 274 | } |
257 | 275 | ||
258 | /* Ring the host controller doorbell after placing a command on the ring */ | 276 | /* Ring the host controller doorbell after placing a command on the ring */ |
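Regarding the event-ring dequeue update in xhci_set_hc_event_deq() above: EHB is write-1-to-clear, so masking it out of temp means a 0 gets written back, and a 0 write leaves a RW1C bit untouched in hardware; only an explicit 1 would clear it. The net effect is:

    temp &= ~ERST_EHB;      /* write back 0: a RW1C bit stays as it was */
    xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
                  &xhci->ir_set->erst_dequeue);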
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, | |||
279 | /* Don't ring the doorbell for this endpoint if there are pending | 297 | /* Don't ring the doorbell for this endpoint if there are pending |
280 | * cancellations because we don't want to interrupt processing. | 298 | * cancellations because we don't want to interrupt processing. |
281 | */ | 299 | */ |
282 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { | 300 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING) |
301 | && !(ep_ring->state & EP_HALTED)) { | ||
283 | field = xhci_readl(xhci, db_addr) & DB_MASK; | 302 | field = xhci_readl(xhci, db_addr) & DB_MASK; |
284 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); | 303 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); |
285 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this | 304 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this |
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg( | |||
316 | return cur_seg; | 335 | return cur_seg; |
317 | } | 336 | } |
318 | 337 | ||
319 | struct dequeue_state { | ||
320 | struct xhci_segment *new_deq_seg; | ||
321 | union xhci_trb *new_deq_ptr; | ||
322 | int new_cycle_state; | ||
323 | }; | ||
324 | |||
325 | /* | 338 | /* |
326 | * Move the xHC's endpoint ring dequeue pointer past cur_td. | 339 | * Move the xHC's endpoint ring dequeue pointer past cur_td. |
327 | * Record the new state of the xHC's endpoint ring dequeue segment, | 340 | * Record the new state of the xHC's endpoint ring dequeue segment, |
@@ -336,24 +349,30 @@ struct dequeue_state { | |||
336 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit | 349 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit |
337 | * if we've moved it past a link TRB with the toggle cycle bit set. | 350 | * if we've moved it past a link TRB with the toggle cycle bit set. |
338 | */ | 351 | */ |
339 | static void find_new_dequeue_state(struct xhci_hcd *xhci, | 352 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
340 | unsigned int slot_id, unsigned int ep_index, | 353 | unsigned int slot_id, unsigned int ep_index, |
341 | struct xhci_td *cur_td, struct dequeue_state *state) | 354 | struct xhci_td *cur_td, struct xhci_dequeue_state *state) |
342 | { | 355 | { |
343 | struct xhci_virt_device *dev = xhci->devs[slot_id]; | 356 | struct xhci_virt_device *dev = xhci->devs[slot_id]; |
344 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; | 357 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; |
345 | struct xhci_generic_trb *trb; | 358 | struct xhci_generic_trb *trb; |
359 | struct xhci_ep_ctx *ep_ctx; | ||
360 | dma_addr_t addr; | ||
346 | 361 | ||
347 | state->new_cycle_state = 0; | 362 | state->new_cycle_state = 0; |
363 | xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); | ||
348 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 364 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
349 | ep_ring->stopped_trb, | 365 | ep_ring->stopped_trb, |
350 | &state->new_cycle_state); | 366 | &state->new_cycle_state); |
351 | if (!state->new_deq_seg) | 367 | if (!state->new_deq_seg) |
352 | BUG(); | 368 | BUG(); |
353 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 369 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
354 | state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; | 370 | xhci_dbg(xhci, "Finding endpoint context\n"); |
371 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | ||
372 | state->new_cycle_state = 0x1 & ep_ctx->deq; | ||
355 | 373 | ||
356 | state->new_deq_ptr = cur_td->last_trb; | 374 | state->new_deq_ptr = cur_td->last_trb; |
375 | xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); | ||
357 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 376 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
358 | state->new_deq_ptr, | 377 | state->new_deq_ptr, |
359 | &state->new_cycle_state); | 378 | &state->new_cycle_state); |
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci, | |||
367 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); | 386 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
368 | 387 | ||
369 | /* Don't update the ring cycle state for the producer (us). */ | 388 | /* Don't update the ring cycle state for the producer (us). */ |
389 | xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", | ||
390 | state->new_deq_seg); | ||
391 | addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); | ||
392 | xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", | ||
393 | (unsigned long long) addr); | ||
394 | xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); | ||
370 | ep_ring->dequeue = state->new_deq_ptr; | 395 | ep_ring->dequeue = state->new_deq_ptr; |
371 | ep_ring->deq_seg = state->new_deq_seg; | 396 | ep_ring->deq_seg = state->new_deq_seg; |
372 | } | 397 | } |
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
416 | unsigned int ep_index, struct xhci_segment *deq_seg, | 441 | unsigned int ep_index, struct xhci_segment *deq_seg, |
417 | union xhci_trb *deq_ptr, u32 cycle_state); | 442 | union xhci_trb *deq_ptr, u32 cycle_state); |
418 | 443 | ||
444 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | ||
445 | struct xhci_ring *ep_ring, unsigned int slot_id, | ||
446 | unsigned int ep_index, struct xhci_dequeue_state *deq_state) | ||
447 | { | ||
448 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | ||
449 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | ||
450 | deq_state->new_deq_seg, | ||
451 | (unsigned long long)deq_state->new_deq_seg->dma, | ||
452 | deq_state->new_deq_ptr, | ||
453 | (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), | ||
454 | deq_state->new_cycle_state); | ||
455 | queue_set_tr_deq(xhci, slot_id, ep_index, | ||
456 | deq_state->new_deq_seg, | ||
457 | deq_state->new_deq_ptr, | ||
458 | (u32) deq_state->new_cycle_state); | ||
459 | /* Stop the TD queueing code from ringing the doorbell until | ||
460 | * this command completes. The HC won't set the dequeue pointer | ||
461 | * if the ring is running, and ringing the doorbell starts the | ||
462 | * ring running. | ||
463 | */ | ||
464 | ep_ring->state |= SET_DEQ_PENDING; | ||
465 | xhci_ring_cmd_db(xhci); | ||
466 | } | ||
467 | |||
419 | /* | 468 | /* |
420 | * When we get a command completion for a Stop Endpoint Command, we need to | 469 | * When we get a command completion for a Stop Endpoint Command, we need to |
421 | * unlink any cancelled TDs from the ring. There are two ways to do that: | 470 | * unlink any cancelled TDs from the ring. There are two ways to do that: |
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
436 | struct xhci_td *cur_td = 0; | 485 | struct xhci_td *cur_td = 0; |
437 | struct xhci_td *last_unlinked_td; | 486 | struct xhci_td *last_unlinked_td; |
438 | 487 | ||
439 | struct dequeue_state deq_state; | 488 | struct xhci_dequeue_state deq_state; |
440 | #ifdef CONFIG_USB_HCD_STAT | 489 | #ifdef CONFIG_USB_HCD_STAT |
441 | ktime_t stop_time = ktime_get(); | 490 | ktime_t stop_time = ktime_get(); |
442 | #endif | 491 | #endif |
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
464 | * move the xHC endpoint ring dequeue pointer past this TD. | 513 | * move the xHC endpoint ring dequeue pointer past this TD. |
465 | */ | 514 | */ |
466 | if (cur_td == ep_ring->stopped_td) | 515 | if (cur_td == ep_ring->stopped_td) |
467 | find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, | 516 | xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, |
468 | &deq_state); | 517 | &deq_state); |
469 | else | 518 | else |
470 | td_to_noop(xhci, ep_ring, cur_td); | 519 | td_to_noop(xhci, ep_ring, cur_td); |
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
480 | 529 | ||
481 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ | 530 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ |
482 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { | 531 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { |
483 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | 532 | xhci_queue_new_dequeue_state(xhci, ep_ring, |
484 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | 533 | slot_id, ep_index, &deq_state); |
485 | deq_state.new_deq_seg, | ||
486 | (unsigned long long)deq_state.new_deq_seg->dma, | ||
487 | deq_state.new_deq_ptr, | ||
488 | (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), | ||
489 | deq_state.new_cycle_state); | ||
490 | queue_set_tr_deq(xhci, slot_id, ep_index, | ||
491 | deq_state.new_deq_seg, | ||
492 | deq_state.new_deq_ptr, | ||
493 | (u32) deq_state.new_cycle_state); | ||
494 | /* Stop the TD queueing code from ringing the doorbell until | ||
495 | * this command completes. The HC won't set the dequeue pointer | ||
496 | * if the ring is running, and ringing the doorbell starts the | ||
497 | * ring running. | ||
498 | */ | ||
499 | ep_ring->state |= SET_DEQ_PENDING; | ||
500 | xhci_ring_cmd_db(xhci); | ||
501 | } else { | 534 | } else { |
502 | /* Otherwise just ring the doorbell to restart the ring */ | 535 | /* Otherwise just ring the doorbell to restart the ring */ |
503 | ring_ep_doorbell(xhci, slot_id, ep_index); | 536 | ring_ep_doorbell(xhci, slot_id, ep_index); |
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
551 | unsigned int ep_index; | 584 | unsigned int ep_index; |
552 | struct xhci_ring *ep_ring; | 585 | struct xhci_ring *ep_ring; |
553 | struct xhci_virt_device *dev; | 586 | struct xhci_virt_device *dev; |
587 | struct xhci_ep_ctx *ep_ctx; | ||
588 | struct xhci_slot_ctx *slot_ctx; | ||
554 | 589 | ||
555 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 590 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); |
556 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 591 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); |
557 | dev = xhci->devs[slot_id]; | 592 | dev = xhci->devs[slot_id]; |
558 | ep_ring = dev->ep_rings[ep_index]; | 593 | ep_ring = dev->ep_rings[ep_index]; |
594 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | ||
595 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); | ||
559 | 596 | ||
560 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { | 597 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { |
561 | unsigned int ep_state; | 598 | unsigned int ep_state; |
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
569 | case COMP_CTX_STATE: | 606 | case COMP_CTX_STATE: |
570 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " | 607 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " |
571 | "to incorrect slot or ep state.\n"); | 608 | "to incorrect slot or ep state.\n"); |
572 | ep_state = dev->out_ctx->ep[ep_index].ep_info; | 609 | ep_state = ep_ctx->ep_info; |
573 | ep_state &= EP_STATE_MASK; | 610 | ep_state &= EP_STATE_MASK; |
574 | slot_state = dev->out_ctx->slot.dev_state; | 611 | slot_state = slot_ctx->dev_state; |
575 | slot_state = GET_SLOT_STATE(slot_state); | 612 | slot_state = GET_SLOT_STATE(slot_state); |
576 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", | 613 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", |
577 | slot_state, ep_state); | 614 | slot_state, ep_state); |
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
593 | * cancelling URBs, which might not be an error... | 630 | * cancelling URBs, which might not be an error... |
594 | */ | 631 | */ |
595 | } else { | 632 | } else { |
596 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " | 633 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", |
597 | "deq[1] = 0x%x.\n", | 634 | ep_ctx->deq); |
598 | dev->out_ctx->ep[ep_index].deq[0], | ||
599 | dev->out_ctx->ep[ep_index].deq[1]); | ||
600 | } | 635 | } |
601 | 636 | ||
602 | ep_ring->state &= ~SET_DEQ_PENDING; | 637 | ep_ring->state &= ~SET_DEQ_PENDING; |
603 | ring_ep_doorbell(xhci, slot_id, ep_index); | 638 | ring_ep_doorbell(xhci, slot_id, ep_index); |
604 | } | 639 | } |
605 | 640 | ||
641 | static void handle_reset_ep_completion(struct xhci_hcd *xhci, | ||
642 | struct xhci_event_cmd *event, | ||
643 | union xhci_trb *trb) | ||
644 | { | ||
645 | int slot_id; | ||
646 | unsigned int ep_index; | ||
647 | |||
648 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | ||
649 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | ||
650 | /* This command will only fail if the endpoint wasn't halted, | ||
651 | * but we don't care. | ||
652 | */ | ||
653 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", | ||
654 | (unsigned int) GET_COMP_CODE(event->status)); | ||
655 | |||
656 | /* Clear our internal halted state and restart the ring */ | ||
657 | xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED; | ||
658 | ring_ep_doorbell(xhci, slot_id, ep_index); | ||
659 | } | ||
606 | 660 | ||
607 | static void handle_cmd_completion(struct xhci_hcd *xhci, | 661 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
608 | struct xhci_event_cmd *event) | 662 | struct xhci_event_cmd *event) |
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
611 | u64 cmd_dma; | 665 | u64 cmd_dma; |
612 | dma_addr_t cmd_dequeue_dma; | 666 | dma_addr_t cmd_dequeue_dma; |
613 | 667 | ||
614 | cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; | 668 | cmd_dma = event->cmd_trb; |
615 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 669 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
616 | xhci->cmd_ring->dequeue); | 670 | xhci->cmd_ring->dequeue); |
617 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ | 671 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ |
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
653 | case TRB_TYPE(TRB_CMD_NOOP): | 707 | case TRB_TYPE(TRB_CMD_NOOP): |
654 | ++xhci->noops_handled; | 708 | ++xhci->noops_handled; |
655 | break; | 709 | break; |
710 | case TRB_TYPE(TRB_RESET_EP): | ||
711 | handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); | ||
712 | break; | ||
656 | default: | 713 | default: |
657 | /* Skip over unknown commands on the event ring */ | 714 | /* Skip over unknown commands on the event ring */ |
658 | xhci->error_bitmask |= 1 << 6; | 715 | xhci->error_bitmask |= 1 << 6; |
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
756 | union xhci_trb *event_trb; | 813 | union xhci_trb *event_trb; |
757 | struct urb *urb = 0; | 814 | struct urb *urb = 0; |
758 | int status = -EINPROGRESS; | 815 | int status = -EINPROGRESS; |
816 | struct xhci_ep_ctx *ep_ctx; | ||
759 | 817 | ||
818 | xhci_dbg(xhci, "In %s\n", __func__); | ||
760 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; | 819 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; |
761 | if (!xdev) { | 820 | if (!xdev) { |
762 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); | 821 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); |
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
765 | 824 | ||
766 | /* Endpoint ID is 1 based, our index is zero based */ | 825 | /* Endpoint ID is 1 based, our index is zero based */ |
767 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 826 | ep_index = TRB_TO_EP_ID(event->flags) - 1; |
827 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); | ||
768 | ep_ring = xdev->ep_rings[ep_index]; | 828 | ep_ring = xdev->ep_rings[ep_index]; |
769 | if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | 829 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
830 | if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | ||
770 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); | 831 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); |
771 | return -ENODEV; | 832 | return -ENODEV; |
772 | } | 833 | } |
773 | 834 | ||
774 | event_dma = event->buffer[0]; | 835 | event_dma = event->buffer; |
775 | if (event->buffer[1] != 0) | ||
776 | xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); | ||
777 | |||
778 | /* This TRB should be in the TD at the head of this ring's TD list */ | 836 | /* This TRB should be in the TD at the head of this ring's TD list */ |
837 | xhci_dbg(xhci, "%s - checking for list empty\n", __func__); | ||
779 | if (list_empty(&ep_ring->td_list)) { | 838 | if (list_empty(&ep_ring->td_list)) { |
780 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", | 839 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", |
781 | TRB_TO_SLOT_ID(event->flags), ep_index); | 840 | TRB_TO_SLOT_ID(event->flags), ep_index); |
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
785 | urb = NULL; | 844 | urb = NULL; |
786 | goto cleanup; | 845 | goto cleanup; |
787 | } | 846 | } |
847 | xhci_dbg(xhci, "%s - getting list entry\n", __func__); | ||
788 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); | 848 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); |
789 | 849 | ||
790 | /* Is this a TRB in the currently executing TD? */ | 850 | /* Is this a TRB in the currently executing TD? */ |
851 | xhci_dbg(xhci, "%s - looking for TD\n", __func__); | ||
791 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, | 852 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, |
792 | td->last_trb, event_dma); | 853 | td->last_trb, event_dma); |
854 | xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg); | ||
793 | if (!event_seg) { | 855 | if (!event_seg) { |
794 | /* HC is busted, give up! */ | 856 | /* HC is busted, give up! */ |
795 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); | 857 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); |
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
798 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; | 860 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; |
799 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | 861 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", |
800 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); | 862 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); |
801 | xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", | 863 | xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n", |
802 | (unsigned int) event->buffer[0]); | 864 | lower_32_bits(event->buffer)); |
803 | xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", | 865 | xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n", |
804 | (unsigned int) event->buffer[1]); | 866 | upper_32_bits(event->buffer)); |
805 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", | 867 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", |
806 | (unsigned int) event->transfer_len); | 868 | (unsigned int) event->transfer_len); |
807 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", | 869 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", |
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
823 | break; | 885 | break; |
824 | case COMP_STALL: | 886 | case COMP_STALL: |
825 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); | 887 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); |
888 | ep_ring->state |= EP_HALTED; | ||
826 | status = -EPIPE; | 889 | status = -EPIPE; |
827 | break; | 890 | break; |
828 | case COMP_TRB_ERR: | 891 | case COMP_TRB_ERR: |
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
833 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); | 896 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); |
834 | status = -EPROTO; | 897 | status = -EPROTO; |
835 | break; | 898 | break; |
899 | case COMP_BABBLE: | ||
900 | xhci_warn(xhci, "WARN: babble error on endpoint\n"); | ||
901 | status = -EOVERFLOW; | ||
902 | break; | ||
836 | case COMP_DB_ERR: | 903 | case COMP_DB_ERR: |
837 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); | 904 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); |
838 | status = -ENOSR; | 905 | status = -ENOSR; |
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
874 | if (event_trb != ep_ring->dequeue) { | 941 | if (event_trb != ep_ring->dequeue) { |
875 | /* The event was for the status stage */ | 942 | /* The event was for the status stage */ |
876 | if (event_trb == td->last_trb) { | 943 | if (event_trb == td->last_trb) { |
877 | td->urb->actual_length = | 944 | if (td->urb->actual_length != 0) { |
878 | td->urb->transfer_buffer_length; | 945 | /* Don't overwrite a previously set error code */ |
946 | if (status == -EINPROGRESS || status == 0) | ||
947 | /* Did we already see a short data stage? */ | ||
948 | status = -EREMOTEIO; | ||
949 | } else { | ||
950 | td->urb->actual_length = | ||
951 | td->urb->transfer_buffer_length; | ||
952 | } | ||
879 | } else { | 953 | } else { |
880 | /* Maybe the event was for the data stage? */ | 954 | /* Maybe the event was for the data stage? */ |
881 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) | 955 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) { |
882 | /* We didn't stop on a link TRB in the middle */ | 956 | /* We didn't stop on a link TRB in the middle */ |
883 | td->urb->actual_length = | 957 | td->urb->actual_length = |
884 | td->urb->transfer_buffer_length - | 958 | td->urb->transfer_buffer_length - |
885 | TRB_LEN(event->transfer_len); | 959 | TRB_LEN(event->transfer_len); |
960 | xhci_dbg(xhci, "Waiting for status stage event\n"); | ||
961 | urb = NULL; | ||
962 | goto cleanup; | ||
963 | } | ||
886 | } | 964 | } |
887 | } | 965 | } |
888 | } else { | 966 | } else { |
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
929 | TRB_LEN(event->transfer_len)); | 1007 | TRB_LEN(event->transfer_len)); |
930 | td->urb->actual_length = 0; | 1008 | td->urb->actual_length = 0; |
931 | } | 1009 | } |
932 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 1010 | /* Don't overwrite a previously set error code */ |
933 | status = -EREMOTEIO; | 1011 | if (status == -EINPROGRESS) { |
934 | else | 1012 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
935 | status = 0; | 1013 | status = -EREMOTEIO; |
1014 | else | ||
1015 | status = 0; | ||
1016 | } | ||
936 | } else { | 1017 | } else { |
937 | td->urb->actual_length = td->urb->transfer_buffer_length; | 1018 | td->urb->actual_length = td->urb->transfer_buffer_length; |
938 | /* Ignore a short packet completion if the | 1019 | /* Ignore a short packet completion if the |
939 | * untransferred length was zero. | 1020 | * untransferred length was zero. |
940 | */ | 1021 | */ |
941 | status = 0; | 1022 | if (status == -EREMOTEIO) |
1023 | status = 0; | ||
942 | } | 1024 | } |
943 | } else { | 1025 | } else { |
944 | /* Slow path - walk the list, starting from the dequeue | 1026 | /* Slow path - walk the list, starting from the dequeue |
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
965 | TRB_LEN(event->transfer_len); | 1047 | TRB_LEN(event->transfer_len); |
966 | } | 1048 | } |
967 | } | 1049 | } |
968 | /* The Endpoint Stop Command completion will take care of | ||
969 | * any stopped TDs. A stopped TD may be restarted, so don't update the | ||
970 | * ring dequeue pointer or take this TD off any lists yet. | ||
971 | */ | ||
972 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || | 1050 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || |
973 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { | 1051 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { |
1052 | /* The Endpoint Stop Command completion will take care of any | ||
1053 | * stopped TDs. A stopped TD may be restarted, so don't update | ||
1054 | * the ring dequeue pointer or take this TD off any lists yet. | ||
1055 | */ | ||
974 | ep_ring->stopped_td = td; | 1056 | ep_ring->stopped_td = td; |
975 | ep_ring->stopped_trb = event_trb; | 1057 | ep_ring->stopped_trb = event_trb; |
976 | } else { | 1058 | } else { |
977 | /* Update ring dequeue pointer */ | 1059 | if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) { |
978 | while (ep_ring->dequeue != td->last_trb) | 1060 | /* The transfer is completed from the driver's |
1061 | * perspective, but we need to issue a set dequeue | ||
1062 | * command for this stalled endpoint to move the dequeue | ||
1063 | * pointer past the TD. We can't do that here because | ||
1064 | * the halt condition must be cleared first. | ||
1065 | */ | ||
1066 | ep_ring->stopped_td = td; | ||
1067 | ep_ring->stopped_trb = event_trb; | ||
1068 | } else { | ||
1069 | /* Update ring dequeue pointer */ | ||
1070 | while (ep_ring->dequeue != td->last_trb) | ||
1071 | inc_deq(xhci, ep_ring, false); | ||
979 | inc_deq(xhci, ep_ring, false); | 1072 | inc_deq(xhci, ep_ring, false); |
980 | inc_deq(xhci, ep_ring, false); | 1073 | } |
981 | 1074 | ||
982 | /* Clean up the endpoint's TD list */ | 1075 | /* Clean up the endpoint's TD list */ |
983 | urb = td->urb; | 1076 | urb = td->urb; |
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
987 | list_del(&td->cancelled_td_list); | 1080 | list_del(&td->cancelled_td_list); |
988 | ep_ring->cancels_pending--; | 1081 | ep_ring->cancels_pending--; |
989 | } | 1082 | } |
990 | kfree(td); | 1083 | /* Leave the TD around for the reset endpoint function to use */ |
1084 | if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { | ||
1085 | kfree(td); | ||
1086 | } | ||
991 | urb->hcpriv = NULL; | 1087 | urb->hcpriv = NULL; |
992 | } | 1088 | } |
993 | cleanup: | 1089 | cleanup: |
@@ -997,6 +1093,8 @@ cleanup: | |||
997 | /* FIXME for multi-TD URBs (that have buffers bigger than 64MB) */ | 1093 | /* FIXME for multi-TD URBs (that have buffers bigger than 64MB) */ |
998 | if (urb) { | 1094 | if (urb) { |
999 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); | 1095 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); |
1096 | xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", | ||
1097 | urb, td->urb->actual_length, status); | ||
1000 | spin_unlock(&xhci->lock); | 1098 | spin_unlock(&xhci->lock); |
1001 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); | 1099 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); |
1002 | spin_lock(&xhci->lock); | 1100 | spin_lock(&xhci->lock); |
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1014 | int update_ptrs = 1; | 1112 | int update_ptrs = 1; |
1015 | int ret; | 1113 | int ret; |
1016 | 1114 | ||
1115 | xhci_dbg(xhci, "In %s\n", __func__); | ||
1017 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { | 1116 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { |
1018 | xhci->error_bitmask |= 1 << 1; | 1117 | xhci->error_bitmask |= 1 << 1; |
1019 | return; | 1118 | return; |
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1026 | xhci->error_bitmask |= 1 << 2; | 1125 | xhci->error_bitmask |= 1 << 2; |
1027 | return; | 1126 | return; |
1028 | } | 1127 | } |
1128 | xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); | ||
1029 | 1129 | ||
1030 | /* FIXME: Handle more event types. */ | 1130 | /* FIXME: Handle more event types. */ |
1031 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { | 1131 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { |
1032 | case TRB_TYPE(TRB_COMPLETION): | 1132 | case TRB_TYPE(TRB_COMPLETION): |
1133 | xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); | ||
1033 | handle_cmd_completion(xhci, &event->event_cmd); | 1134 | handle_cmd_completion(xhci, &event->event_cmd); |
1135 | xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__); | ||
1034 | break; | 1136 | break; |
1035 | case TRB_TYPE(TRB_PORT_STATUS): | 1137 | case TRB_TYPE(TRB_PORT_STATUS): |
1138 | xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__); | ||
1036 | handle_port_status(xhci, event); | 1139 | handle_port_status(xhci, event); |
1140 | xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__); | ||
1037 | update_ptrs = 0; | 1141 | update_ptrs = 0; |
1038 | break; | 1142 | break; |
1039 | case TRB_TYPE(TRB_TRANSFER): | 1143 | case TRB_TYPE(TRB_TRANSFER): |
1144 | xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__); | ||
1040 | ret = handle_tx_event(xhci, &event->trans_event); | 1145 | ret = handle_tx_event(xhci, &event->trans_event); |
1146 | xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__); | ||
1041 | if (ret < 0) | 1147 | if (ret < 0) |
1042 | xhci->error_bitmask |= 1 << 9; | 1148 | xhci->error_bitmask |= 1 << 9; |
1043 | else | 1149 | else |
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
1093 | */ | 1199 | */ |
1094 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); | 1200 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); |
1095 | return -ENOENT; | 1201 | return -ENOENT; |
1096 | case EP_STATE_HALTED: | ||
1097 | case EP_STATE_ERROR: | 1202 | case EP_STATE_ERROR: |
1098 | xhci_warn(xhci, "WARN waiting for halt or error on ep " | 1203 | xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); |
1099 | "to be cleared\n"); | ||
1100 | /* FIXME event handling code for error needs to clear it */ | 1204 | /* FIXME event handling code for error needs to clear it */ |
1101 | /* XXX not sure if this should be -ENOENT or not */ | 1205 | /* XXX not sure if this should be -ENOENT or not */ |
1102 | return -EINVAL; | 1206 | return -EINVAL; |
1207 | case EP_STATE_HALTED: | ||
1208 | xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); | ||
1103 | case EP_STATE_STOPPED: | 1209 | case EP_STATE_STOPPED: |
1104 | case EP_STATE_RUNNING: | 1210 | case EP_STATE_RUNNING: |
1105 | break; | 1211 | break; |
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
1128 | gfp_t mem_flags) | 1234 | gfp_t mem_flags) |
1129 | { | 1235 | { |
1130 | int ret; | 1236 | int ret; |
1131 | 1237 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | |
1132 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], | 1238 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], |
1133 | xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, | 1239 | ep_ctx->ep_info & EP_STATE_MASK, |
1134 | num_trbs, mem_flags); | 1240 | num_trbs, mem_flags); |
1135 | if (ret) | 1241 | if (ret) |
1136 | return ret; | 1242 | return ret; |
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1285 | /* Queue the first TRB, even if it's zero-length */ | 1391 | /* Queue the first TRB, even if it's zero-length */ |
1286 | do { | 1392 | do { |
1287 | u32 field = 0; | 1393 | u32 field = 0; |
1394 | u32 length_field = 0; | ||
1288 | 1395 | ||
1289 | /* Don't change the cycle bit of the first TRB until later */ | 1396 | /* Don't change the cycle bit of the first TRB until later */ |
1290 | if (first_trb) | 1397 | if (first_trb) |
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1314 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 1421 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
1315 | (unsigned int) addr + trb_buff_len); | 1422 | (unsigned int) addr + trb_buff_len); |
1316 | } | 1423 | } |
1424 | length_field = TRB_LEN(trb_buff_len) | | ||
1425 | TD_REMAINDER(urb->transfer_buffer_length - running_total) | | ||
1426 | TRB_INTR_TARGET(0); | ||
1317 | queue_trb(xhci, ep_ring, false, | 1427 | queue_trb(xhci, ep_ring, false, |
1318 | (u32) addr, | 1428 | lower_32_bits(addr), |
1319 | (u32) ((u64) addr >> 32), | 1429 | upper_32_bits(addr), |
1320 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | 1430 | length_field, |
1321 | /* We always want to know if the TRB was short, | 1431 | /* We always want to know if the TRB was short, |
1322 | * or we won't get an event when it completes. | 1432 | * or we won't get an event when it completes. |
1323 | * (Unless we use event data TRBs, which are a | 1433 | * (Unless we use event data TRBs, which are a |
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1365 | struct xhci_generic_trb *start_trb; | 1475 | struct xhci_generic_trb *start_trb; |
1366 | bool first_trb; | 1476 | bool first_trb; |
1367 | int start_cycle; | 1477 | int start_cycle; |
1368 | u32 field; | 1478 | u32 field, length_field; |
1369 | 1479 | ||
1370 | int running_total, trb_buff_len, ret; | 1480 | int running_total, trb_buff_len, ret; |
1371 | u64 addr; | 1481 | u64 addr; |
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1443 | td->last_trb = ep_ring->enqueue; | 1553 | td->last_trb = ep_ring->enqueue; |
1444 | field |= TRB_IOC; | 1554 | field |= TRB_IOC; |
1445 | } | 1555 | } |
1556 | length_field = TRB_LEN(trb_buff_len) | | ||
1557 | TD_REMAINDER(urb->transfer_buffer_length - running_total) | | ||
1558 | TRB_INTR_TARGET(0); | ||
1446 | queue_trb(xhci, ep_ring, false, | 1559 | queue_trb(xhci, ep_ring, false, |
1447 | (u32) addr, | 1560 | lower_32_bits(addr), |
1448 | (u32) ((u64) addr >> 32), | 1561 | upper_32_bits(addr), |
1449 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | 1562 | length_field, |
1450 | /* We always want to know if the TRB was short, | 1563 | /* We always want to know if the TRB was short, |
1451 | * or we won't get an event when it completes. | 1564 | * or we won't get an event when it completes. |
1452 | * (Unless we use event data TRBs, which are a | 1565 | * (Unless we use event data TRBs, which are a |
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1478 | struct usb_ctrlrequest *setup; | 1591 | struct usb_ctrlrequest *setup; |
1479 | struct xhci_generic_trb *start_trb; | 1592 | struct xhci_generic_trb *start_trb; |
1480 | int start_cycle; | 1593 | int start_cycle; |
1481 | u32 field; | 1594 | u32 field, length_field; |
1482 | struct xhci_td *td; | 1595 | struct xhci_td *td; |
1483 | 1596 | ||
1484 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 1597 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; |
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1528 | 1641 | ||
1529 | /* If there's data, queue data TRBs */ | 1642 | /* If there's data, queue data TRBs */ |
1530 | field = 0; | 1643 | field = 0; |
1644 | length_field = TRB_LEN(urb->transfer_buffer_length) | | ||
1645 | TD_REMAINDER(urb->transfer_buffer_length) | | ||
1646 | TRB_INTR_TARGET(0); | ||
1531 | if (urb->transfer_buffer_length > 0) { | 1647 | if (urb->transfer_buffer_length > 0) { |
1532 | if (setup->bRequestType & USB_DIR_IN) | 1648 | if (setup->bRequestType & USB_DIR_IN) |
1533 | field |= TRB_DIR_IN; | 1649 | field |= TRB_DIR_IN; |
1534 | queue_trb(xhci, ep_ring, false, | 1650 | queue_trb(xhci, ep_ring, false, |
1535 | lower_32_bits(urb->transfer_dma), | 1651 | lower_32_bits(urb->transfer_dma), |
1536 | upper_32_bits(urb->transfer_dma), | 1652 | upper_32_bits(urb->transfer_dma), |
1537 | TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), | 1653 | length_field, |
1538 | /* Event on short tx */ | 1654 | /* Event on short tx */ |
1539 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); | 1655 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); |
1540 | } | 1656 | } |
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) | |||
1603 | int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1719 | int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1604 | u32 slot_id) | 1720 | u32 slot_id) |
1605 | { | 1721 | { |
1606 | return queue_command(xhci, in_ctx_ptr, 0, 0, | 1722 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
1723 | upper_32_bits(in_ctx_ptr), 0, | ||
1607 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); | 1724 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); |
1608 | } | 1725 | } |
1609 | 1726 | ||
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | |||
1611 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1728 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1612 | u32 slot_id) | 1729 | u32 slot_id) |
1613 | { | 1730 | { |
1614 | return queue_command(xhci, in_ctx_ptr, 0, 0, | 1731 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
1732 | upper_32_bits(in_ctx_ptr), 0, | ||
1615 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); | 1733 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); |
1616 | } | 1734 | } |
1617 | 1735 | ||
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
1639 | u32 type = TRB_TYPE(TRB_SET_DEQ); | 1757 | u32 type = TRB_TYPE(TRB_SET_DEQ); |
1640 | 1758 | ||
1641 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); | 1759 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); |
1642 | if (addr == 0) | 1760 | if (addr == 0) { |
1643 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); | 1761 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); |
1644 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", | 1762 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", |
1645 | deq_seg, deq_ptr); | 1763 | deq_seg, deq_ptr); |
1646 | return queue_command(xhci, (u32) addr | cycle_state, 0, 0, | 1764 | return 0; |
1765 | } | ||
1766 | return queue_command(xhci, lower_32_bits(addr) | cycle_state, | ||
1767 | upper_32_bits(addr), 0, | ||
1647 | trb_slot_id | trb_ep_index | type); | 1768 | trb_slot_id | trb_ep_index | type); |
1648 | } | 1769 | } |
1770 | |||
1771 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | ||
1772 | unsigned int ep_index) | ||
1773 | { | ||
1774 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); | ||
1775 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); | ||
1776 | u32 type = TRB_TYPE(TRB_RESET_EP); | ||
1777 | |||
1778 | return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type); | ||
1779 | } | ||
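The xhci-ring.c hunks above replace the open-coded (u32) casts with lower_32_bits()/upper_32_bits() and collect the TRB length, TD remainder, and interrupter target into a single length_field before calling queue_trb(). A minimal, self-contained sketch of that packing pattern is below; the shift positions and field widths used here (length in bits 0-16, remainder in bits 17-21, interrupter target in bits 22-31) are assumptions made for the illustration, not a statement of the exact xHCI TRB layout.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout for this sketch only:
     * bits 0-16 transfer length, 17-21 TD remainder, 22-31 interrupter target. */
    #define SK_TRB_LEN(n)          ((uint32_t)((n) & 0x1ffff))
    #define SK_TD_REMAINDER(n)     ((uint32_t)(((n) & 0x1f) << 17))
    #define SK_TRB_INTR_TARGET(n)  ((uint32_t)(((n) & 0x3ff) << 22))

    static uint32_t sk_lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t sk_upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t dma_addr = 0x0000000123456000ULL; /* pretend DMA address */
            uint32_t trb_buff_len = 512;               /* bytes carried by this TRB */
            uint32_t remaining = 3;                    /* work left in the TD */

            uint32_t length_field = SK_TRB_LEN(trb_buff_len) |
                                    SK_TD_REMAINDER(remaining) |
                                    SK_TRB_INTR_TARGET(0);

            /* The TRB takes the buffer address as two dwords, low one first. */
            printf("addr lo=0x%08x hi=0x%08x length_field=0x%08x\n",
                   sk_lower_32_bits(dma_addr), sk_upper_32_bits(dma_addr),
                   length_field);
            return 0;
    }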
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8936eeb5588b..d31d32206ba3 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/usb.h> | 26 | #include <linux/usb.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/kernel.h> | ||
28 | 29 | ||
29 | #include "../core/hcd.h" | 30 | #include "../core/hcd.h" |
30 | /* Code sharing between pci-quirks and xhci hcd */ | 31 | /* Code sharing between pci-quirks and xhci hcd */ |
@@ -42,14 +43,6 @@ | |||
42 | * xHCI register interface. | 43 | * xHCI register interface. |
43 | * This corresponds to the eXtensible Host Controller Interface (xHCI) | 44 | * This corresponds to the eXtensible Host Controller Interface (xHCI) |
44 | * Revision 0.95 specification | 45 | * Revision 0.95 specification |
45 | * | ||
46 | * Registers should always be accessed with double word or quad word accesses. | ||
47 | * | ||
48 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
49 | * with 64-bit address pointers should be written to with dword accesses by | ||
50 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
51 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
52 | * the high dword, and write order is irrelevant. | ||
53 | */ | 46 | */ |
54 | 47 | ||
55 | /** | 48 | /** |
@@ -96,6 +89,7 @@ struct xhci_cap_regs { | |||
96 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) | 89 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) |
97 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ | 90 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ |
98 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ | 91 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ |
92 | #define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) | ||
99 | 93 | ||
100 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ | 94 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ |
101 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ | 95 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ |
@@ -166,10 +160,10 @@ struct xhci_op_regs { | |||
166 | u32 reserved1; | 160 | u32 reserved1; |
167 | u32 reserved2; | 161 | u32 reserved2; |
168 | u32 dev_notification; | 162 | u32 dev_notification; |
169 | u32 cmd_ring[2]; | 163 | u64 cmd_ring; |
170 | /* rsvd: offset 0x20-2F */ | 164 | /* rsvd: offset 0x20-2F */ |
171 | u32 reserved3[4]; | 165 | u32 reserved3[4]; |
172 | u32 dcbaa_ptr[2]; | 166 | u64 dcbaa_ptr; |
173 | u32 config_reg; | 167 | u32 config_reg; |
174 | /* rsvd: offset 0x3C-3FF */ | 168 | /* rsvd: offset 0x3C-3FF */ |
175 | u32 reserved4[241]; | 169 | u32 reserved4[241]; |
@@ -254,7 +248,7 @@ struct xhci_op_regs { | |||
254 | #define CMD_RING_RUNNING (1 << 3) | 248 | #define CMD_RING_RUNNING (1 << 3) |
255 | /* bits 4:5 reserved and should be preserved */ | 249 | /* bits 4:5 reserved and should be preserved */ |
256 | /* Command Ring pointer - bit mask for the lower 32 bits. */ | 250 | /* Command Ring pointer - bit mask for the lower 32 bits. */ |
257 | #define CMD_RING_ADDR_MASK (0xffffffc0) | 251 | #define CMD_RING_RSVD_BITS (0x3f) |
258 | 252 | ||
259 | /* CONFIG - Configure Register - config_reg bitmasks */ | 253 | /* CONFIG - Configure Register - config_reg bitmasks */ |
260 | /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ | 254 | /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ |
@@ -382,8 +376,8 @@ struct xhci_intr_reg { | |||
382 | u32 irq_control; | 376 | u32 irq_control; |
383 | u32 erst_size; | 377 | u32 erst_size; |
384 | u32 rsvd; | 378 | u32 rsvd; |
385 | u32 erst_base[2]; | 379 | u64 erst_base; |
386 | u32 erst_dequeue[2]; | 380 | u64 erst_dequeue; |
387 | }; | 381 | }; |
388 | 382 | ||
389 | /* irq_pending bitmasks */ | 383 | /* irq_pending bitmasks */ |
@@ -453,6 +447,27 @@ struct xhci_doorbell_array { | |||
453 | 447 | ||
454 | 448 | ||
455 | /** | 449 | /** |
450 | * struct xhci_container_ctx | ||
451 | * @type: Type of context. Used to calculate offsets to contained contexts. | ||
452 | * @size: Size of the context data | ||
453 | * @bytes: The raw context data given to HW | ||
454 | * @dma: dma address of the bytes | ||
455 | * | ||
456 | * Represents either a Device or Input context. Holds a pointer to the raw | ||
457 | * memory used for the context (bytes) and dma address of it (dma). | ||
458 | */ | ||
459 | struct xhci_container_ctx { | ||
460 | unsigned type; | ||
461 | #define XHCI_CTX_TYPE_DEVICE 0x1 | ||
462 | #define XHCI_CTX_TYPE_INPUT 0x2 | ||
463 | |||
464 | int size; | ||
465 | |||
466 | u8 *bytes; | ||
467 | dma_addr_t dma; | ||
468 | }; | ||
469 | |||
470 | /** | ||
456 | * struct xhci_slot_ctx | 471 | * struct xhci_slot_ctx |
457 | * @dev_info: Route string, device speed, hub info, and last valid endpoint | 472 | * @dev_info: Route string, device speed, hub info, and last valid endpoint |
458 | * @dev_info2: Max exit latency for device number, root hub port number | 473 | * @dev_info2: Max exit latency for device number, root hub port number |
@@ -538,7 +553,7 @@ struct xhci_slot_ctx { | |||
538 | struct xhci_ep_ctx { | 553 | struct xhci_ep_ctx { |
539 | u32 ep_info; | 554 | u32 ep_info; |
540 | u32 ep_info2; | 555 | u32 ep_info2; |
541 | u32 deq[2]; | 556 | u64 deq; |
542 | u32 tx_info; | 557 | u32 tx_info; |
543 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 558 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
544 | u32 reserved[3]; | 559 | u32 reserved[3]; |
@@ -589,18 +604,16 @@ struct xhci_ep_ctx { | |||
589 | 604 | ||
590 | 605 | ||
591 | /** | 606 | /** |
592 | * struct xhci_device_control | 607 | * struct xhci_input_control_context |
593 | * Input/Output context; see section 6.2.5. | 608 | * Input control context; see section 6.2.5. |
594 | * | 609 | * |
595 | * @drop_context: set the bit of the endpoint context you want to disable | 610 | * @drop_context: set the bit of the endpoint context you want to disable |
596 | * @add_context: set the bit of the endpoint context you want to enable | 611 | * @add_context: set the bit of the endpoint context you want to enable |
597 | */ | 612 | */ |
598 | struct xhci_device_control { | 613 | struct xhci_input_control_ctx { |
599 | u32 drop_flags; | 614 | u32 drop_flags; |
600 | u32 add_flags; | 615 | u32 add_flags; |
601 | u32 rsvd[6]; | 616 | u32 rsvd2[6]; |
602 | struct xhci_slot_ctx slot; | ||
603 | struct xhci_ep_ctx ep[31]; | ||
604 | }; | 617 | }; |
605 | 618 | ||
606 | /* drop context bitmasks */ | 619 | /* drop context bitmasks */ |
@@ -608,7 +621,6 @@ struct xhci_device_control { | |||
608 | /* add context bitmasks */ | 621 | /* add context bitmasks */ |
609 | #define ADD_EP(x) (0x1 << x) | 622 | #define ADD_EP(x) (0x1 << x) |
610 | 623 | ||
611 | |||
612 | struct xhci_virt_device { | 624 | struct xhci_virt_device { |
613 | /* | 625 | /* |
614 | * Commands to the hardware are passed an "input context" that | 626 | * Commands to the hardware are passed an "input context" that |
@@ -618,11 +630,10 @@ struct xhci_virt_device { | |||
618 | * track of input and output contexts separately because | 630 | * track of input and output contexts separately because |
619 | * these commands might fail and we don't trust the hardware. | 631 | * these commands might fail and we don't trust the hardware. |
620 | */ | 632 | */ |
621 | struct xhci_device_control *out_ctx; | 633 | struct xhci_container_ctx *out_ctx; |
622 | dma_addr_t out_ctx_dma; | ||
623 | /* Used for addressing devices and configuration changes */ | 634 | /* Used for addressing devices and configuration changes */ |
624 | struct xhci_device_control *in_ctx; | 635 | struct xhci_container_ctx *in_ctx; |
625 | dma_addr_t in_ctx_dma; | 636 | |
626 | /* FIXME when stream support is added */ | 637 | /* FIXME when stream support is added */ |
627 | struct xhci_ring *ep_rings[31]; | 638 | struct xhci_ring *ep_rings[31]; |
628 | /* Temporary storage in case the configure endpoint command fails and we | 639 | /* Temporary storage in case the configure endpoint command fails and we |
@@ -641,7 +652,7 @@ struct xhci_virt_device { | |||
641 | */ | 652 | */ |
642 | struct xhci_device_context_array { | 653 | struct xhci_device_context_array { |
643 | /* 64-bit device addresses; we only write 32-bit addresses */ | 654 | /* 64-bit device addresses; we only write 32-bit addresses */ |
644 | u32 dev_context_ptrs[2*MAX_HC_SLOTS]; | 655 | u64 dev_context_ptrs[MAX_HC_SLOTS]; |
645 | /* private xHCD pointers */ | 656 | /* private xHCD pointers */ |
646 | dma_addr_t dma; | 657 | dma_addr_t dma; |
647 | }; | 658 | }; |
@@ -654,7 +665,7 @@ struct xhci_device_context_array { | |||
654 | 665 | ||
655 | struct xhci_stream_ctx { | 666 | struct xhci_stream_ctx { |
656 | /* 64-bit stream ring address, cycle state, and stream type */ | 667 | /* 64-bit stream ring address, cycle state, and stream type */ |
657 | u32 stream_ring[2]; | 668 | u64 stream_ring; |
658 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 669 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
659 | u32 reserved[2]; | 670 | u32 reserved[2]; |
660 | }; | 671 | }; |
@@ -662,7 +673,7 @@ struct xhci_stream_ctx { | |||
662 | 673 | ||
663 | struct xhci_transfer_event { | 674 | struct xhci_transfer_event { |
664 | /* 64-bit buffer address, or immediate data */ | 675 | /* 64-bit buffer address, or immediate data */ |
665 | u32 buffer[2]; | 676 | u64 buffer; |
666 | u32 transfer_len; | 677 | u32 transfer_len; |
667 | /* This field is interpreted differently based on the type of TRB */ | 678 | /* This field is interpreted differently based on the type of TRB */ |
668 | u32 flags; | 679 | u32 flags; |
@@ -744,7 +755,7 @@ struct xhci_transfer_event { | |||
744 | 755 | ||
745 | struct xhci_link_trb { | 756 | struct xhci_link_trb { |
746 | /* 64-bit segment pointer*/ | 757 | /* 64-bit segment pointer*/ |
747 | u32 segment_ptr[2]; | 758 | u64 segment_ptr; |
748 | u32 intr_target; | 759 | u32 intr_target; |
749 | u32 control; | 760 | u32 control; |
750 | }; | 761 | }; |
@@ -755,7 +766,7 @@ struct xhci_link_trb { | |||
755 | /* Command completion event TRB */ | 766 | /* Command completion event TRB */ |
756 | struct xhci_event_cmd { | 767 | struct xhci_event_cmd { |
757 | /* Pointer to command TRB, or the value passed by the event data trb */ | 768 | /* Pointer to command TRB, or the value passed by the event data trb */ |
758 | u32 cmd_trb[2]; | 769 | u64 cmd_trb; |
759 | u32 status; | 770 | u32 status; |
760 | u32 flags; | 771 | u32 flags; |
761 | }; | 772 | }; |
@@ -848,8 +859,8 @@ union xhci_trb { | |||
848 | #define TRB_CONFIG_EP 12 | 859 | #define TRB_CONFIG_EP 12 |
849 | /* Evaluate Context Command */ | 860 | /* Evaluate Context Command */ |
850 | #define TRB_EVAL_CONTEXT 13 | 861 | #define TRB_EVAL_CONTEXT 13 |
851 | /* Reset Transfer Ring Command */ | 862 | /* Reset Endpoint Command */ |
852 | #define TRB_RESET_RING 14 | 863 | #define TRB_RESET_EP 14 |
853 | /* Stop Transfer Ring Command */ | 864 | /* Stop Transfer Ring Command */ |
854 | #define TRB_STOP_RING 15 | 865 | #define TRB_STOP_RING 15 |
855 | /* Set Transfer Ring Dequeue Pointer Command */ | 866 | /* Set Transfer Ring Dequeue Pointer Command */ |
@@ -929,6 +940,7 @@ struct xhci_ring { | |||
929 | unsigned int cancels_pending; | 940 | unsigned int cancels_pending; |
930 | unsigned int state; | 941 | unsigned int state; |
931 | #define SET_DEQ_PENDING (1 << 0) | 942 | #define SET_DEQ_PENDING (1 << 0) |
943 | #define EP_HALTED (1 << 1) | ||
932 | /* The TRB that was last reported in a stopped endpoint ring */ | 944 | /* The TRB that was last reported in a stopped endpoint ring */ |
933 | union xhci_trb *stopped_trb; | 945 | union xhci_trb *stopped_trb; |
934 | struct xhci_td *stopped_td; | 946 | struct xhci_td *stopped_td; |
@@ -940,9 +952,15 @@ struct xhci_ring { | |||
940 | u32 cycle_state; | 952 | u32 cycle_state; |
941 | }; | 953 | }; |
942 | 954 | ||
955 | struct xhci_dequeue_state { | ||
956 | struct xhci_segment *new_deq_seg; | ||
957 | union xhci_trb *new_deq_ptr; | ||
958 | int new_cycle_state; | ||
959 | }; | ||
960 | |||
943 | struct xhci_erst_entry { | 961 | struct xhci_erst_entry { |
944 | /* 64-bit event ring segment address */ | 962 | /* 64-bit event ring segment address */ |
945 | u32 seg_addr[2]; | 963 | u64 seg_addr; |
946 | u32 seg_size; | 964 | u32 seg_size; |
947 | /* Set to zero */ | 965 | /* Set to zero */ |
948 | u32 rsvd; | 966 | u32 rsvd; |
@@ -957,6 +975,13 @@ struct xhci_erst { | |||
957 | unsigned int erst_size; | 975 | unsigned int erst_size; |
958 | }; | 976 | }; |
959 | 977 | ||
978 | struct xhci_scratchpad { | ||
979 | u64 *sp_array; | ||
980 | dma_addr_t sp_dma; | ||
981 | void **sp_buffers; | ||
982 | dma_addr_t *sp_dma_buffers; | ||
983 | }; | ||
984 | |||
960 | /* | 985 | /* |
961 | * Each segment table entry is 4*32bits long. 1K seems like an ok size: | 986 | * Each segment table entry is 4*32bits long. 1K seems like an ok size: |
962 | * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, | 987 | * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, |
@@ -1011,6 +1036,9 @@ struct xhci_hcd { | |||
1011 | struct xhci_ring *cmd_ring; | 1036 | struct xhci_ring *cmd_ring; |
1012 | struct xhci_ring *event_ring; | 1037 | struct xhci_ring *event_ring; |
1013 | struct xhci_erst erst; | 1038 | struct xhci_erst erst; |
1039 | /* Scratchpad */ | ||
1040 | struct xhci_scratchpad *scratchpad; | ||
1041 | |||
1014 | /* slot enabling and address device helpers */ | 1042 | /* slot enabling and address device helpers */ |
1015 | struct completion addr_dev; | 1043 | struct completion addr_dev; |
1016 | int slot_id; | 1044 | int slot_id; |
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, | |||
1071 | static inline void xhci_writel(struct xhci_hcd *xhci, | 1099 | static inline void xhci_writel(struct xhci_hcd *xhci, |
1072 | const unsigned int val, __u32 __iomem *regs) | 1100 | const unsigned int val, __u32 __iomem *regs) |
1073 | { | 1101 | { |
1074 | if (!in_interrupt()) | 1102 | xhci_dbg(xhci, |
1075 | xhci_dbg(xhci, | 1103 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", |
1076 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", | 1104 | regs, val); |
1077 | regs, val); | ||
1078 | writel(val, regs); | 1105 | writel(val, regs); |
1079 | } | 1106 | } |
1080 | 1107 | ||
1108 | /* | ||
1109 | * Registers should always be accessed with double word or quad word accesses. | ||
1110 | * | ||
1111 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
1112 | * with 64-bit address pointers should be written to with dword accesses by | ||
1113 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
1114 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
1115 | * the high dword, and write order is irrelevant. | ||
1116 | */ | ||
1117 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | ||
1118 | __u64 __iomem *regs) | ||
1119 | { | ||
1120 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1121 | u64 val_lo = readl(ptr); | ||
1122 | u64 val_hi = readl(ptr + 1); | ||
1123 | return val_lo + (val_hi << 32); | ||
1124 | } | ||
1125 | static inline void xhci_write_64(struct xhci_hcd *xhci, | ||
1126 | const u64 val, __u64 __iomem *regs) | ||
1127 | { | ||
1128 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1129 | u32 val_lo = lower_32_bits(val); | ||
1130 | u32 val_hi = upper_32_bits(val); | ||
1131 | |||
1132 | xhci_dbg(xhci, | ||
1133 | "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n", | ||
1134 | regs, (long unsigned int) val); | ||
1135 | writel(val_lo, ptr); | ||
1136 | writel(val_hi, ptr + 1); | ||
1137 | } | ||
1138 | |||
1081 | /* xHCI debugging */ | 1139 | /* xHCI debugging */ |
1082 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); | 1140 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); |
1083 | void xhci_print_registers(struct xhci_hcd *xhci); | 1141 | void xhci_print_registers(struct xhci_hcd *xhci); |
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); | |||
1090 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); | 1148 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); |
1091 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); | 1149 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); |
1092 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); | 1150 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); |
1093 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); | 1151 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); |
1094 | 1152 | ||
1095 | /* xHCI memory managment */ | 1153 | /* xHCI memory managment */ |
1096 | void xhci_mem_cleanup(struct xhci_hcd *xhci); | 1154 | void xhci_mem_cleanup(struct xhci_hcd *xhci); |
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); | |||
1128 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); | 1186 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); |
1129 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1187 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
1130 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1188 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
1189 | void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); | ||
1131 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); | 1190 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1132 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); | 1191 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1133 | 1192 | ||
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, | |||
1148 | int slot_id, unsigned int ep_index); | 1207 | int slot_id, unsigned int ep_index); |
1149 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1208 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1150 | u32 slot_id); | 1209 | u32 slot_id); |
1210 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | ||
1211 | unsigned int ep_index); | ||
1212 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | ||
1213 | unsigned int slot_id, unsigned int ep_index, | ||
1214 | struct xhci_td *cur_td, struct xhci_dequeue_state *state); | ||
1215 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | ||
1216 | struct xhci_ring *ep_ring, unsigned int slot_id, | ||
1217 | unsigned int ep_index, struct xhci_dequeue_state *deq_state); | ||
1151 | 1218 | ||
1152 | /* xHCI roothub code */ | 1219 | /* xHCI roothub code */ |
1153 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, | 1220 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |
1154 | char *buf, u16 wLength); | 1221 | char *buf, u16 wLength); |
1155 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); | 1222 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); |
1156 | 1223 | ||
1224 | /* xHCI contexts */ | ||
1225 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); | ||
1226 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); | ||
1227 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); | ||
1228 | |||
1157 | #endif /* __LINUX_XHCI_HCD_H */ | 1229 | #endif /* __LINUX_XHCI_HCD_H */ |
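The comment that this patch moves next to xhci_read_64()/xhci_write_64() spells out the access rule: 64-bit registers are touched as two 32-bit dwords, low dword first, and controllers without 64-bit support simply ignore the high half. The sketch below mirrors that convention against an in-memory stand-in for the register; fake_readl()/fake_writel() are placeholders for the real readl()/writel() MMIO accessors and deliberately ignore ordering and barrier concerns.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for readl()/writel(), backed by plain memory for the demo. */
    static uint32_t fake_readl(const volatile uint32_t *reg) { return *reg; }
    static void fake_writel(uint32_t val, volatile uint32_t *reg) { *reg = val; }

    static uint64_t demo_read_64(const volatile uint32_t *reg)
    {
            uint64_t lo = fake_readl(reg);        /* low dword first */
            uint64_t hi = fake_readl(reg + 1);    /* then the high dword */
            return lo + (hi << 32);
    }

    static void demo_write_64(uint64_t val, volatile uint32_t *reg)
    {
            fake_writel((uint32_t)val, reg);              /* low dword first */
            fake_writel((uint32_t)(val >> 32), reg + 1);  /* high dword second */
    }

    int main(void)
    {
            volatile uint32_t regs[2] = { 0, 0 };   /* pretend 64-bit register */

            demo_write_64(0x00000001deadbeefULL, regs);
            printf("read back 0x%016llx\n",
                   (unsigned long long)demo_read_64(regs));
            return 0;
    }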
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index a68d91a11bee..abe3aa67ed00 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig | |||
@@ -220,7 +220,7 @@ config USB_IOWARRIOR | |||
220 | 220 | ||
221 | config USB_TEST | 221 | config USB_TEST |
222 | tristate "USB testing driver" | 222 | tristate "USB testing driver" |
223 | depends on USB && USB_DEVICEFS | 223 | depends on USB |
224 | help | 224 | help |
225 | This driver is for testing host controller software. It is used | 225 | This driver is for testing host controller software. It is used |
226 | with specialized device firmware for regression and stress testing, | 226 | with specialized device firmware for regression and stress testing, |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 554a414f65d1..c7c1ca0494cd 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1326 | int i; | 1326 | int i; |
1327 | 1327 | ||
1328 | /* log core options (read using indexed model) */ | 1328 | /* log core options (read using indexed model) */ |
1329 | musb_ep_select(mbase, 0); | ||
1330 | reg = musb_read_configdata(mbase); | 1329 | reg = musb_read_configdata(mbase); |
1331 | 1330 | ||
1332 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); | 1331 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); |
@@ -1990,7 +1989,7 @@ bad_config: | |||
1990 | if (status < 0) | 1989 | if (status < 0) |
1991 | goto fail2; | 1990 | goto fail2; |
1992 | 1991 | ||
1993 | #ifdef CONFIG_USB_OTG | 1992 | #ifdef CONFIG_USB_MUSB_OTG |
1994 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); | 1993 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); |
1995 | #endif | 1994 | #endif |
1996 | 1995 | ||
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 40ed50ecedff..7a6778675ad3 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -407,7 +407,7 @@ stall: | |||
407 | csr |= MUSB_RXCSR_P_SENDSTALL | 407 | csr |= MUSB_RXCSR_P_SENDSTALL |
408 | | MUSB_RXCSR_FLUSHFIFO | 408 | | MUSB_RXCSR_FLUSHFIFO |
409 | | MUSB_RXCSR_CLRDATATOG | 409 | | MUSB_RXCSR_CLRDATATOG |
410 | | MUSB_TXCSR_P_WZC_BITS; | 410 | | MUSB_RXCSR_P_WZC_BITS; |
411 | musb_writew(regs, MUSB_RXCSR, | 411 | musb_writew(regs, MUSB_RXCSR, |
412 | csr); | 412 | csr); |
413 | } | 413 | } |
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index de3b2f18db44..fbfd3fd9ce1f 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h | |||
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) | |||
323 | 323 | ||
324 | static inline u8 musb_read_configdata(void __iomem *mbase) | 324 | static inline u8 musb_read_configdata(void __iomem *mbase) |
325 | { | 325 | { |
326 | musb_writeb(mbase, MUSB_INDEX, 0); | ||
326 | return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); | 327 | return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); |
327 | } | 328 | } |
328 | 329 | ||
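The musb_regs.h hunk moves the index-select into musb_read_configdata() itself, which is why the explicit musb_ep_select(mbase, 0) disappears from musb_core_init() earlier in this series: CONFIGDATA sits behind an indexed register window, so the accessor must pick bank 0 before reading. Below is a small sketch of that indexed-register idiom; the bank layout and offsets are invented for the example and are not MUSB's real register map.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy indexed register file: writing INDEX selects which bank the
     * windowed offsets refer to.  All offsets here are made up. */
    #define TOY_INDEX      0x0e
    #define TOY_CONFIGDATA 0x10

    static uint8_t banks[4][32];    /* four banks of 32 windowed registers */
    static uint8_t index_reg;

    static void toy_writeb(unsigned int off, uint8_t val)
    {
            if (off == TOY_INDEX)
                    index_reg = val & 3;
            else
                    banks[index_reg][off & 31] = val;
    }

    static uint8_t toy_readb(unsigned int off)
    {
            return banks[index_reg][off & 31];
    }

    /* The accessor owns the index selection, mirroring the musb fix. */
    static uint8_t toy_read_configdata(void)
    {
            toy_writeb(TOY_INDEX, 0);   /* CONFIGDATA is only valid in bank 0 */
            return toy_readb(TOY_CONFIGDATA);
    }

    int main(void)
    {
            banks[0][TOY_CONFIGDATA & 31] = 0x5a;
            toy_writeb(TOY_INDEX, 3);   /* a caller left another bank selected */
            printf("configdata=0x%02x\n", toy_read_configdata());
            return 0;
    }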
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index e9a40b820fd4..985cbcf48bda 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = { | |||
80 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ | 80 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ |
81 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ | 81 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ |
82 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ | 82 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ |
83 | { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ | ||
83 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 84 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
84 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 85 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
85 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ | 86 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = { | |||
96 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ | 97 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ |
97 | { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ | 98 | { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ |
98 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ | 99 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ |
100 | { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ | ||
99 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ | 101 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ |
102 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ | ||
100 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ | 103 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ |
101 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 104 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
102 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 105 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 60c64cc5be2a..b574878c78b2 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = { | |||
698 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), | 698 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), |
699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, | 700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, |
701 | { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, | ||
701 | { }, /* Optional parameter entry */ | 702 | { }, /* Optional parameter entry */ |
702 | { } /* Terminating entry */ | 703 | { } /* Terminating entry */ |
703 | }; | 704 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index c9fbd7415092..24dbd99e87d7 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -947,6 +947,13 @@ | |||
947 | #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ | 947 | #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ |
948 | 948 | ||
949 | /* | 949 | /* |
950 | * GN Otometrics (http://www.otometrics.com) | ||
951 | * Submitted by Ville Sundberg. | ||
952 | */ | ||
953 | #define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */ | ||
954 | #define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ | ||
955 | |||
956 | /* | ||
950 | * BmRequestType: 1100 0000b | 957 | * BmRequestType: 1100 0000b |
951 | * bRequest: FTDI_E2_READ | 958 | * bRequest: FTDI_E2_READ |
952 | * wValue: 0 | 959 | * wValue: 0 |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index c31940a307f8..270009afdf77 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -124,10 +124,13 @@ | |||
124 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | 124 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 |
125 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | 125 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 |
126 | 126 | ||
127 | /* This driver also supports the ATEN UC2324 device since it is mos7840 based | 127 | /* This driver also supports |
128 | * - if I knew the device id it would also support the ATEN UC2322 */ | 128 | * ATEN UC2324 device using Moschip MCS7840 |
129 | * ATEN UC2322 device using Moschip MCS7820 | ||
130 | */ | ||
129 | #define USB_VENDOR_ID_ATENINTL 0x0557 | 131 | #define USB_VENDOR_ID_ATENINTL 0x0557 |
130 | #define ATENINTL_DEVICE_ID_UC2324 0x2011 | 132 | #define ATENINTL_DEVICE_ID_UC2324 0x2011 |
133 | #define ATENINTL_DEVICE_ID_UC2322 0x7820 | ||
131 | 134 | ||
132 | /* Interrupt Routine Defines */ | 135 | /* Interrupt Routine Defines */ |
133 | 136 | ||
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = { | |||
177 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
178 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 181 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 182 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
183 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
180 | {} /* terminating entry */ | 184 | {} /* terminating entry */ |
181 | }; | 185 | }; |
182 | 186 | ||
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | |||
186 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
187 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
188 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 192 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
193 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
189 | {} /* terminating entry */ | 194 | {} /* terminating entry */ |
190 | }; | 195 | }; |
191 | 196 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 98262dd552bb..c784ddbe7b61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file); | |||
66 | static int option_tiocmset(struct tty_struct *tty, struct file *file, | 66 | static int option_tiocmset(struct tty_struct *tty, struct file *file, |
67 | unsigned int set, unsigned int clear); | 67 | unsigned int set, unsigned int clear); |
68 | static int option_send_setup(struct usb_serial_port *port); | 68 | static int option_send_setup(struct usb_serial_port *port); |
69 | #ifdef CONFIG_PM | ||
69 | static int option_suspend(struct usb_serial *serial, pm_message_t message); | 70 | static int option_suspend(struct usb_serial *serial, pm_message_t message); |
70 | static int option_resume(struct usb_serial *serial); | 71 | static int option_resume(struct usb_serial *serial); |
72 | #endif | ||
71 | 73 | ||
72 | /* Vendor and product IDs */ | 74 | /* Vendor and product IDs */ |
73 | #define OPTION_VENDOR_ID 0x0AF0 | 75 | #define OPTION_VENDOR_ID 0x0AF0 |
@@ -205,6 +207,7 @@ static int option_resume(struct usb_serial *serial); | |||
205 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 207 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
206 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 208 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
207 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | 209 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 |
210 | #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | ||
208 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 | 211 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 |
209 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 | 212 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 |
210 | 213 | ||
@@ -259,11 +262,6 @@ static int option_resume(struct usb_serial *serial); | |||
259 | #define AXESSTEL_VENDOR_ID 0x1726 | 262 | #define AXESSTEL_VENDOR_ID 0x1726 |
260 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | 263 | #define AXESSTEL_PRODUCT_MV110H 0x1000 |
261 | 264 | ||
262 | #define ONDA_VENDOR_ID 0x19d2 | ||
263 | #define ONDA_PRODUCT_MSA501HS 0x0001 | ||
264 | #define ONDA_PRODUCT_ET502HS 0x0002 | ||
265 | #define ONDA_PRODUCT_MT503HS 0x2000 | ||
266 | |||
267 | #define BANDRICH_VENDOR_ID 0x1A8D | 265 | #define BANDRICH_VENDOR_ID 0x1A8D |
268 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 266 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
269 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 267 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -301,6 +299,7 @@ static int option_resume(struct usb_serial *serial); | |||
301 | #define ZTE_PRODUCT_MF628 0x0015 | 299 | #define ZTE_PRODUCT_MF628 0x0015 |
302 | #define ZTE_PRODUCT_MF626 0x0031 | 300 | #define ZTE_PRODUCT_MF626 0x0031 |
303 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe | 301 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe |
302 | #define ZTE_PRODUCT_AC8710 0xfff1 | ||
304 | 303 | ||
305 | #define BENQ_VENDOR_ID 0x04a5 | 304 | #define BENQ_VENDOR_ID 0x04a5 |
306 | #define BENQ_PRODUCT_H10 0x4068 | 305 | #define BENQ_PRODUCT_H10 0x4068 |
@@ -322,6 +321,11 @@ static int option_resume(struct usb_serial *serial); | |||
322 | #define ALINK_VENDOR_ID 0x1e0e | 321 | #define ALINK_VENDOR_ID 0x1e0e |
323 | #define ALINK_PRODUCT_3GU 0x9200 | 322 | #define ALINK_PRODUCT_3GU 0x9200 |
324 | 323 | ||
324 | /* ALCATEL PRODUCTS */ | ||
325 | #define ALCATEL_VENDOR_ID 0x1bbb | ||
326 | #define ALCATEL_PRODUCT_X060S 0x0000 | ||
327 | |||
328 | |||
325 | static struct usb_device_id option_ids[] = { | 329 | static struct usb_device_id option_ids[] = { |
326 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 330 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
327 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 331 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = { | |||
438 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 442 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ |
439 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 443 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ |
440 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 444 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ |
445 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ | ||
441 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ | 446 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ |
442 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ | 447 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ |
443 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ | 448 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ |
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = { | |||
474 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 479 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
475 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, | 480 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
476 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | 481 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, |
477 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, | ||
478 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, | ||
479 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) }, | ||
480 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) }, | ||
481 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) }, | ||
482 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) }, | ||
483 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) }, | ||
484 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) }, | ||
485 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) }, | ||
486 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) }, | ||
487 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) }, | ||
488 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) }, | ||
489 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) }, | ||
490 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000e) }, | ||
491 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) }, | ||
492 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) }, | ||
493 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) }, | ||
494 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) }, | ||
495 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) }, | ||
496 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) }, | ||
497 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) }, | ||
498 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) }, | ||
499 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) }, | ||
500 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) }, | ||
501 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) }, | ||
502 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) }, | ||
503 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) }, | ||
504 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) }, | ||
505 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) }, | ||
506 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) }, | ||
507 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) }, | ||
508 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) }, | ||
509 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) }, | ||
510 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) }, | ||
511 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) }, | ||
512 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) }, | ||
513 | { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, | 482 | { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, |
514 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 483 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
515 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 484 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = { | |||
534 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 503 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
535 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 504 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
536 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 505 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
537 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) }, | 506 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
538 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, | 507 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, |
539 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | 508 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, |
540 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, | 509 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, |
510 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, | ||
511 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, | ||
512 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, | ||
513 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, | ||
514 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, | ||
515 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, | ||
516 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, | ||
517 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, | ||
518 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, | ||
519 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, | ||
520 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, | ||
521 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, | ||
522 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, | ||
523 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, | ||
524 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, | ||
525 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, | ||
526 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, | ||
527 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, | ||
528 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, | ||
529 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, | ||
530 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, | ||
531 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, | ||
532 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, | ||
533 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, | ||
534 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, | ||
535 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, | ||
536 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, | ||
537 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, | ||
538 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, | ||
539 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, | ||
540 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, | ||
541 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, | ||
542 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, | ||
543 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, | ||
544 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, | ||
545 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, | ||
546 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, | ||
547 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, | ||
548 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, | ||
549 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, | ||
550 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, | ||
551 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, | ||
552 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, | ||
553 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, | ||
554 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, | ||
555 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, | ||
556 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, | ||
557 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, | ||
558 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, | ||
559 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, | ||
560 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, | ||
561 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, | ||
562 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, | ||
563 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, | ||
564 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, | ||
565 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, | ||
566 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, | ||
567 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ | ||
568 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, | ||
569 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, | ||
570 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, | ||
571 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, | ||
572 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, | ||
573 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, | ||
574 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, | ||
541 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 575 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
542 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | 576 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, |
543 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, | 577 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, |
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = { | |||
547 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ | 581 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ |
548 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, | 582 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, |
549 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, | 583 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, |
584 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | ||
550 | { } /* Terminating entry */ | 585 | { } /* Terminating entry */ |
551 | }; | 586 | }; |
552 | MODULE_DEVICE_TABLE(usb, option_ids); | 587 | MODULE_DEVICE_TABLE(usb, option_ids); |
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = { | |||
555 | .name = "option", | 590 | .name = "option", |
556 | .probe = usb_serial_probe, | 591 | .probe = usb_serial_probe, |
557 | .disconnect = usb_serial_disconnect, | 592 | .disconnect = usb_serial_disconnect, |
593 | #ifdef CONFIG_PM | ||
558 | .suspend = usb_serial_suspend, | 594 | .suspend = usb_serial_suspend, |
559 | .resume = usb_serial_resume, | 595 | .resume = usb_serial_resume, |
596 | #endif | ||
560 | .id_table = option_ids, | 597 | .id_table = option_ids, |
561 | .no_dynamic_id = 1, | 598 | .no_dynamic_id = 1, |
562 | }; | 599 | }; |
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = { | |||
588 | .disconnect = option_disconnect, | 625 | .disconnect = option_disconnect, |
589 | .release = option_release, | 626 | .release = option_release, |
590 | .read_int_callback = option_instat_callback, | 627 | .read_int_callback = option_instat_callback, |
628 | #ifdef CONFIG_PM | ||
591 | .suspend = option_suspend, | 629 | .suspend = option_suspend, |
592 | .resume = option_resume, | 630 | .resume = option_resume, |
631 | #endif | ||
593 | }; | 632 | }; |
594 | 633 | ||
595 | static int debug; | 634 | static int debug; |
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb) | |||
831 | int status = urb->status; | 870 | int status = urb->status; |
832 | struct usb_serial_port *port = urb->context; | 871 | struct usb_serial_port *port = urb->context; |
833 | struct option_port_private *portdata = usb_get_serial_port_data(port); | 872 | struct option_port_private *portdata = usb_get_serial_port_data(port); |
834 | struct usb_serial *serial = port->serial; | ||
835 | 873 | ||
836 | dbg("%s", __func__); | 874 | dbg("%s", __func__); |
837 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); | 875 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); |
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty, | |||
927 | struct usb_serial_port *port, struct file *filp) | 965 | struct usb_serial_port *port, struct file *filp) |
928 | { | 966 | { |
929 | struct option_port_private *portdata; | 967 | struct option_port_private *portdata; |
930 | struct usb_serial *serial = port->serial; | ||
931 | int i, err; | 968 | int i, err; |
932 | struct urb *urb; | 969 | struct urb *urb; |
933 | 970 | ||
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial) | |||
1187 | } | 1224 | } |
1188 | } | 1225 | } |
1189 | 1226 | ||
1227 | #ifdef CONFIG_PM | ||
1190 | static int option_suspend(struct usb_serial *serial, pm_message_t message) | 1228 | static int option_suspend(struct usb_serial *serial, pm_message_t message) |
1191 | { | 1229 | { |
1192 | dbg("%s entered", __func__); | 1230 | dbg("%s entered", __func__); |
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial) | |||
1245 | } | 1283 | } |
1246 | return 0; | 1284 | return 0; |
1247 | } | 1285 | } |
1286 | #endif | ||
1248 | 1287 | ||
1249 | MODULE_AUTHOR(DRIVER_AUTHOR); | 1288 | MODULE_AUTHOR(DRIVER_AUTHOR); |
1250 | MODULE_DESCRIPTION(DRIVER_DESC); | 1289 | MODULE_DESCRIPTION(DRIVER_DESC); |
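The option.c table above converts the ZTE entries from plain USB_DEVICE() matches to USB_DEVICE_AND_INTERFACE_INFO() with class/subclass/protocol 0xff/0xff/0xff, so the serial driver only claims the vendor-specific interfaces and leaves, for example, a storage interface on the same device alone. The sketch below shows that matching idea with a hand-rolled table; the toy_id/toy_iface types are invented for the illustration and are not the kernel's usb_device_id machinery.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_id {
            uint16_t vid, pid;
            uint8_t match_iface;         /* 0: match on VID/PID alone */
            uint8_t cls, subcls, proto;  /* interface triple, if matched */
    };

    struct toy_iface {
            uint16_t vid, pid;
            uint8_t cls, subcls, proto;
    };

    static const struct toy_id table[] = {
            { 0x19d2, 0x0031, 1, 0xff, 0xff, 0xff },  /* interface-qualified entry */
            { 0x1bbb, 0x0000, 0, 0,    0,    0    },  /* plain VID/PID entry */
    };

    static int toy_match(const struct toy_iface *ifc)
    {
            for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                    const struct toy_id *id = &table[i];

                    if (id->vid != ifc->vid || id->pid != ifc->pid)
                            continue;
                    if (id->match_iface &&
                        (id->cls != ifc->cls || id->subcls != ifc->subcls ||
                         id->proto != ifc->proto))
                            continue;
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct toy_iface serial  = { 0x19d2, 0x0031, 0xff, 0xff, 0xff };
            struct toy_iface storage = { 0x19d2, 0x0031, 0x08, 0x06, 0x50 };

            printf("vendor-specific interface matches: %d\n", toy_match(&serial));
            printf("mass-storage interface matches:    %d\n", toy_match(&storage));
            return 0;
    }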
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index bd7581b3a48a..99188c92068b 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
35 | #include <linux/serial.h> | ||
35 | #include <linux/usb.h> | 36 | #include <linux/usb.h> |
36 | #include <linux/usb/serial.h> | 37 | #include <linux/usb/serial.h> |
37 | #include "pl2303.h" | 38 | #include "pl2303.h" |
@@ -184,6 +185,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | |||
184 | struct usb_serial_port *port; | 185 | struct usb_serial_port *port; |
185 | unsigned int portNumber; | 186 | unsigned int portNumber; |
186 | int retval = 0; | 187 | int retval = 0; |
188 | int first = 0; | ||
187 | 189 | ||
188 | dbg("%s", __func__); | 190 | dbg("%s", __func__); |
189 | 191 | ||
@@ -223,7 +225,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | |||
223 | 225 | ||
224 | /* If the console is attached, the device is already open */ | 226 | /* If the console is attached, the device is already open */ |
225 | if (port->port.count == 1 && !port->console) { | 227 | if (port->port.count == 1 && !port->console) { |
226 | 228 | first = 1; | |
227 | /* lock this module before we call it | 229 | /* lock this module before we call it |
228 | * this may fail, which means we must bail out, | 230 | * this may fail, which means we must bail out, |
229 | * safe because we are called with BKL held */ | 231 | * safe because we are called with BKL held */ |
@@ -246,13 +248,21 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | |||
246 | if (retval) | 248 | if (retval) |
247 | goto bailout_interface_put; | 249 | goto bailout_interface_put; |
248 | mutex_unlock(&serial->disc_mutex); | 250 | mutex_unlock(&serial->disc_mutex); |
251 | set_bit(ASYNCB_INITIALIZED, &port->port.flags); | ||
249 | } | 252 | } |
250 | mutex_unlock(&port->mutex); | 253 | mutex_unlock(&port->mutex); |
251 | /* Now do the correct tty layer semantics */ | 254 | /* Now do the correct tty layer semantics */ |
252 | retval = tty_port_block_til_ready(&port->port, tty, filp); | 255 | retval = tty_port_block_til_ready(&port->port, tty, filp); |
253 | if (retval == 0) | 256 | if (retval == 0) { |
257 | if (!first) | ||
258 | usb_serial_put(serial); | ||
254 | return 0; | 259 | return 0; |
255 | 260 | } | |
261 | mutex_lock(&port->mutex); | ||
262 | if (first == 0) | ||
263 | goto bailout_mutex_unlock; | ||
264 | /* Undo the initial port actions */ | ||
265 | mutex_lock(&serial->disc_mutex); | ||
256 | bailout_interface_put: | 266 | bailout_interface_put: |
257 | usb_autopm_put_interface(serial->interface); | 267 | usb_autopm_put_interface(serial->interface); |
258 | bailout_module_put: | 268 | bailout_module_put: |
@@ -340,6 +350,22 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | |||
340 | 350 | ||
341 | dbg("%s - port %d", __func__, port->number); | 351 | dbg("%s - port %d", __func__, port->number); |
342 | 352 | ||
353 | /* FIXME: | ||
354 | This leaves a very narrow race. Really we should do the | ||
355 | serial_do_free() on tty->shutdown(), but tty->shutdown can | ||
356 | be called from IRQ context and serial_do_free can sleep. | ||
357 | |||
358 | The right fix is probably to make the tty free (which is rare) | ||
359 | and thus tty->shutdown() occur via a work queue and simplify all | ||
360 | the drivers that use it. | ||
361 | */ | ||
362 | if (tty_hung_up_p(filp)) { | ||
363 | /* serial_hangup already called serial_down at this point. | ||
364 | Another user may have already reopened the port but | ||
365 | serial_do_free is refcounted */ | ||
366 | serial_do_free(port); | ||
367 | return; | ||
368 | } | ||
343 | 369 | ||
344 | if (tty_port_close_start(&port->port, tty, filp) == 0) | 370 | if (tty_port_close_start(&port->port, tty, filp) == 0) |
345 | return; | 371 | return; |
@@ -355,7 +381,8 @@ static void serial_hangup(struct tty_struct *tty) | |||
355 | struct usb_serial_port *port = tty->driver_data; | 381 | struct usb_serial_port *port = tty->driver_data; |
356 | serial_do_down(port); | 382 | serial_do_down(port); |
357 | tty_port_hangup(&port->port); | 383 | tty_port_hangup(&port->port); |
358 | serial_do_free(port); | 384 | /* We must not free port yet - the USB serial layer depends on its |
385 | continued existence */ | ||
359 | } | 386 | } |
360 | 387 | ||
361 | static int serial_write(struct tty_struct *tty, const unsigned char *buf, | 388 | static int serial_write(struct tty_struct *tty, const unsigned char *buf, |
@@ -394,7 +421,6 @@ static int serial_chars_in_buffer(struct tty_struct *tty) | |||
394 | struct usb_serial_port *port = tty->driver_data; | 421 | struct usb_serial_port *port = tty->driver_data; |
395 | dbg("%s = port %d", __func__, port->number); | 422 | dbg("%s = port %d", __func__, port->number); |
396 | 423 | ||
397 | WARN_ON(!port->port.count); | ||
398 | /* if the device was unplugged then any remaining characters | 424 | /* if the device was unplugged then any remaining characters |
399 | fell out of the connector ;) */ | 425 | fell out of the connector ;) */ |
400 | if (port->serial->disconnected) | 426 | if (port->serial->disconnected) |
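The open/close rework above leans on serial_do_free() being refcounted: hangup no longer frees the port, and serial_close() performs the free itself once tty_hung_up_p() reports that a hangup already ran, so whichever path drops the last reference does the release. A minimal user-space sketch of that last-reference-frees pattern, with hypothetical port_get()/port_put() helpers standing in for the driver's accounting:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the driver's per-port state. */
    struct port {
            atomic_int refcount;
    };

    static struct port *port_alloc(void)
    {
            struct port *p = malloc(sizeof(*p));
            if (!p)
                    return NULL;
            atomic_init(&p->refcount, 1);   /* reference held by the open path */
            return p;
    }

    static void port_get(struct port *p)
    {
            atomic_fetch_add(&p->refcount, 1);
    }

    /* Both the hangup path and the close path call this; only the last
     * caller frees, so the ordering of hangup vs. close stops mattering. */
    static void port_put(struct port *p)
    {
            if (atomic_fetch_sub(&p->refcount, 1) == 1) {
                    printf("last reference dropped, freeing port\n");
                    free(p);
            }
    }

    int main(void)
    {
            struct port *p = port_alloc();
            port_get(p);    /* e.g. hangup takes its own reference */
            port_put(p);    /* hangup done */
            port_put(p);    /* final close: actually frees */
            return 0;
    }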
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index fcb320217218..e20dc525d177 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us) | |||
961 | US_BULK_GET_MAX_LUN, | 961 | US_BULK_GET_MAX_LUN, |
962 | USB_DIR_IN | USB_TYPE_CLASS | | 962 | USB_DIR_IN | USB_TYPE_CLASS | |
963 | USB_RECIP_INTERFACE, | 963 | USB_RECIP_INTERFACE, |
964 | 0, us->ifnum, us->iobuf, 1, HZ); | 964 | 0, us->ifnum, us->iobuf, 1, 10*HZ); |
965 | 965 | ||
966 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", | 966 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", |
967 | result, us->iobuf[0]); | 967 | result, us->iobuf[0]); |
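The transport.c hunk stretches the Get Max LUN control-transfer timeout from one second (HZ jiffies) to ten, since some devices answer this class request slowly. For reference, the same Bulk-Only request can be issued from user space roughly as below; this is a sketch assuming libusb-1.0 and an already opened, claimed device handle, not the kernel's usb_stor_control_msg() path:

    #include <stdio.h>
    #include <stdint.h>
    #include <libusb-1.0/libusb.h>

    /* Issue the Bulk-Only "Get Max LUN" class request (bRequest 0xFE) on
     * the mass storage interface ifnum.  Returns the max LUN, or a
     * negative libusb error code. */
    static int get_max_lun(libusb_device_handle *dev, int ifnum)
    {
            unsigned char maxlun = 0;
            int ret = libusb_control_transfer(dev,
                            LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_CLASS |
                            LIBUSB_RECIPIENT_INTERFACE,
                            0xFE,                   /* GET MAX LUN */
                            0, (uint16_t)ifnum,
                            &maxlun, 1,
                            10000);                 /* 10 s, matching 10*HZ above */
            if (ret < 0)
                    return ret;
            printf("max LUN: %u\n", maxlun);
            return maxlun;
    }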
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 6e4f6c50a120..019e8af449ab 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
@@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work) | |||
424 | * list | 424 | * list |
425 | */ | 425 | */ |
426 | if (worker->idle) { | 426 | if (worker->idle) { |
427 | spin_lock_irqsave(&worker->workers->lock, flags); | 427 | spin_lock(&worker->workers->lock); |
428 | worker->idle = 0; | 428 | worker->idle = 0; |
429 | list_move_tail(&worker->worker_list, | 429 | list_move_tail(&worker->worker_list, |
430 | &worker->workers->worker_list); | 430 | &worker->workers->worker_list); |
431 | spin_unlock_irqrestore(&worker->workers->lock, flags); | 431 | spin_unlock(&worker->workers->lock); |
432 | } | 432 | } |
433 | if (!worker->working) { | 433 | if (!worker->working) { |
434 | wake = 1; | 434 | wake = 1; |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 60a45f3a4e91..3fdcc0512d3a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) | |||
557 | 557 | ||
558 | btrfs_disk_key_to_cpu(&k1, disk); | 558 | btrfs_disk_key_to_cpu(&k1, disk); |
559 | 559 | ||
560 | if (k1.objectid > k2->objectid) | 560 | return btrfs_comp_cpu_keys(&k1, k2); |
561 | return 1; | ||
562 | if (k1.objectid < k2->objectid) | ||
563 | return -1; | ||
564 | if (k1.type > k2->type) | ||
565 | return 1; | ||
566 | if (k1.type < k2->type) | ||
567 | return -1; | ||
568 | if (k1.offset > k2->offset) | ||
569 | return 1; | ||
570 | if (k1.offset < k2->offset) | ||
571 | return -1; | ||
572 | return 0; | ||
573 | } | 561 | } |
574 | 562 | ||
575 | /* | 563 | /* |
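The comp_keys() body removed above was an open-coded copy of the compare that btrfs_comp_cpu_keys() already does on CPU-order keys: objectid first, then type, then offset. A standalone sketch of that ordering, on a simplified key struct rather than the real struct btrfs_key:

    #include <stdint.h>

    /* Simplified stand-in for struct btrfs_key (CPU byte order). */
    struct key {
            uint64_t objectid;
            uint8_t  type;
            uint64_t offset;
    };

    /* Lexicographic compare: objectid, then type, then offset.
     * Returns <0, 0 or >0 like the kernel helper. */
    static int comp_cpu_keys(const struct key *k1, const struct key *k2)
    {
            if (k1->objectid > k2->objectid)
                    return 1;
            if (k1->objectid < k2->objectid)
                    return -1;
            if (k1->type > k2->type)
                    return 1;
            if (k1->type < k2->type)
                    return -1;
            if (k1->offset > k2->offset)
                    return 1;
            if (k1->offset < k2->offset)
                    return -1;
            return 0;
    }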
@@ -1052,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1052 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1040 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
1053 | return 0; | 1041 | return 0; |
1054 | 1042 | ||
1055 | if (btrfs_header_nritems(mid) > 2) | ||
1056 | return 0; | ||
1057 | |||
1058 | if (btrfs_header_nritems(mid) < 2) | 1043 | if (btrfs_header_nritems(mid) < 2) |
1059 | err_on_enospc = 1; | 1044 | err_on_enospc = 1; |
1060 | 1045 | ||
@@ -1701,6 +1686,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root | |||
1701 | struct extent_buffer *b; | 1686 | struct extent_buffer *b; |
1702 | int slot; | 1687 | int slot; |
1703 | int ret; | 1688 | int ret; |
1689 | int err; | ||
1704 | int level; | 1690 | int level; |
1705 | int lowest_unlock = 1; | 1691 | int lowest_unlock = 1; |
1706 | u8 lowest_level = 0; | 1692 | u8 lowest_level = 0; |
@@ -1737,8 +1723,6 @@ again: | |||
1737 | p->locks[level] = 1; | 1723 | p->locks[level] = 1; |
1738 | 1724 | ||
1739 | if (cow) { | 1725 | if (cow) { |
1740 | int wret; | ||
1741 | |||
1742 | /* | 1726 | /* |
1743 | * if we don't really need to cow this block | 1727 | * if we don't really need to cow this block |
1744 | * then we don't want to set the path blocking, | 1728 | * then we don't want to set the path blocking, |
@@ -1749,12 +1733,12 @@ again: | |||
1749 | 1733 | ||
1750 | btrfs_set_path_blocking(p); | 1734 | btrfs_set_path_blocking(p); |
1751 | 1735 | ||
1752 | wret = btrfs_cow_block(trans, root, b, | 1736 | err = btrfs_cow_block(trans, root, b, |
1753 | p->nodes[level + 1], | 1737 | p->nodes[level + 1], |
1754 | p->slots[level + 1], &b); | 1738 | p->slots[level + 1], &b); |
1755 | if (wret) { | 1739 | if (err) { |
1756 | free_extent_buffer(b); | 1740 | free_extent_buffer(b); |
1757 | ret = wret; | 1741 | ret = err; |
1758 | goto done; | 1742 | goto done; |
1759 | } | 1743 | } |
1760 | } | 1744 | } |
@@ -1793,41 +1777,45 @@ cow_done: | |||
1793 | ret = bin_search(b, key, level, &slot); | 1777 | ret = bin_search(b, key, level, &slot); |
1794 | 1778 | ||
1795 | if (level != 0) { | 1779 | if (level != 0) { |
1796 | if (ret && slot > 0) | 1780 | int dec = 0; |
1781 | if (ret && slot > 0) { | ||
1782 | dec = 1; | ||
1797 | slot -= 1; | 1783 | slot -= 1; |
1784 | } | ||
1798 | p->slots[level] = slot; | 1785 | p->slots[level] = slot; |
1799 | ret = setup_nodes_for_search(trans, root, p, b, level, | 1786 | err = setup_nodes_for_search(trans, root, p, b, level, |
1800 | ins_len); | 1787 | ins_len); |
1801 | if (ret == -EAGAIN) | 1788 | if (err == -EAGAIN) |
1802 | goto again; | 1789 | goto again; |
1803 | else if (ret) | 1790 | if (err) { |
1791 | ret = err; | ||
1804 | goto done; | 1792 | goto done; |
1793 | } | ||
1805 | b = p->nodes[level]; | 1794 | b = p->nodes[level]; |
1806 | slot = p->slots[level]; | 1795 | slot = p->slots[level]; |
1807 | 1796 | ||
1808 | unlock_up(p, level, lowest_unlock); | 1797 | unlock_up(p, level, lowest_unlock); |
1809 | 1798 | ||
1810 | /* this is only true while dropping a snapshot */ | ||
1811 | if (level == lowest_level) { | 1799 | if (level == lowest_level) { |
1812 | ret = 0; | 1800 | if (dec) |
1801 | p->slots[level]++; | ||
1813 | goto done; | 1802 | goto done; |
1814 | } | 1803 | } |
1815 | 1804 | ||
1816 | ret = read_block_for_search(trans, root, p, | 1805 | err = read_block_for_search(trans, root, p, |
1817 | &b, level, slot, key); | 1806 | &b, level, slot, key); |
1818 | if (ret == -EAGAIN) | 1807 | if (err == -EAGAIN) |
1819 | goto again; | 1808 | goto again; |
1820 | 1809 | if (err) { | |
1821 | if (ret == -EIO) | 1810 | ret = err; |
1822 | goto done; | 1811 | goto done; |
1812 | } | ||
1823 | 1813 | ||
1824 | if (!p->skip_locking) { | 1814 | if (!p->skip_locking) { |
1825 | int lret; | ||
1826 | |||
1827 | btrfs_clear_path_blocking(p, NULL); | 1815 | btrfs_clear_path_blocking(p, NULL); |
1828 | lret = btrfs_try_spin_lock(b); | 1816 | err = btrfs_try_spin_lock(b); |
1829 | 1817 | ||
1830 | if (!lret) { | 1818 | if (!err) { |
1831 | btrfs_set_path_blocking(p); | 1819 | btrfs_set_path_blocking(p); |
1832 | btrfs_tree_lock(b); | 1820 | btrfs_tree_lock(b); |
1833 | btrfs_clear_path_blocking(p, b); | 1821 | btrfs_clear_path_blocking(p, b); |
@@ -1837,16 +1825,14 @@ cow_done: | |||
1837 | p->slots[level] = slot; | 1825 | p->slots[level] = slot; |
1838 | if (ins_len > 0 && | 1826 | if (ins_len > 0 && |
1839 | btrfs_leaf_free_space(root, b) < ins_len) { | 1827 | btrfs_leaf_free_space(root, b) < ins_len) { |
1840 | int sret; | ||
1841 | |||
1842 | btrfs_set_path_blocking(p); | 1828 | btrfs_set_path_blocking(p); |
1843 | sret = split_leaf(trans, root, key, | 1829 | err = split_leaf(trans, root, key, |
1844 | p, ins_len, ret == 0); | 1830 | p, ins_len, ret == 0); |
1845 | btrfs_clear_path_blocking(p, NULL); | 1831 | btrfs_clear_path_blocking(p, NULL); |
1846 | 1832 | ||
1847 | BUG_ON(sret > 0); | 1833 | BUG_ON(err > 0); |
1848 | if (sret) { | 1834 | if (err) { |
1849 | ret = sret; | 1835 | ret = err; |
1850 | goto done; | 1836 | goto done; |
1851 | } | 1837 | } |
1852 | } | 1838 | } |
@@ -3807,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
3807 | } | 3793 | } |
3808 | 3794 | ||
3809 | /* delete the leaf if it is mostly empty */ | 3795 | /* delete the leaf if it is mostly empty */ |
3810 | if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) { | 3796 | if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { |
3811 | /* push_leaf_left fixes the path. | 3797 | /* push_leaf_left fixes the path. |
3812 | * make sure the path still points to our leaf | 3798 | * make sure the path still points to our leaf |
3813 | * for possible call to del_ptr below | 3799 | * for possible call to del_ptr below |
@@ -4042,10 +4028,9 @@ out: | |||
4042 | * calling this function. | 4028 | * calling this function. |
4043 | */ | 4029 | */ |
4044 | int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, | 4030 | int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, |
4045 | struct btrfs_key *key, int lowest_level, | 4031 | struct btrfs_key *key, int level, |
4046 | int cache_only, u64 min_trans) | 4032 | int cache_only, u64 min_trans) |
4047 | { | 4033 | { |
4048 | int level = lowest_level; | ||
4049 | int slot; | 4034 | int slot; |
4050 | struct extent_buffer *c; | 4035 | struct extent_buffer *c; |
4051 | 4036 | ||
@@ -4058,11 +4043,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, | |||
4058 | c = path->nodes[level]; | 4043 | c = path->nodes[level]; |
4059 | next: | 4044 | next: |
4060 | if (slot >= btrfs_header_nritems(c)) { | 4045 | if (slot >= btrfs_header_nritems(c)) { |
4061 | level++; | 4046 | int ret; |
4062 | if (level == BTRFS_MAX_LEVEL) | 4047 | int orig_lowest; |
4048 | struct btrfs_key cur_key; | ||
4049 | if (level + 1 >= BTRFS_MAX_LEVEL || | ||
4050 | !path->nodes[level + 1]) | ||
4063 | return 1; | 4051 | return 1; |
4064 | continue; | 4052 | |
4053 | if (path->locks[level + 1]) { | ||
4054 | level++; | ||
4055 | continue; | ||
4056 | } | ||
4057 | |||
4058 | slot = btrfs_header_nritems(c) - 1; | ||
4059 | if (level == 0) | ||
4060 | btrfs_item_key_to_cpu(c, &cur_key, slot); | ||
4061 | else | ||
4062 | btrfs_node_key_to_cpu(c, &cur_key, slot); | ||
4063 | |||
4064 | orig_lowest = path->lowest_level; | ||
4065 | btrfs_release_path(root, path); | ||
4066 | path->lowest_level = level; | ||
4067 | ret = btrfs_search_slot(NULL, root, &cur_key, path, | ||
4068 | 0, 0); | ||
4069 | path->lowest_level = orig_lowest; | ||
4070 | if (ret < 0) | ||
4071 | return ret; | ||
4072 | |||
4073 | c = path->nodes[level]; | ||
4074 | slot = path->slots[level]; | ||
4075 | if (ret == 0) | ||
4076 | slot++; | ||
4077 | goto next; | ||
4065 | } | 4078 | } |
4079 | |||
4066 | if (level == 0) | 4080 | if (level == 0) |
4067 | btrfs_item_key_to_cpu(c, key, slot); | 4081 | btrfs_item_key_to_cpu(c, key, slot); |
4068 | else { | 4082 | else { |
@@ -4146,7 +4160,8 @@ again: | |||
4146 | * advance the path if there are now more items available. | 4160 | * advance the path if there are now more items available. |
4147 | */ | 4161 | */ |
4148 | if (nritems > 0 && path->slots[0] < nritems - 1) { | 4162 | if (nritems > 0 && path->slots[0] < nritems - 1) { |
4149 | path->slots[0]++; | 4163 | if (ret == 0) |
4164 | path->slots[0]++; | ||
4150 | ret = 0; | 4165 | ret = 0; |
4151 | goto done; | 4166 | goto done; |
4152 | } | 4167 | } |
@@ -4278,10 +4293,10 @@ int btrfs_previous_item(struct btrfs_root *root, | |||
4278 | path->slots[0]--; | 4293 | path->slots[0]--; |
4279 | 4294 | ||
4280 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 4295 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
4281 | if (found_key.type == type) | ||
4282 | return 0; | ||
4283 | if (found_key.objectid < min_objectid) | 4296 | if (found_key.objectid < min_objectid) |
4284 | break; | 4297 | break; |
4298 | if (found_key.type == type) | ||
4299 | return 0; | ||
4285 | if (found_key.objectid == min_objectid && | 4300 | if (found_key.objectid == min_objectid && |
4286 | found_key.type < type) | 4301 | found_key.type < type) |
4287 | break; | 4302 | break; |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 98a873838717..215ef8cae823 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -481,7 +481,7 @@ struct btrfs_shared_data_ref { | |||
481 | 481 | ||
482 | struct btrfs_extent_inline_ref { | 482 | struct btrfs_extent_inline_ref { |
483 | u8 type; | 483 | u8 type; |
484 | u64 offset; | 484 | __le64 offset; |
485 | } __attribute__ ((__packed__)); | 485 | } __attribute__ ((__packed__)); |
486 | 486 | ||
487 | /* old style backrefs item */ | 487 | /* old style backrefs item */ |
@@ -689,6 +689,7 @@ struct btrfs_space_info { | |||
689 | struct list_head block_groups; | 689 | struct list_head block_groups; |
690 | spinlock_t lock; | 690 | spinlock_t lock; |
691 | struct rw_semaphore groups_sem; | 691 | struct rw_semaphore groups_sem; |
692 | atomic_t caching_threads; | ||
692 | }; | 693 | }; |
693 | 694 | ||
694 | /* | 695 | /* |
@@ -707,6 +708,9 @@ struct btrfs_free_cluster { | |||
707 | /* first extent starting offset */ | 708 | /* first extent starting offset */ |
708 | u64 window_start; | 709 | u64 window_start; |
709 | 710 | ||
711 | /* if this cluster simply points at a bitmap in the block group */ | ||
712 | bool points_to_bitmap; | ||
713 | |||
710 | struct btrfs_block_group_cache *block_group; | 714 | struct btrfs_block_group_cache *block_group; |
711 | /* | 715 | /* |
712 | * when a cluster is allocated from a block group, we put the | 716 | * when a cluster is allocated from a block group, we put the |
@@ -716,24 +720,37 @@ struct btrfs_free_cluster { | |||
716 | struct list_head block_group_list; | 720 | struct list_head block_group_list; |
717 | }; | 721 | }; |
718 | 722 | ||
723 | enum btrfs_caching_type { | ||
724 | BTRFS_CACHE_NO = 0, | ||
725 | BTRFS_CACHE_STARTED = 1, | ||
726 | BTRFS_CACHE_FINISHED = 2, | ||
727 | }; | ||
728 | |||
719 | struct btrfs_block_group_cache { | 729 | struct btrfs_block_group_cache { |
720 | struct btrfs_key key; | 730 | struct btrfs_key key; |
721 | struct btrfs_block_group_item item; | 731 | struct btrfs_block_group_item item; |
732 | struct btrfs_fs_info *fs_info; | ||
722 | spinlock_t lock; | 733 | spinlock_t lock; |
723 | struct mutex cache_mutex; | ||
724 | u64 pinned; | 734 | u64 pinned; |
725 | u64 reserved; | 735 | u64 reserved; |
726 | u64 flags; | 736 | u64 flags; |
727 | int cached; | 737 | u64 sectorsize; |
738 | int extents_thresh; | ||
739 | int free_extents; | ||
740 | int total_bitmaps; | ||
728 | int ro; | 741 | int ro; |
729 | int dirty; | 742 | int dirty; |
730 | 743 | ||
744 | /* cache tracking stuff */ | ||
745 | wait_queue_head_t caching_q; | ||
746 | int cached; | ||
747 | |||
731 | struct btrfs_space_info *space_info; | 748 | struct btrfs_space_info *space_info; |
732 | 749 | ||
733 | /* free space cache stuff */ | 750 | /* free space cache stuff */ |
734 | spinlock_t tree_lock; | 751 | spinlock_t tree_lock; |
735 | struct rb_root free_space_bytes; | ||
736 | struct rb_root free_space_offset; | 752 | struct rb_root free_space_offset; |
753 | u64 free_space; | ||
737 | 754 | ||
738 | /* block group cache stuff */ | 755 | /* block group cache stuff */ |
739 | struct rb_node cache_node; | 756 | struct rb_node cache_node; |
@@ -942,6 +959,9 @@ struct btrfs_root { | |||
942 | /* the node lock is held while changing the node pointer */ | 959 | /* the node lock is held while changing the node pointer */ |
943 | spinlock_t node_lock; | 960 | spinlock_t node_lock; |
944 | 961 | ||
962 | /* taken when updating the commit root */ | ||
963 | struct rw_semaphore commit_root_sem; | ||
964 | |||
945 | struct extent_buffer *commit_root; | 965 | struct extent_buffer *commit_root; |
946 | struct btrfs_root *log_root; | 966 | struct btrfs_root *log_root; |
947 | struct btrfs_root *reloc_root; | 967 | struct btrfs_root *reloc_root; |
@@ -1988,6 +2008,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, | |||
1988 | u64 bytes); | 2008 | u64 bytes); |
1989 | void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, | 2009 | void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, |
1990 | u64 bytes); | 2010 | u64 bytes); |
2011 | void btrfs_free_pinned_extents(struct btrfs_fs_info *info); | ||
1991 | /* ctree.c */ | 2012 | /* ctree.c */ |
1992 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, | 2013 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, |
1993 | int level, int *slot); | 2014 | int level, int *slot); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d28d29c95f7c..7dcaa8138864 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -909,6 +909,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
909 | spin_lock_init(&root->inode_lock); | 909 | spin_lock_init(&root->inode_lock); |
910 | mutex_init(&root->objectid_mutex); | 910 | mutex_init(&root->objectid_mutex); |
911 | mutex_init(&root->log_mutex); | 911 | mutex_init(&root->log_mutex); |
912 | init_rwsem(&root->commit_root_sem); | ||
912 | init_waitqueue_head(&root->log_writer_wait); | 913 | init_waitqueue_head(&root->log_writer_wait); |
913 | init_waitqueue_head(&root->log_commit_wait[0]); | 914 | init_waitqueue_head(&root->log_commit_wait[0]); |
914 | init_waitqueue_head(&root->log_commit_wait[1]); | 915 | init_waitqueue_head(&root->log_commit_wait[1]); |
@@ -1799,6 +1800,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1799 | btrfs_super_chunk_root(disk_super), | 1800 | btrfs_super_chunk_root(disk_super), |
1800 | blocksize, generation); | 1801 | blocksize, generation); |
1801 | BUG_ON(!chunk_root->node); | 1802 | BUG_ON(!chunk_root->node); |
1803 | if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { | ||
1804 | printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", | ||
1805 | sb->s_id); | ||
1806 | goto fail_chunk_root; | ||
1807 | } | ||
1802 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); | 1808 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); |
1803 | chunk_root->commit_root = btrfs_root_node(chunk_root); | 1809 | chunk_root->commit_root = btrfs_root_node(chunk_root); |
1804 | 1810 | ||
@@ -1826,6 +1832,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1826 | blocksize, generation); | 1832 | blocksize, generation); |
1827 | if (!tree_root->node) | 1833 | if (!tree_root->node) |
1828 | goto fail_chunk_root; | 1834 | goto fail_chunk_root; |
1835 | if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { | ||
1836 | printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", | ||
1837 | sb->s_id); | ||
1838 | goto fail_tree_root; | ||
1839 | } | ||
1829 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); | 1840 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
1830 | tree_root->commit_root = btrfs_root_node(tree_root); | 1841 | tree_root->commit_root = btrfs_root_node(tree_root); |
1831 | 1842 | ||
@@ -2322,6 +2333,9 @@ int close_ctree(struct btrfs_root *root) | |||
2322 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); | 2333 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); |
2323 | } | 2334 | } |
2324 | 2335 | ||
2336 | fs_info->closing = 2; | ||
2337 | smp_mb(); | ||
2338 | |||
2325 | if (fs_info->delalloc_bytes) { | 2339 | if (fs_info->delalloc_bytes) { |
2326 | printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", | 2340 | printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", |
2327 | (unsigned long long)fs_info->delalloc_bytes); | 2341 | (unsigned long long)fs_info->delalloc_bytes); |
@@ -2343,6 +2357,7 @@ int close_ctree(struct btrfs_root *root) | |||
2343 | free_extent_buffer(root->fs_info->csum_root->commit_root); | 2357 | free_extent_buffer(root->fs_info->csum_root->commit_root); |
2344 | 2358 | ||
2345 | btrfs_free_block_groups(root->fs_info); | 2359 | btrfs_free_block_groups(root->fs_info); |
2360 | btrfs_free_pinned_extents(root->fs_info); | ||
2346 | 2361 | ||
2347 | del_fs_roots(fs_info); | 2362 | del_fs_roots(fs_info); |
2348 | 2363 | ||
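close_ctree() now publishes fs_info->closing = 2 behind a full barrier so the async block group caching threads introduced in extent-tree.c below can notice shutdown and stop walking the extent tree. A rough user-space analogue of that handshake, using C11 atomics where the kernel uses a plain int plus smp_mb():

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Stand-in for fs_info->closing: 0 = running, 2 = tearing down. */
    static atomic_int closing;

    /* Unmount path: publish the new state before waiting for workers. */
    static void begin_close(void)
    {
            atomic_store(&closing, 2);      /* seq_cst, analogous to the smp_mb() pairing */
    }

    /* Worker loop body: bail out once shutdown has been published. */
    static bool should_stop(void)
    {
            return atomic_load(&closing) > 1;
    }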
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a5aca3997d42..fadf69a2764b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/blkdev.h> | 21 | #include <linux/blkdev.h> |
22 | #include <linux/sort.h> | 22 | #include <linux/sort.h> |
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/kthread.h> | ||
24 | #include "compat.h" | 25 | #include "compat.h" |
25 | #include "hash.h" | 26 | #include "hash.h" |
26 | #include "ctree.h" | 27 | #include "ctree.h" |
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
61 | struct btrfs_root *extent_root, u64 alloc_bytes, | 62 | struct btrfs_root *extent_root, u64 alloc_bytes, |
62 | u64 flags, int force); | 63 | u64 flags, int force); |
63 | 64 | ||
65 | static noinline int | ||
66 | block_group_cache_done(struct btrfs_block_group_cache *cache) | ||
67 | { | ||
68 | smp_mb(); | ||
69 | return cache->cached == BTRFS_CACHE_FINISHED; | ||
70 | } | ||
71 | |||
64 | static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) | 72 | static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) |
65 | { | 73 | { |
66 | return (cache->flags & bits) == bits; | 74 | return (cache->flags & bits) == bits; |
@@ -146,20 +154,70 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, | |||
146 | } | 154 | } |
147 | 155 | ||
148 | /* | 156 | /* |
157 | * We always set EXTENT_LOCKED for the super mirror extents so we don't | ||
158 | * overwrite them, so those bits need to be unset. Also, if we are unmounting | ||
159 | * with pinned extents still sitting there because a block group was still caching, | ||
160 | * we need to clear those now, since we are done. | ||
161 | */ | ||
162 | void btrfs_free_pinned_extents(struct btrfs_fs_info *info) | ||
163 | { | ||
164 | u64 start, end, last = 0; | ||
165 | int ret; | ||
166 | |||
167 | while (1) { | ||
168 | ret = find_first_extent_bit(&info->pinned_extents, last, | ||
169 | &start, &end, | ||
170 | EXTENT_LOCKED|EXTENT_DIRTY); | ||
171 | if (ret) | ||
172 | break; | ||
173 | |||
174 | clear_extent_bits(&info->pinned_extents, start, end, | ||
175 | EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS); | ||
176 | last = end+1; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | static int remove_sb_from_cache(struct btrfs_root *root, | ||
181 | struct btrfs_block_group_cache *cache) | ||
182 | { | ||
183 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
184 | u64 bytenr; | ||
185 | u64 *logical; | ||
186 | int stripe_len; | ||
187 | int i, nr, ret; | ||
188 | |||
189 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | ||
190 | bytenr = btrfs_sb_offset(i); | ||
191 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, | ||
192 | cache->key.objectid, bytenr, | ||
193 | 0, &logical, &nr, &stripe_len); | ||
194 | BUG_ON(ret); | ||
195 | while (nr--) { | ||
196 | try_lock_extent(&fs_info->pinned_extents, | ||
197 | logical[nr], | ||
198 | logical[nr] + stripe_len - 1, GFP_NOFS); | ||
199 | } | ||
200 | kfree(logical); | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | /* | ||
149 | * this is only called by cache_block_group, since we could have freed extents | 207 | * this is only called by cache_block_group, since we could have freed extents |
150 | * we need to check the pinned_extents for any extents that can't be used yet | 208 | * we need to check the pinned_extents for any extents that can't be used yet |
151 | * since their free space will be released as soon as the transaction commits. | 209 | * since their free space will be released as soon as the transaction commits. |
152 | */ | 210 | */ |
153 | static int add_new_free_space(struct btrfs_block_group_cache *block_group, | 211 | static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, |
154 | struct btrfs_fs_info *info, u64 start, u64 end) | 212 | struct btrfs_fs_info *info, u64 start, u64 end) |
155 | { | 213 | { |
156 | u64 extent_start, extent_end, size; | 214 | u64 extent_start, extent_end, size, total_added = 0; |
157 | int ret; | 215 | int ret; |
158 | 216 | ||
159 | while (start < end) { | 217 | while (start < end) { |
160 | ret = find_first_extent_bit(&info->pinned_extents, start, | 218 | ret = find_first_extent_bit(&info->pinned_extents, start, |
161 | &extent_start, &extent_end, | 219 | &extent_start, &extent_end, |
162 | EXTENT_DIRTY); | 220 | EXTENT_DIRTY|EXTENT_LOCKED); |
163 | if (ret) | 221 | if (ret) |
164 | break; | 222 | break; |
165 | 223 | ||
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, | |||
167 | start = extent_end + 1; | 225 | start = extent_end + 1; |
168 | } else if (extent_start > start && extent_start < end) { | 226 | } else if (extent_start > start && extent_start < end) { |
169 | size = extent_start - start; | 227 | size = extent_start - start; |
228 | total_added += size; | ||
170 | ret = btrfs_add_free_space(block_group, start, | 229 | ret = btrfs_add_free_space(block_group, start, |
171 | size); | 230 | size); |
172 | BUG_ON(ret); | 231 | BUG_ON(ret); |
@@ -178,84 +237,79 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, | |||
178 | 237 | ||
179 | if (start < end) { | 238 | if (start < end) { |
180 | size = end - start; | 239 | size = end - start; |
240 | total_added += size; | ||
181 | ret = btrfs_add_free_space(block_group, start, size); | 241 | ret = btrfs_add_free_space(block_group, start, size); |
182 | BUG_ON(ret); | 242 | BUG_ON(ret); |
183 | } | 243 | } |
184 | 244 | ||
185 | return 0; | 245 | return total_added; |
186 | } | 246 | } |
187 | 247 | ||
188 | static int remove_sb_from_cache(struct btrfs_root *root, | 248 | static int caching_kthread(void *data) |
189 | struct btrfs_block_group_cache *cache) | ||
190 | { | ||
191 | u64 bytenr; | ||
192 | u64 *logical; | ||
193 | int stripe_len; | ||
194 | int i, nr, ret; | ||
195 | |||
196 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | ||
197 | bytenr = btrfs_sb_offset(i); | ||
198 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, | ||
199 | cache->key.objectid, bytenr, 0, | ||
200 | &logical, &nr, &stripe_len); | ||
201 | BUG_ON(ret); | ||
202 | while (nr--) { | ||
203 | btrfs_remove_free_space(cache, logical[nr], | ||
204 | stripe_len); | ||
205 | } | ||
206 | kfree(logical); | ||
207 | } | ||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int cache_block_group(struct btrfs_root *root, | ||
212 | struct btrfs_block_group_cache *block_group) | ||
213 | { | 249 | { |
250 | struct btrfs_block_group_cache *block_group = data; | ||
251 | struct btrfs_fs_info *fs_info = block_group->fs_info; | ||
252 | u64 last = 0; | ||
214 | struct btrfs_path *path; | 253 | struct btrfs_path *path; |
215 | int ret = 0; | 254 | int ret = 0; |
216 | struct btrfs_key key; | 255 | struct btrfs_key key; |
217 | struct extent_buffer *leaf; | 256 | struct extent_buffer *leaf; |
218 | int slot; | 257 | int slot; |
219 | u64 last; | 258 | u64 total_found = 0; |
220 | |||
221 | if (!block_group) | ||
222 | return 0; | ||
223 | 259 | ||
224 | root = root->fs_info->extent_root; | 260 | BUG_ON(!fs_info); |
225 | |||
226 | if (block_group->cached) | ||
227 | return 0; | ||
228 | 261 | ||
229 | path = btrfs_alloc_path(); | 262 | path = btrfs_alloc_path(); |
230 | if (!path) | 263 | if (!path) |
231 | return -ENOMEM; | 264 | return -ENOMEM; |
232 | 265 | ||
233 | path->reada = 2; | 266 | atomic_inc(&block_group->space_info->caching_threads); |
267 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | ||
268 | again: | ||
269 | /* need to make sure the commit_root doesn't disappear */ | ||
270 | down_read(&fs_info->extent_root->commit_root_sem); | ||
271 | |||
234 | /* | 272 | /* |
235 | * we get into deadlocks with paths held by callers of this function. | 273 | * We don't want to deadlock with somebody trying to allocate a new |
236 | * since the alloc_mutex is protecting things right now, just | 274 | * extent for the extent root while also trying to search the extent |
237 | * skip the locking here | 275 | * root to add free space. So we skip locking and search the commit |
276 | * root, since it's read-only | ||
238 | */ | 277 | */ |
239 | path->skip_locking = 1; | 278 | path->skip_locking = 1; |
240 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | 279 | path->search_commit_root = 1; |
280 | path->reada = 2; | ||
281 | |||
241 | key.objectid = last; | 282 | key.objectid = last; |
242 | key.offset = 0; | 283 | key.offset = 0; |
243 | btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); | 284 | btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); |
244 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 285 | ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); |
245 | if (ret < 0) | 286 | if (ret < 0) |
246 | goto err; | 287 | goto err; |
247 | 288 | ||
248 | while (1) { | 289 | while (1) { |
290 | smp_mb(); | ||
291 | if (block_group->fs_info->closing > 1) { | ||
292 | last = (u64)-1; | ||
293 | break; | ||
294 | } | ||
295 | |||
249 | leaf = path->nodes[0]; | 296 | leaf = path->nodes[0]; |
250 | slot = path->slots[0]; | 297 | slot = path->slots[0]; |
251 | if (slot >= btrfs_header_nritems(leaf)) { | 298 | if (slot >= btrfs_header_nritems(leaf)) { |
252 | ret = btrfs_next_leaf(root, path); | 299 | ret = btrfs_next_leaf(fs_info->extent_root, path); |
253 | if (ret < 0) | 300 | if (ret < 0) |
254 | goto err; | 301 | goto err; |
255 | if (ret == 0) | 302 | else if (ret) |
256 | continue; | ||
257 | else | ||
258 | break; | 303 | break; |
304 | |||
305 | if (need_resched()) { | ||
306 | btrfs_release_path(fs_info->extent_root, path); | ||
307 | up_read(&fs_info->extent_root->commit_root_sem); | ||
308 | cond_resched(); | ||
309 | goto again; | ||
310 | } | ||
311 | |||
312 | continue; | ||
259 | } | 313 | } |
260 | btrfs_item_key_to_cpu(leaf, &key, slot); | 314 | btrfs_item_key_to_cpu(leaf, &key, slot); |
261 | if (key.objectid < block_group->key.objectid) | 315 | if (key.objectid < block_group->key.objectid) |
@@ -266,24 +320,59 @@ static int cache_block_group(struct btrfs_root *root, | |||
266 | break; | 320 | break; |
267 | 321 | ||
268 | if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { | 322 | if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { |
269 | add_new_free_space(block_group, root->fs_info, last, | 323 | total_found += add_new_free_space(block_group, |
270 | key.objectid); | 324 | fs_info, last, |
271 | 325 | key.objectid); | |
272 | last = key.objectid + key.offset; | 326 | last = key.objectid + key.offset; |
273 | } | 327 | } |
328 | |||
329 | if (total_found > (1024 * 1024 * 2)) { | ||
330 | total_found = 0; | ||
331 | wake_up(&block_group->caching_q); | ||
332 | } | ||
274 | next: | 333 | next: |
275 | path->slots[0]++; | 334 | path->slots[0]++; |
276 | } | 335 | } |
336 | ret = 0; | ||
277 | 337 | ||
278 | add_new_free_space(block_group, root->fs_info, last, | 338 | total_found += add_new_free_space(block_group, fs_info, last, |
279 | block_group->key.objectid + | 339 | block_group->key.objectid + |
280 | block_group->key.offset); | 340 | block_group->key.offset); |
341 | |||
342 | spin_lock(&block_group->lock); | ||
343 | block_group->cached = BTRFS_CACHE_FINISHED; | ||
344 | spin_unlock(&block_group->lock); | ||
281 | 345 | ||
282 | block_group->cached = 1; | ||
283 | remove_sb_from_cache(root, block_group); | ||
284 | ret = 0; | ||
285 | err: | 346 | err: |
286 | btrfs_free_path(path); | 347 | btrfs_free_path(path); |
348 | up_read(&fs_info->extent_root->commit_root_sem); | ||
349 | atomic_dec(&block_group->space_info->caching_threads); | ||
350 | wake_up(&block_group->caching_q); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static int cache_block_group(struct btrfs_block_group_cache *cache) | ||
356 | { | ||
357 | struct task_struct *tsk; | ||
358 | int ret = 0; | ||
359 | |||
360 | spin_lock(&cache->lock); | ||
361 | if (cache->cached != BTRFS_CACHE_NO) { | ||
362 | spin_unlock(&cache->lock); | ||
363 | return ret; | ||
364 | } | ||
365 | cache->cached = BTRFS_CACHE_STARTED; | ||
366 | spin_unlock(&cache->lock); | ||
367 | |||
368 | tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n", | ||
369 | cache->key.objectid); | ||
370 | if (IS_ERR(tsk)) { | ||
371 | ret = PTR_ERR(tsk); | ||
372 | printk(KERN_ERR "error running thread %d\n", ret); | ||
373 | BUG(); | ||
374 | } | ||
375 | |||
287 | return ret; | 376 | return ret; |
288 | } | 377 | } |
289 | 378 | ||
@@ -2387,13 +2476,29 @@ fail: | |||
2387 | 2476 | ||
2388 | } | 2477 | } |
2389 | 2478 | ||
2479 | static struct btrfs_block_group_cache * | ||
2480 | next_block_group(struct btrfs_root *root, | ||
2481 | struct btrfs_block_group_cache *cache) | ||
2482 | { | ||
2483 | struct rb_node *node; | ||
2484 | spin_lock(&root->fs_info->block_group_cache_lock); | ||
2485 | node = rb_next(&cache->cache_node); | ||
2486 | btrfs_put_block_group(cache); | ||
2487 | if (node) { | ||
2488 | cache = rb_entry(node, struct btrfs_block_group_cache, | ||
2489 | cache_node); | ||
2490 | atomic_inc(&cache->count); | ||
2491 | } else | ||
2492 | cache = NULL; | ||
2493 | spin_unlock(&root->fs_info->block_group_cache_lock); | ||
2494 | return cache; | ||
2495 | } | ||
2496 | |||
2390 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 2497 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
2391 | struct btrfs_root *root) | 2498 | struct btrfs_root *root) |
2392 | { | 2499 | { |
2393 | struct btrfs_block_group_cache *cache, *entry; | 2500 | struct btrfs_block_group_cache *cache; |
2394 | struct rb_node *n; | ||
2395 | int err = 0; | 2501 | int err = 0; |
2396 | int werr = 0; | ||
2397 | struct btrfs_path *path; | 2502 | struct btrfs_path *path; |
2398 | u64 last = 0; | 2503 | u64 last = 0; |
2399 | 2504 | ||
@@ -2402,39 +2507,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | |||
2402 | return -ENOMEM; | 2507 | return -ENOMEM; |
2403 | 2508 | ||
2404 | while (1) { | 2509 | while (1) { |
2405 | cache = NULL; | 2510 | if (last == 0) { |
2406 | spin_lock(&root->fs_info->block_group_cache_lock); | 2511 | err = btrfs_run_delayed_refs(trans, root, |
2407 | for (n = rb_first(&root->fs_info->block_group_cache_tree); | 2512 | (unsigned long)-1); |
2408 | n; n = rb_next(n)) { | 2513 | BUG_ON(err); |
2409 | entry = rb_entry(n, struct btrfs_block_group_cache, | ||
2410 | cache_node); | ||
2411 | if (entry->dirty) { | ||
2412 | cache = entry; | ||
2413 | break; | ||
2414 | } | ||
2415 | } | 2514 | } |
2416 | spin_unlock(&root->fs_info->block_group_cache_lock); | ||
2417 | 2515 | ||
2418 | if (!cache) | 2516 | cache = btrfs_lookup_first_block_group(root->fs_info, last); |
2419 | break; | 2517 | while (cache) { |
2518 | if (cache->dirty) | ||
2519 | break; | ||
2520 | cache = next_block_group(root, cache); | ||
2521 | } | ||
2522 | if (!cache) { | ||
2523 | if (last == 0) | ||
2524 | break; | ||
2525 | last = 0; | ||
2526 | continue; | ||
2527 | } | ||
2420 | 2528 | ||
2421 | cache->dirty = 0; | 2529 | cache->dirty = 0; |
2422 | last += cache->key.offset; | 2530 | last = cache->key.objectid + cache->key.offset; |
2423 | 2531 | ||
2424 | err = write_one_cache_group(trans, root, | 2532 | err = write_one_cache_group(trans, root, path, cache); |
2425 | path, cache); | 2533 | BUG_ON(err); |
2426 | /* | 2534 | btrfs_put_block_group(cache); |
2427 | * if we fail to write the cache group, we want | ||
2428 | * to keep it marked dirty in hopes that a later | ||
2429 | * write will work | ||
2430 | */ | ||
2431 | if (err) { | ||
2432 | werr = err; | ||
2433 | continue; | ||
2434 | } | ||
2435 | } | 2535 | } |
2536 | |||
2436 | btrfs_free_path(path); | 2537 | btrfs_free_path(path); |
2437 | return werr; | 2538 | return 0; |
2438 | } | 2539 | } |
2439 | 2540 | ||
2440 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) | 2541 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) |
@@ -2484,6 +2585,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
2484 | found->force_alloc = 0; | 2585 | found->force_alloc = 0; |
2485 | *space_info = found; | 2586 | *space_info = found; |
2486 | list_add_rcu(&found->list, &info->space_info); | 2587 | list_add_rcu(&found->list, &info->space_info); |
2588 | atomic_set(&found->caching_threads, 0); | ||
2487 | return 0; | 2589 | return 0; |
2488 | } | 2590 | } |
2489 | 2591 | ||
@@ -2947,13 +3049,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, | |||
2947 | struct btrfs_block_group_cache *cache; | 3049 | struct btrfs_block_group_cache *cache; |
2948 | struct btrfs_fs_info *fs_info = root->fs_info; | 3050 | struct btrfs_fs_info *fs_info = root->fs_info; |
2949 | 3051 | ||
2950 | if (pin) { | 3052 | if (pin) |
2951 | set_extent_dirty(&fs_info->pinned_extents, | 3053 | set_extent_dirty(&fs_info->pinned_extents, |
2952 | bytenr, bytenr + num - 1, GFP_NOFS); | 3054 | bytenr, bytenr + num - 1, GFP_NOFS); |
2953 | } else { | ||
2954 | clear_extent_dirty(&fs_info->pinned_extents, | ||
2955 | bytenr, bytenr + num - 1, GFP_NOFS); | ||
2956 | } | ||
2957 | 3055 | ||
2958 | while (num > 0) { | 3056 | while (num > 0) { |
2959 | cache = btrfs_lookup_block_group(fs_info, bytenr); | 3057 | cache = btrfs_lookup_block_group(fs_info, bytenr); |
@@ -2969,14 +3067,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, | |||
2969 | spin_unlock(&cache->space_info->lock); | 3067 | spin_unlock(&cache->space_info->lock); |
2970 | fs_info->total_pinned += len; | 3068 | fs_info->total_pinned += len; |
2971 | } else { | 3069 | } else { |
3070 | int unpin = 0; | ||
3071 | |||
3072 | /* | ||
3073 | * in order to not race with the block group caching, we | ||
3074 | * only want to unpin the extent if we are cached. If | ||
3075 | * we aren't cached, we want to start async caching this | ||
3076 | * block group so we can free the extent the next time | ||
3077 | * around. | ||
3078 | */ | ||
2972 | spin_lock(&cache->space_info->lock); | 3079 | spin_lock(&cache->space_info->lock); |
2973 | spin_lock(&cache->lock); | 3080 | spin_lock(&cache->lock); |
2974 | cache->pinned -= len; | 3081 | unpin = (cache->cached == BTRFS_CACHE_FINISHED); |
2975 | cache->space_info->bytes_pinned -= len; | 3082 | if (likely(unpin)) { |
3083 | cache->pinned -= len; | ||
3084 | cache->space_info->bytes_pinned -= len; | ||
3085 | fs_info->total_pinned -= len; | ||
3086 | } | ||
2976 | spin_unlock(&cache->lock); | 3087 | spin_unlock(&cache->lock); |
2977 | spin_unlock(&cache->space_info->lock); | 3088 | spin_unlock(&cache->space_info->lock); |
2978 | fs_info->total_pinned -= len; | 3089 | |
2979 | if (cache->cached) | 3090 | if (likely(unpin)) |
3091 | clear_extent_dirty(&fs_info->pinned_extents, | ||
3092 | bytenr, bytenr + len -1, | ||
3093 | GFP_NOFS); | ||
3094 | else | ||
3095 | cache_block_group(cache); | ||
3096 | |||
3097 | if (unpin) | ||
2980 | btrfs_add_free_space(cache, bytenr, len); | 3098 | btrfs_add_free_space(cache, bytenr, len); |
2981 | } | 3099 | } |
2982 | btrfs_put_block_group(cache); | 3100 | btrfs_put_block_group(cache); |
@@ -3030,6 +3148,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) | |||
3030 | &start, &end, EXTENT_DIRTY); | 3148 | &start, &end, EXTENT_DIRTY); |
3031 | if (ret) | 3149 | if (ret) |
3032 | break; | 3150 | break; |
3151 | |||
3033 | set_extent_dirty(copy, start, end, GFP_NOFS); | 3152 | set_extent_dirty(copy, start, end, GFP_NOFS); |
3034 | last = end + 1; | 3153 | last = end + 1; |
3035 | } | 3154 | } |
@@ -3058,6 +3177,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
3058 | 3177 | ||
3059 | cond_resched(); | 3178 | cond_resched(); |
3060 | } | 3179 | } |
3180 | |||
3061 | return ret; | 3181 | return ret; |
3062 | } | 3182 | } |
3063 | 3183 | ||
@@ -3436,6 +3556,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val) | |||
3436 | } | 3556 | } |
3437 | 3557 | ||
3438 | /* | 3558 | /* |
3559 | * when we wait for progress in the block group caching, it's because | ||
3560 | * our allocation attempt failed at least once. So, we must sleep | ||
3561 | * and let some progress happen before we try again. | ||
3562 | * | ||
3563 | * This function will sleep at least once waiting for new free space to | ||
3564 | * show up, and then it will check the block group free space numbers | ||
3565 | * for our min num_bytes. Another option is to have it go ahead | ||
3566 | * and look in the rbtree for a free extent of a given size, but this | ||
3567 | * is a good start. | ||
3568 | */ | ||
3569 | static noinline int | ||
3570 | wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, | ||
3571 | u64 num_bytes) | ||
3572 | { | ||
3573 | DEFINE_WAIT(wait); | ||
3574 | |||
3575 | prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE); | ||
3576 | |||
3577 | if (block_group_cache_done(cache)) { | ||
3578 | finish_wait(&cache->caching_q, &wait); | ||
3579 | return 0; | ||
3580 | } | ||
3581 | schedule(); | ||
3582 | finish_wait(&cache->caching_q, &wait); | ||
3583 | |||
3584 | wait_event(cache->caching_q, block_group_cache_done(cache) || | ||
3585 | (cache->free_space >= num_bytes)); | ||
3586 | return 0; | ||
3587 | } | ||
3588 | |||
3589 | enum btrfs_loop_type { | ||
3590 | LOOP_CACHED_ONLY = 0, | ||
3591 | LOOP_CACHING_NOWAIT = 1, | ||
3592 | LOOP_CACHING_WAIT = 2, | ||
3593 | LOOP_ALLOC_CHUNK = 3, | ||
3594 | LOOP_NO_EMPTY_SIZE = 4, | ||
3595 | }; | ||
3596 | |||
3597 | /* | ||
3439 | * walks the btree of allocated extents and find a hole of a given size. | 3598 | * walks the btree of allocated extents and find a hole of a given size. |
3440 | * The key ins is changed to record the hole: | 3599 | * The key ins is changed to record the hole: |
3441 | * ins->objectid == block start | 3600 | * ins->objectid == block start |
@@ -3460,6 +3619,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
3460 | struct btrfs_space_info *space_info; | 3619 | struct btrfs_space_info *space_info; |
3461 | int last_ptr_loop = 0; | 3620 | int last_ptr_loop = 0; |
3462 | int loop = 0; | 3621 | int loop = 0; |
3622 | bool found_uncached_bg = false; | ||
3463 | 3623 | ||
3464 | WARN_ON(num_bytes < root->sectorsize); | 3624 | WARN_ON(num_bytes < root->sectorsize); |
3465 | btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); | 3625 | btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); |
@@ -3491,15 +3651,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
3491 | search_start = max(search_start, first_logical_byte(root, 0)); | 3651 | search_start = max(search_start, first_logical_byte(root, 0)); |
3492 | search_start = max(search_start, hint_byte); | 3652 | search_start = max(search_start, hint_byte); |
3493 | 3653 | ||
3494 | if (!last_ptr) { | 3654 | if (!last_ptr) |
3495 | empty_cluster = 0; | 3655 | empty_cluster = 0; |
3496 | loop = 1; | ||
3497 | } | ||
3498 | 3656 | ||
3499 | if (search_start == hint_byte) { | 3657 | if (search_start == hint_byte) { |
3500 | block_group = btrfs_lookup_block_group(root->fs_info, | 3658 | block_group = btrfs_lookup_block_group(root->fs_info, |
3501 | search_start); | 3659 | search_start); |
3502 | if (block_group && block_group_bits(block_group, data)) { | 3660 | /* |
3661 | * we don't want to use the block group if it doesn't match our | ||
3662 | * allocation bits, or if its not cached. | ||
3663 | */ | ||
3664 | if (block_group && block_group_bits(block_group, data) && | ||
3665 | block_group_cache_done(block_group)) { | ||
3503 | down_read(&space_info->groups_sem); | 3666 | down_read(&space_info->groups_sem); |
3504 | if (list_empty(&block_group->list) || | 3667 | if (list_empty(&block_group->list) || |
3505 | block_group->ro) { | 3668 | block_group->ro) { |
@@ -3522,21 +3685,35 @@ search: | |||
3522 | down_read(&space_info->groups_sem); | 3685 | down_read(&space_info->groups_sem); |
3523 | list_for_each_entry(block_group, &space_info->block_groups, list) { | 3686 | list_for_each_entry(block_group, &space_info->block_groups, list) { |
3524 | u64 offset; | 3687 | u64 offset; |
3688 | int cached; | ||
3525 | 3689 | ||
3526 | atomic_inc(&block_group->count); | 3690 | atomic_inc(&block_group->count); |
3527 | search_start = block_group->key.objectid; | 3691 | search_start = block_group->key.objectid; |
3528 | 3692 | ||
3529 | have_block_group: | 3693 | have_block_group: |
3530 | if (unlikely(!block_group->cached)) { | 3694 | if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { |
3531 | mutex_lock(&block_group->cache_mutex); | 3695 | /* |
3532 | ret = cache_block_group(root, block_group); | 3696 | * we want to start caching kthreads, but not too many |
3533 | mutex_unlock(&block_group->cache_mutex); | 3697 | * right off the bat so we don't overwhelm the system, |
3534 | if (ret) { | 3698 | * so only start them if there are less than 2 and we're |
3535 | btrfs_put_block_group(block_group); | 3699 | * in the initial allocation phase. |
3536 | break; | 3700 | */ |
3701 | if (loop > LOOP_CACHING_NOWAIT || | ||
3702 | atomic_read(&space_info->caching_threads) < 2) { | ||
3703 | ret = cache_block_group(block_group); | ||
3704 | BUG_ON(ret); | ||
3537 | } | 3705 | } |
3538 | } | 3706 | } |
3539 | 3707 | ||
3708 | cached = block_group_cache_done(block_group); | ||
3709 | if (unlikely(!cached)) { | ||
3710 | found_uncached_bg = true; | ||
3711 | |||
3712 | /* if we only want cached bgs, loop */ | ||
3713 | if (loop == LOOP_CACHED_ONLY) | ||
3714 | goto loop; | ||
3715 | } | ||
3716 | |||
3540 | if (unlikely(block_group->ro)) | 3717 | if (unlikely(block_group->ro)) |
3541 | goto loop; | 3718 | goto loop; |
3542 | 3719 | ||
@@ -3615,14 +3792,21 @@ refill_cluster: | |||
3615 | spin_unlock(&last_ptr->refill_lock); | 3792 | spin_unlock(&last_ptr->refill_lock); |
3616 | goto checks; | 3793 | goto checks; |
3617 | } | 3794 | } |
3795 | } else if (!cached && loop > LOOP_CACHING_NOWAIT) { | ||
3796 | spin_unlock(&last_ptr->refill_lock); | ||
3797 | |||
3798 | wait_block_group_cache_progress(block_group, | ||
3799 | num_bytes + empty_cluster + empty_size); | ||
3800 | goto have_block_group; | ||
3618 | } | 3801 | } |
3802 | |||
3619 | /* | 3803 | /* |
3620 | * at this point we either didn't find a cluster | 3804 | * at this point we either didn't find a cluster |
3621 | * or we weren't able to allocate a block from our | 3805 | * or we weren't able to allocate a block from our |
3622 | * cluster. Free the cluster we've been trying | 3806 | * cluster. Free the cluster we've been trying |
3623 | * to use, and go to the next block group | 3807 | * to use, and go to the next block group |
3624 | */ | 3808 | */ |
3625 | if (loop < 2) { | 3809 | if (loop < LOOP_NO_EMPTY_SIZE) { |
3626 | btrfs_return_cluster_to_free_space(NULL, | 3810 | btrfs_return_cluster_to_free_space(NULL, |
3627 | last_ptr); | 3811 | last_ptr); |
3628 | spin_unlock(&last_ptr->refill_lock); | 3812 | spin_unlock(&last_ptr->refill_lock); |
@@ -3633,11 +3817,17 @@ refill_cluster: | |||
3633 | 3817 | ||
3634 | offset = btrfs_find_space_for_alloc(block_group, search_start, | 3818 | offset = btrfs_find_space_for_alloc(block_group, search_start, |
3635 | num_bytes, empty_size); | 3819 | num_bytes, empty_size); |
3636 | if (!offset) | 3820 | if (!offset && (cached || (!cached && |
3821 | loop == LOOP_CACHING_NOWAIT))) { | ||
3637 | goto loop; | 3822 | goto loop; |
3823 | } else if (!offset && (!cached && | ||
3824 | loop > LOOP_CACHING_NOWAIT)) { | ||
3825 | wait_block_group_cache_progress(block_group, | ||
3826 | num_bytes + empty_size); | ||
3827 | goto have_block_group; | ||
3828 | } | ||
3638 | checks: | 3829 | checks: |
3639 | search_start = stripe_align(root, offset); | 3830 | search_start = stripe_align(root, offset); |
3640 | |||
3641 | /* move on to the next group */ | 3831 | /* move on to the next group */ |
3642 | if (search_start + num_bytes >= search_end) { | 3832 | if (search_start + num_bytes >= search_end) { |
3643 | btrfs_add_free_space(block_group, offset, num_bytes); | 3833 | btrfs_add_free_space(block_group, offset, num_bytes); |
@@ -3683,13 +3873,26 @@ loop: | |||
3683 | } | 3873 | } |
3684 | up_read(&space_info->groups_sem); | 3874 | up_read(&space_info->groups_sem); |
3685 | 3875 | ||
3686 | /* loop == 0, try to find a clustered alloc in every block group | 3876 | /* LOOP_CACHED_ONLY, only search fully cached block groups |
3687 | * loop == 1, try again after forcing a chunk allocation | 3877 | * LOOP_CACHING_NOWAIT, search partially cached block groups, but |
3688 | * loop == 2, set empty_size and empty_cluster to 0 and try again | 3878 | * dont wait foR them to finish caching |
3879 | * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching | ||
3880 | * LOOP_ALLOC_CHUNK, force a chunk allocation and try again | ||
3881 | * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try | ||
3882 | * again | ||
3689 | */ | 3883 | */ |
3690 | if (!ins->objectid && loop < 3 && | 3884 | if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && |
3691 | (empty_size || empty_cluster || allowed_chunk_alloc)) { | 3885 | (found_uncached_bg || empty_size || empty_cluster || |
3692 | if (loop >= 2) { | 3886 | allowed_chunk_alloc)) { |
3887 | if (found_uncached_bg) { | ||
3888 | found_uncached_bg = false; | ||
3889 | if (loop < LOOP_CACHING_WAIT) { | ||
3890 | loop++; | ||
3891 | goto search; | ||
3892 | } | ||
3893 | } | ||
3894 | |||
3895 | if (loop == LOOP_ALLOC_CHUNK) { | ||
3693 | empty_size = 0; | 3896 | empty_size = 0; |
3694 | empty_cluster = 0; | 3897 | empty_cluster = 0; |
3695 | } | 3898 | } |
@@ -3702,7 +3905,7 @@ loop: | |||
3702 | space_info->force_alloc = 1; | 3905 | space_info->force_alloc = 1; |
3703 | } | 3906 | } |
3704 | 3907 | ||
3705 | if (loop < 3) { | 3908 | if (loop < LOOP_NO_EMPTY_SIZE) { |
3706 | loop++; | 3909 | loop++; |
3707 | goto search; | 3910 | goto search; |
3708 | } | 3911 | } |
@@ -3798,7 +4001,7 @@ again: | |||
3798 | num_bytes, data, 1); | 4001 | num_bytes, data, 1); |
3799 | goto again; | 4002 | goto again; |
3800 | } | 4003 | } |
3801 | if (ret) { | 4004 | if (ret == -ENOSPC) { |
3802 | struct btrfs_space_info *sinfo; | 4005 | struct btrfs_space_info *sinfo; |
3803 | 4006 | ||
3804 | sinfo = __find_space_info(root->fs_info, data); | 4007 | sinfo = __find_space_info(root->fs_info, data); |
@@ -3806,7 +4009,6 @@ again: | |||
3806 | "wanted %llu\n", (unsigned long long)data, | 4009 | "wanted %llu\n", (unsigned long long)data, |
3807 | (unsigned long long)num_bytes); | 4010 | (unsigned long long)num_bytes); |
3808 | dump_space_info(sinfo, num_bytes); | 4011 | dump_space_info(sinfo, num_bytes); |
3809 | BUG(); | ||
3810 | } | 4012 | } |
3811 | 4013 | ||
3812 | return ret; | 4014 | return ret; |
@@ -3844,7 +4046,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, | |||
3844 | ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, | 4046 | ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, |
3845 | empty_size, hint_byte, search_end, ins, | 4047 | empty_size, hint_byte, search_end, ins, |
3846 | data); | 4048 | data); |
3847 | update_reserved_extents(root, ins->objectid, ins->offset, 1); | 4049 | if (!ret) |
4050 | update_reserved_extents(root, ins->objectid, ins->offset, 1); | ||
4051 | |||
3848 | return ret; | 4052 | return ret; |
3849 | } | 4053 | } |
3850 | 4054 | ||
@@ -4006,9 +4210,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, | |||
4006 | struct btrfs_block_group_cache *block_group; | 4210 | struct btrfs_block_group_cache *block_group; |
4007 | 4211 | ||
4008 | block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); | 4212 | block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); |
4009 | mutex_lock(&block_group->cache_mutex); | 4213 | cache_block_group(block_group); |
4010 | cache_block_group(root, block_group); | 4214 | wait_event(block_group->caching_q, |
4011 | mutex_unlock(&block_group->cache_mutex); | 4215 | block_group_cache_done(block_group)); |
4012 | 4216 | ||
4013 | ret = btrfs_remove_free_space(block_group, ins->objectid, | 4217 | ret = btrfs_remove_free_space(block_group, ins->objectid, |
4014 | ins->offset); | 4218 | ins->offset); |
@@ -4039,7 +4243,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans, | |||
4039 | ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, | 4243 | ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, |
4040 | empty_size, hint_byte, search_end, | 4244 | empty_size, hint_byte, search_end, |
4041 | ins, 0); | 4245 | ins, 0); |
4042 | BUG_ON(ret); | 4246 | if (ret) |
4247 | return ret; | ||
4043 | 4248 | ||
4044 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { | 4249 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { |
4045 | if (parent == 0) | 4250 | if (parent == 0) |
@@ -6955,11 +7160,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) | |||
6955 | &info->block_group_cache_tree); | 7160 | &info->block_group_cache_tree); |
6956 | spin_unlock(&info->block_group_cache_lock); | 7161 | spin_unlock(&info->block_group_cache_lock); |
6957 | 7162 | ||
6958 | btrfs_remove_free_space_cache(block_group); | ||
6959 | down_write(&block_group->space_info->groups_sem); | 7163 | down_write(&block_group->space_info->groups_sem); |
6960 | list_del(&block_group->list); | 7164 | list_del(&block_group->list); |
6961 | up_write(&block_group->space_info->groups_sem); | 7165 | up_write(&block_group->space_info->groups_sem); |
6962 | 7166 | ||
7167 | if (block_group->cached == BTRFS_CACHE_STARTED) | ||
7168 | wait_event(block_group->caching_q, | ||
7169 | block_group_cache_done(block_group)); | ||
7170 | |||
7171 | btrfs_remove_free_space_cache(block_group); | ||
7172 | |||
6963 | WARN_ON(atomic_read(&block_group->count) != 1); | 7173 | WARN_ON(atomic_read(&block_group->count) != 1); |
6964 | kfree(block_group); | 7174 | kfree(block_group); |
6965 | 7175 | ||
@@ -7025,9 +7235,19 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7025 | atomic_set(&cache->count, 1); | 7235 | atomic_set(&cache->count, 1); |
7026 | spin_lock_init(&cache->lock); | 7236 | spin_lock_init(&cache->lock); |
7027 | spin_lock_init(&cache->tree_lock); | 7237 | spin_lock_init(&cache->tree_lock); |
7028 | mutex_init(&cache->cache_mutex); | 7238 | cache->fs_info = info; |
7239 | init_waitqueue_head(&cache->caching_q); | ||
7029 | INIT_LIST_HEAD(&cache->list); | 7240 | INIT_LIST_HEAD(&cache->list); |
7030 | INIT_LIST_HEAD(&cache->cluster_list); | 7241 | INIT_LIST_HEAD(&cache->cluster_list); |
7242 | |||
7243 | /* | ||
7244 | * we only want to have 32k of ram per block group for keeping | ||
7245 | * track of free space, and if we pass 1/2 of that we want to | ||
7246 | * start converting things over to using bitmaps | ||
7247 | */ | ||
7248 | cache->extents_thresh = ((1024 * 32) / 2) / | ||
7249 | sizeof(struct btrfs_free_space); | ||
7250 | |||
7031 | read_extent_buffer(leaf, &cache->item, | 7251 | read_extent_buffer(leaf, &cache->item, |
7032 | btrfs_item_ptr_offset(leaf, path->slots[0]), | 7252 | btrfs_item_ptr_offset(leaf, path->slots[0]), |
7033 | sizeof(cache->item)); | 7253 | sizeof(cache->item)); |
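The extents_thresh initialisation above (repeated in btrfs_make_block_group() further down) budgets 32k of ram per block group for free space tracking and starts converting to bitmaps at half of that. A quick check of the arithmetic, assuming a hypothetical 24-byte struct btrfs_free_space; the real size depends on the kernel and config:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed size of struct btrfs_free_space; illustrative only. */
            const unsigned long entry_size = 24;
            unsigned long thresh = ((1024 * 32) / 2) / entry_size;

            printf("extents_thresh = %lu entries (~%lu KiB at the threshold)\n",
                   thresh, thresh * entry_size / 1024);
            return 0;
    }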
@@ -7036,6 +7256,26 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7036 | key.objectid = found_key.objectid + found_key.offset; | 7256 | key.objectid = found_key.objectid + found_key.offset; |
7037 | btrfs_release_path(root, path); | 7257 | btrfs_release_path(root, path); |
7038 | cache->flags = btrfs_block_group_flags(&cache->item); | 7258 | cache->flags = btrfs_block_group_flags(&cache->item); |
7259 | cache->sectorsize = root->sectorsize; | ||
7260 | |||
7261 | remove_sb_from_cache(root, cache); | ||
7262 | |||
7263 | /* | ||
7264 | * check for two cases, either we are full, and therefore | ||
7265 | * don't need to bother with the caching work since we won't | ||
7266 | * find any space, or we are empty, and we can just add all | ||
7267 | * the space in and be done with it. This saves us a lot of | ||
7268 | * time, particularly in the full case. | ||
7269 | */ | ||
7270 | if (found_key.offset == btrfs_block_group_used(&cache->item)) { | ||
7271 | cache->cached = BTRFS_CACHE_FINISHED; | ||
7272 | } else if (btrfs_block_group_used(&cache->item) == 0) { | ||
7273 | cache->cached = BTRFS_CACHE_FINISHED; | ||
7274 | add_new_free_space(cache, root->fs_info, | ||
7275 | found_key.objectid, | ||
7276 | found_key.objectid + | ||
7277 | found_key.offset); | ||
7278 | } | ||
7039 | 7279 | ||
7040 | ret = update_space_info(info, cache->flags, found_key.offset, | 7280 | ret = update_space_info(info, cache->flags, found_key.offset, |
7041 | btrfs_block_group_used(&cache->item), | 7281 | btrfs_block_group_used(&cache->item), |
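
The two-case check above lets btrfs_read_block_groups() skip the deferred caching work entirely when a block group is completely used (there is no free space to find) or completely unused (the whole range can be added as free space up front). A minimal sketch of that decision with made-up names standing in for the btrfs fields:

    #include <stdio.h>

    enum cache_state { CACHE_NEEDED, CACHE_FINISHED };

    /* decide whether scanning the extent tree for free space can be skipped */
    static enum cache_state classify(unsigned long long group_size,
                                     unsigned long long bytes_used,
                                     unsigned long long *free_to_add)
    {
        *free_to_add = 0;
        if (bytes_used == group_size)       /* full: nothing to find */
            return CACHE_FINISHED;
        if (bytes_used == 0) {              /* empty: add the whole range now */
            *free_to_add = group_size;
            return CACHE_FINISHED;
        }
        return CACHE_NEEDED;                /* partially used: cache lazily */
    }

    int main(void)
    {
        unsigned long long add;
        enum cache_state s;

        printf("%d\n", classify(1 << 30, 1 << 30, &add));   /* 1: full */
        s = classify(1 << 30, 0, &add);
        printf("%d, add %llu bytes\n", s, add);              /* 1, whole 1 GiB */
        printf("%d\n", classify(1 << 30, 4096, &add));       /* 0: cache later */
        return 0;
    }
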
@@ -7079,10 +7319,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
7079 | cache->key.objectid = chunk_offset; | 7319 | cache->key.objectid = chunk_offset; |
7080 | cache->key.offset = size; | 7320 | cache->key.offset = size; |
7081 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | 7321 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; |
7322 | cache->sectorsize = root->sectorsize; | ||
7323 | |||
7324 | /* | ||
7325 | * we only want to have 32k of ram per block group for keeping track | ||
7326 | * of free space, and if we pass 1/2 of that we want to start | ||
7327 | * converting things over to using bitmaps | ||
7328 | */ | ||
7329 | cache->extents_thresh = ((1024 * 32) / 2) / | ||
7330 | sizeof(struct btrfs_free_space); | ||
7082 | atomic_set(&cache->count, 1); | 7331 | atomic_set(&cache->count, 1); |
7083 | spin_lock_init(&cache->lock); | 7332 | spin_lock_init(&cache->lock); |
7084 | spin_lock_init(&cache->tree_lock); | 7333 | spin_lock_init(&cache->tree_lock); |
7085 | mutex_init(&cache->cache_mutex); | 7334 | init_waitqueue_head(&cache->caching_q); |
7086 | INIT_LIST_HEAD(&cache->list); | 7335 | INIT_LIST_HEAD(&cache->list); |
7087 | INIT_LIST_HEAD(&cache->cluster_list); | 7336 | INIT_LIST_HEAD(&cache->cluster_list); |
7088 | 7337 | ||
@@ -7091,6 +7340,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
7091 | cache->flags = type; | 7340 | cache->flags = type; |
7092 | btrfs_set_block_group_flags(&cache->item, type); | 7341 | btrfs_set_block_group_flags(&cache->item, type); |
7093 | 7342 | ||
7343 | cache->cached = BTRFS_CACHE_FINISHED; | ||
7344 | remove_sb_from_cache(root, cache); | ||
7345 | |||
7346 | add_new_free_space(cache, root->fs_info, chunk_offset, | ||
7347 | chunk_offset + size); | ||
7348 | |||
7094 | ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, | 7349 | ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, |
7095 | &cache->space_info); | 7350 | &cache->space_info); |
7096 | BUG_ON(ret); | 7351 | BUG_ON(ret); |
@@ -7149,7 +7404,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |||
7149 | rb_erase(&block_group->cache_node, | 7404 | rb_erase(&block_group->cache_node, |
7150 | &root->fs_info->block_group_cache_tree); | 7405 | &root->fs_info->block_group_cache_tree); |
7151 | spin_unlock(&root->fs_info->block_group_cache_lock); | 7406 | spin_unlock(&root->fs_info->block_group_cache_lock); |
7152 | btrfs_remove_free_space_cache(block_group); | 7407 | |
7153 | down_write(&block_group->space_info->groups_sem); | 7408 | down_write(&block_group->space_info->groups_sem); |
7154 | /* | 7409 | /* |
7155 | * we must use list_del_init so people can check to see if they | 7410 | * we must use list_del_init so people can check to see if they |
@@ -7158,11 +7413,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |||
7158 | list_del_init(&block_group->list); | 7413 | list_del_init(&block_group->list); |
7159 | up_write(&block_group->space_info->groups_sem); | 7414 | up_write(&block_group->space_info->groups_sem); |
7160 | 7415 | ||
7416 | if (block_group->cached == BTRFS_CACHE_STARTED) | ||
7417 | wait_event(block_group->caching_q, | ||
7418 | block_group_cache_done(block_group)); | ||
7419 | |||
7420 | btrfs_remove_free_space_cache(block_group); | ||
7421 | |||
7161 | spin_lock(&block_group->space_info->lock); | 7422 | spin_lock(&block_group->space_info->lock); |
7162 | block_group->space_info->total_bytes -= block_group->key.offset; | 7423 | block_group->space_info->total_bytes -= block_group->key.offset; |
7163 | block_group->space_info->bytes_readonly -= block_group->key.offset; | 7424 | block_group->space_info->bytes_readonly -= block_group->key.offset; |
7164 | spin_unlock(&block_group->space_info->lock); | 7425 | spin_unlock(&block_group->space_info->lock); |
7165 | block_group->space_info->full = 0; | 7426 | |
7427 | btrfs_clear_space_info_full(root->fs_info); | ||
7166 | 7428 | ||
7167 | btrfs_put_block_group(block_group); | 7429 | btrfs_put_block_group(block_group); |
7168 | btrfs_put_block_group(block_group); | 7430 | btrfs_put_block_group(block_group); |
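
Both teardown paths in the hunks above now wait for the asynchronous caching work before calling btrfs_remove_free_space_cache(), rather than tearing the cache down while the caching thread may still be inserting entries. Stripped of the surrounding locking it is the standard wait-queue pattern; the waiting side is taken from the hunks above, while the waking side lives in the caching worker outside this excerpt, so its exact shape here is an assumption:

    /* teardown side (btrfs_free_block_groups / btrfs_remove_block_group) */
    if (block_group->cached == BTRFS_CACHE_STARTED)
        wait_event(block_group->caching_q,
                   block_group_cache_done(block_group));
    btrfs_remove_free_space_cache(block_group);

    /*
     * caching-worker side once the last free extent has been added
     * (assumed, not shown in this excerpt)
     */
    block_group->cached = BTRFS_CACHE_FINISHED;
    wake_up(&block_group->caching_q);
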
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 4538e48581a5..af99b78b288e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -16,45 +16,46 @@ | |||
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/pagemap.h> | ||
19 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/math64.h> | ||
20 | #include "ctree.h" | 22 | #include "ctree.h" |
21 | #include "free-space-cache.h" | 23 | #include "free-space-cache.h" |
22 | #include "transaction.h" | 24 | #include "transaction.h" |
23 | 25 | ||
24 | struct btrfs_free_space { | 26 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) |
25 | struct rb_node bytes_index; | 27 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) |
26 | struct rb_node offset_index; | ||
27 | u64 offset; | ||
28 | u64 bytes; | ||
29 | }; | ||
30 | 28 | ||
31 | static int tree_insert_offset(struct rb_root *root, u64 offset, | 29 | static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, |
32 | struct rb_node *node) | 30 | u64 offset) |
33 | { | 31 | { |
34 | struct rb_node **p = &root->rb_node; | 32 | BUG_ON(offset < bitmap_start); |
35 | struct rb_node *parent = NULL; | 33 | offset -= bitmap_start; |
36 | struct btrfs_free_space *info; | 34 | return (unsigned long)(div64_u64(offset, sectorsize)); |
35 | } | ||
37 | 36 | ||
38 | while (*p) { | 37 | static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) |
39 | parent = *p; | 38 | { |
40 | info = rb_entry(parent, struct btrfs_free_space, offset_index); | 39 | return (unsigned long)(div64_u64(bytes, sectorsize)); |
40 | } | ||
41 | 41 | ||
42 | if (offset < info->offset) | 42 | static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, |
43 | p = &(*p)->rb_left; | 43 | u64 offset) |
44 | else if (offset > info->offset) | 44 | { |
45 | p = &(*p)->rb_right; | 45 | u64 bitmap_start; |
46 | else | 46 | u64 bytes_per_bitmap; |
47 | return -EEXIST; | ||
48 | } | ||
49 | 47 | ||
50 | rb_link_node(node, parent, p); | 48 | bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; |
51 | rb_insert_color(node, root); | 49 | bitmap_start = offset - block_group->key.objectid; |
50 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); | ||
51 | bitmap_start *= bytes_per_bitmap; | ||
52 | bitmap_start += block_group->key.objectid; | ||
52 | 53 | ||
53 | return 0; | 54 | return bitmap_start; |
54 | } | 55 | } |
55 | 56 | ||
56 | static int tree_insert_bytes(struct rb_root *root, u64 bytes, | 57 | static int tree_insert_offset(struct rb_root *root, u64 offset, |
57 | struct rb_node *node) | 58 | struct rb_node *node, int bitmap) |
58 | { | 59 | { |
59 | struct rb_node **p = &root->rb_node; | 60 | struct rb_node **p = &root->rb_node; |
60 | struct rb_node *parent = NULL; | 61 | struct rb_node *parent = NULL; |
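
The helpers above define the bitmap geometry: each bitmap is one page, so with PAGE_CACHE_SIZE = 4096 it holds 32768 bits, and at a 4 KiB sectorsize a single bitmap covers 128 MiB of the block group. offset_to_bitmap() rounds an offset down to the start of the window it falls in (relative to the block group start) and offset_to_bit() converts the remainder into a bit index. A runnable user-space copy of that arithmetic; the 4 KiB page and sector sizes are assumptions, not requirements of the patch:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE_ASSUMED 4096ULL
    #define BITS_PER_BITMAP   (PAGE_SIZE_ASSUMED * 8)   /* 32768 bits */

    int main(void)
    {
        uint64_t sectorsize = 4096;
        uint64_t bg_start = 12ULL * 1024 * 1024 * 1024;     /* group at 12 GiB */
        uint64_t offset = bg_start + 300ULL * 1024 * 1024;  /* free byte at +300 MiB */
        uint64_t bytes_per_bitmap = BITS_PER_BITMAP * sectorsize;

        /* offset_to_bitmap(): round down to the covering bitmap's start */
        uint64_t bitmap_start = ((offset - bg_start) / bytes_per_bitmap)
                                * bytes_per_bitmap + bg_start;
        /* offset_to_bit(): which bit inside that bitmap */
        uint64_t bit = (offset - bitmap_start) / sectorsize;

        printf("one bitmap covers %llu MiB\n",              /* 128 */
               (unsigned long long)(bytes_per_bitmap >> 20));
        printf("bitmap starts at +%llu MiB, bit %llu\n",    /* 256, 11264 */
               (unsigned long long)((bitmap_start - bg_start) >> 20),
               (unsigned long long)bit);
        return 0;
    }
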
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes, | |||
62 | 63 | ||
63 | while (*p) { | 64 | while (*p) { |
64 | parent = *p; | 65 | parent = *p; |
65 | info = rb_entry(parent, struct btrfs_free_space, bytes_index); | 66 | info = rb_entry(parent, struct btrfs_free_space, offset_index); |
66 | 67 | ||
67 | if (bytes < info->bytes) | 68 | if (offset < info->offset) { |
68 | p = &(*p)->rb_left; | 69 | p = &(*p)->rb_left; |
69 | else | 70 | } else if (offset > info->offset) { |
70 | p = &(*p)->rb_right; | 71 | p = &(*p)->rb_right; |
72 | } else { | ||
73 | /* | ||
74 | * we could have a bitmap entry and an extent entry | ||
75 | * share the same offset. If this is the case, we want | ||
76 | * the extent entry to always be found first if we do a | ||
77 | * linear search through the tree, since we want to have | ||
78 | * the quickest allocation time, and allocating from an | ||
79 | * extent is faster than allocating from a bitmap. So | ||
80 | * if we're inserting a bitmap and we find an entry at | ||
81 | * this offset, we want to go right, or after this entry | ||
82 | * logically. If we are inserting an extent and we've | ||
83 | * found a bitmap, we want to go left, or before | ||
84 | * logically. | ||
85 | */ | ||
86 | if (bitmap) { | ||
87 | WARN_ON(info->bitmap); | ||
88 | p = &(*p)->rb_right; | ||
89 | } else { | ||
90 | WARN_ON(!info->bitmap); | ||
91 | p = &(*p)->rb_left; | ||
92 | } | ||
93 | } | ||
71 | } | 94 | } |
72 | 95 | ||
73 | rb_link_node(node, parent, p); | 96 | rb_link_node(node, parent, p); |
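
The comment above pins down the ordering rule that lets one offset-indexed rbtree hold both kinds of entry: when an extent entry and a bitmap entry share a start offset, the extent entry must sort first so a linear walk finds the cheaper allocation source before the bitmap. The insertion decision boils down to a comparator like the sketch below (stand-alone, operating on plain (offset, bitmap) pairs rather than the btrfs rbtree):

    /* <0: new entry goes to the left, >0: to the right */
    static int free_space_cmp(unsigned long long new_off, int new_is_bitmap,
                              unsigned long long cur_off, int cur_is_bitmap)
    {
        if (new_off < cur_off)
            return -1;
        if (new_off > cur_off)
            return 1;
        /* equal offsets: the bitmap entry sorts after the extent entry */
        return new_is_bitmap ? 1 : -1;
    }

tree_insert_offset() encodes the same rule by branching on its new bitmap argument when it hits an equal offset, with the WARN_ONs guarding against two entries of the same kind colliding.
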
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes, | |||
79 | /* | 102 | /* |
80 | * searches the tree for the given offset. | 103 | * searches the tree for the given offset. |
81 | * | 104 | * |
82 | * fuzzy == 1: this is used for allocations where we are given a hint of where | 105 | * fuzzy - If this is set, then we are trying to make an allocation, and we just |
83 | * to look for free space. Because the hint may not be completely on an offset | 106 | * want a section that has at least bytes size and comes at or after the given |
84 | * mark, or the hint may no longer point to free space we need to fudge our | 107 | * offset. |
85 | * results a bit. So we look for free space starting at or after offset with at | ||
86 | * least bytes size. We prefer to find as close to the given offset as we can. | ||
87 | * Also if the offset is within a free space range, then we will return the free | ||
88 | * space that contains the given offset, which means we can return a free space | ||
89 | * chunk with an offset before the provided offset. | ||
90 | * | ||
91 | * fuzzy == 0: this is just a normal tree search. Give us the free space that | ||
92 | * starts at the given offset which is at least bytes size, and if its not there | ||
93 | * return NULL. | ||
94 | */ | 108 | */ |
95 | static struct btrfs_free_space *tree_search_offset(struct rb_root *root, | 109 | static struct btrfs_free_space * |
96 | u64 offset, u64 bytes, | 110 | tree_search_offset(struct btrfs_block_group_cache *block_group, |
97 | int fuzzy) | 111 | u64 offset, int bitmap_only, int fuzzy) |
98 | { | 112 | { |
99 | struct rb_node *n = root->rb_node; | 113 | struct rb_node *n = block_group->free_space_offset.rb_node; |
100 | struct btrfs_free_space *entry, *ret = NULL; | 114 | struct btrfs_free_space *entry, *prev = NULL; |
115 | |||
116 | /* find entry that is closest to the 'offset' */ | ||
117 | while (1) { | ||
118 | if (!n) { | ||
119 | entry = NULL; | ||
120 | break; | ||
121 | } | ||
101 | 122 | ||
102 | while (n) { | ||
103 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | 123 | entry = rb_entry(n, struct btrfs_free_space, offset_index); |
124 | prev = entry; | ||
104 | 125 | ||
105 | if (offset < entry->offset) { | 126 | if (offset < entry->offset) |
106 | if (fuzzy && | ||
107 | (!ret || entry->offset < ret->offset) && | ||
108 | (bytes <= entry->bytes)) | ||
109 | ret = entry; | ||
110 | n = n->rb_left; | 127 | n = n->rb_left; |
111 | } else if (offset > entry->offset) { | 128 | else if (offset > entry->offset) |
112 | if (fuzzy && | ||
113 | (entry->offset + entry->bytes - 1) >= offset && | ||
114 | bytes <= entry->bytes) { | ||
115 | ret = entry; | ||
116 | break; | ||
117 | } | ||
118 | n = n->rb_right; | 129 | n = n->rb_right; |
119 | } else { | 130 | else |
120 | if (bytes > entry->bytes) { | ||
121 | n = n->rb_right; | ||
122 | continue; | ||
123 | } | ||
124 | ret = entry; | ||
125 | break; | 131 | break; |
126 | } | ||
127 | } | 132 | } |
128 | 133 | ||
129 | return ret; | 134 | if (bitmap_only) { |
130 | } | 135 | if (!entry) |
131 | 136 | return NULL; | |
132 | /* | 137 | if (entry->bitmap) |
133 | * return a chunk at least bytes size, as close to offset that we can get. | 138 | return entry; |
134 | */ | ||
135 | static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, | ||
136 | u64 offset, u64 bytes) | ||
137 | { | ||
138 | struct rb_node *n = root->rb_node; | ||
139 | struct btrfs_free_space *entry, *ret = NULL; | ||
140 | 139 | ||
141 | while (n) { | 140 | /* |
142 | entry = rb_entry(n, struct btrfs_free_space, bytes_index); | 141 | * bitmap entry and extent entry may share same offset, |
142 | * in that case, bitmap entry comes after extent entry. | ||
143 | */ | ||
144 | n = rb_next(n); | ||
145 | if (!n) | ||
146 | return NULL; | ||
147 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | ||
148 | if (entry->offset != offset) | ||
149 | return NULL; | ||
143 | 150 | ||
144 | if (bytes < entry->bytes) { | 151 | WARN_ON(!entry->bitmap); |
152 | return entry; | ||
153 | } else if (entry) { | ||
154 | if (entry->bitmap) { | ||
145 | /* | 155 | /* |
146 | * We prefer to get a hole size as close to the size we | 156 | * if previous extent entry covers the offset, |
147 | * are asking for so we don't take small slivers out of | 157 | * we should return it instead of the bitmap entry |
148 | * huge holes, but we also want to get as close to the | ||
149 | * offset as possible so we don't have a whole lot of | ||
150 | * fragmentation. | ||
151 | */ | 158 | */ |
152 | if (offset <= entry->offset) { | 159 | n = &entry->offset_index; |
153 | if (!ret) | 160 | while (1) { |
154 | ret = entry; | 161 | n = rb_prev(n); |
155 | else if (entry->bytes < ret->bytes) | 162 | if (!n) |
156 | ret = entry; | 163 | break; |
157 | else if (entry->offset < ret->offset) | 164 | prev = rb_entry(n, struct btrfs_free_space, |
158 | ret = entry; | 165 | offset_index); |
166 | if (!prev->bitmap) { | ||
167 | if (prev->offset + prev->bytes > offset) | ||
168 | entry = prev; | ||
169 | break; | ||
170 | } | ||
159 | } | 171 | } |
160 | n = n->rb_left; | 172 | } |
161 | } else if (bytes > entry->bytes) { | 173 | return entry; |
162 | n = n->rb_right; | 174 | } |
175 | |||
176 | if (!prev) | ||
177 | return NULL; | ||
178 | |||
179 | /* find last entry before the 'offset' */ | ||
180 | entry = prev; | ||
181 | if (entry->offset > offset) { | ||
182 | n = rb_prev(&entry->offset_index); | ||
183 | if (n) { | ||
184 | entry = rb_entry(n, struct btrfs_free_space, | ||
185 | offset_index); | ||
186 | BUG_ON(entry->offset > offset); | ||
163 | } else { | 187 | } else { |
164 | /* | 188 | if (fuzzy) |
165 | * Ok we may have multiple chunks of the wanted size, | 189 | return entry; |
166 | * so we don't want to take the first one we find, we | 190 | else |
167 | * want to take the one closest to our given offset, so | 191 | return NULL; |
168 | * keep searching just in case theres a better match. | ||
169 | */ | ||
170 | n = n->rb_right; | ||
171 | if (offset > entry->offset) | ||
172 | continue; | ||
173 | else if (!ret || entry->offset < ret->offset) | ||
174 | ret = entry; | ||
175 | } | 192 | } |
176 | } | 193 | } |
177 | 194 | ||
178 | return ret; | 195 | if (entry->bitmap) { |
196 | n = &entry->offset_index; | ||
197 | while (1) { | ||
198 | n = rb_prev(n); | ||
199 | if (!n) | ||
200 | break; | ||
201 | prev = rb_entry(n, struct btrfs_free_space, | ||
202 | offset_index); | ||
203 | if (!prev->bitmap) { | ||
204 | if (prev->offset + prev->bytes > offset) | ||
205 | return prev; | ||
206 | break; | ||
207 | } | ||
208 | } | ||
209 | if (entry->offset + BITS_PER_BITMAP * | ||
210 | block_group->sectorsize > offset) | ||
211 | return entry; | ||
212 | } else if (entry->offset + entry->bytes > offset) | ||
213 | return entry; | ||
214 | |||
215 | if (!fuzzy) | ||
216 | return NULL; | ||
217 | |||
218 | while (1) { | ||
219 | if (entry->bitmap) { | ||
220 | if (entry->offset + BITS_PER_BITMAP * | ||
221 | block_group->sectorsize > offset) | ||
222 | break; | ||
223 | } else { | ||
224 | if (entry->offset + entry->bytes > offset) | ||
225 | break; | ||
226 | } | ||
227 | |||
228 | n = rb_next(&entry->offset_index); | ||
229 | if (!n) | ||
230 | return NULL; | ||
231 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | ||
232 | } | ||
233 | return entry; | ||
179 | } | 234 | } |
180 | 235 | ||
181 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, | 236 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, |
182 | struct btrfs_free_space *info) | 237 | struct btrfs_free_space *info) |
183 | { | 238 | { |
184 | rb_erase(&info->offset_index, &block_group->free_space_offset); | 239 | rb_erase(&info->offset_index, &block_group->free_space_offset); |
185 | rb_erase(&info->bytes_index, &block_group->free_space_bytes); | 240 | block_group->free_extents--; |
241 | block_group->free_space -= info->bytes; | ||
186 | } | 242 | } |
187 | 243 | ||
188 | static int link_free_space(struct btrfs_block_group_cache *block_group, | 244 | static int link_free_space(struct btrfs_block_group_cache *block_group, |
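
The rewritten search above has to answer "does this entry cover the requested offset" for two different entry kinds: an extent entry spans exactly [offset, offset + bytes), while a bitmap entry always spans a fixed BITS_PER_BITMAP * sectorsize window no matter how many of its bits are currently set (which is why the bitmap test uses the window size, not entry->bytes). A small runnable sketch of just that coverage test, with the same assumed 4 KiB sizes as before:

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_PER_BITMAP (4096ULL * 8)   /* assumes 4 KiB pages */

    struct entry {
        uint64_t offset;
        uint64_t bytes;     /* free bytes tracked by the entry */
        int bitmap;         /* 0 = extent entry, 1 = bitmap entry */
    };

    static int covers(const struct entry *e, uint64_t sectorsize, uint64_t offset)
    {
        uint64_t end;

        if (e->bitmap)
            end = e->offset + BITS_PER_BITMAP * sectorsize; /* fixed window */
        else
            end = e->offset + e->bytes;                     /* exact extent */
        return offset >= e->offset && offset < end;
    }

    int main(void)
    {
        struct entry ext = { 1 << 20, 64 * 1024, 0 };   /* 64 KiB extent */
        struct entry bmp = { 1 << 20, 4096, 1 };        /* bitmap, one bit set */
        uint64_t query = (1 << 20) + 128 * 1024;

        /* prints "0 1": past the extent, still inside the bitmap's window */
        printf("%d %d\n", covers(&ext, 4096, query), covers(&bmp, 4096, query));
        return 0;
    }
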
@@ -190,17 +246,314 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, | |||
190 | { | 246 | { |
191 | int ret = 0; | 247 | int ret = 0; |
192 | 248 | ||
193 | 249 | BUG_ON(!info->bitmap && !info->bytes); | |
194 | BUG_ON(!info->bytes); | ||
195 | ret = tree_insert_offset(&block_group->free_space_offset, info->offset, | 250 | ret = tree_insert_offset(&block_group->free_space_offset, info->offset, |
196 | &info->offset_index); | 251 | &info->offset_index, (info->bitmap != NULL)); |
197 | if (ret) | 252 | if (ret) |
198 | return ret; | 253 | return ret; |
199 | 254 | ||
200 | ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, | 255 | block_group->free_space += info->bytes; |
201 | &info->bytes_index); | 256 | block_group->free_extents++; |
202 | if (ret) | 257 | return ret; |
203 | return ret; | 258 | } |
259 | |||
260 | static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | ||
261 | { | ||
262 | u64 max_bytes, possible_bytes; | ||
263 | |||
264 | /* | ||
265 | * The goal is to keep the total amount of memory used per 1gb of space | ||
266 | * at or below 32k, so we need to adjust how much memory we allow to be | ||
267 | * used by extent based free space tracking | ||
268 | */ | ||
269 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | ||
270 | (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); | ||
271 | |||
272 | possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) + | ||
273 | (sizeof(struct btrfs_free_space) * | ||
274 | block_group->extents_thresh); | ||
275 | |||
276 | if (possible_bytes > max_bytes) { | ||
277 | int extent_bytes = max_bytes - | ||
278 | (block_group->total_bitmaps * PAGE_CACHE_SIZE); | ||
279 | |||
280 | if (extent_bytes <= 0) { | ||
281 | block_group->extents_thresh = 0; | ||
282 | return; | ||
283 | } | ||
284 | |||
285 | block_group->extents_thresh = extent_bytes / | ||
286 | (sizeof(struct btrfs_free_space)); | ||
287 | } | ||
288 | } | ||
289 | |||
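
recalculate_thresholds() keeps the combined footprint of bitmap pages plus extent entries at or below MAX_CACHE_BYTES_PER_GIG (32 KiB) per GiB of block group: every bitmap costs one page, and whatever budget remains is divided by the entry size to get the new extents_thresh. A runnable sketch of that bookkeeping under assumed sizes (4 KiB pages, 64-byte entries, 1 GiB block group); it computes the cap directly, whereas the kernel code only lowers the threshold once the current footprint actually exceeds the budget:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long group_bytes = 1ULL << 30;    /* 1 GiB block group */
        unsigned long long max_bytes = 32ULL * 1024 * (group_bytes >> 30);
        unsigned long long page = 4096, entry = 64;     /* assumed sizes */
        unsigned int bitmaps;

        for (bitmaps = 0; bitmaps <= 8; bitmaps += 4) {
            unsigned long long used = (unsigned long long)bitmaps * page;
            unsigned long long thresh =
                used >= max_bytes ? 0 : (max_bytes - used) / entry;
            /* 0 bitmaps -> 512 extents, 4 -> 256, 8 -> 0 */
            printf("%u bitmaps -> extents_thresh %llu\n", bitmaps, thresh);
        }
        return 0;
    }
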
290 | static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, | ||
291 | struct btrfs_free_space *info, u64 offset, | ||
292 | u64 bytes) | ||
293 | { | ||
294 | unsigned long start, end; | ||
295 | unsigned long i; | ||
296 | |||
297 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | ||
298 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | ||
299 | BUG_ON(end > BITS_PER_BITMAP); | ||
300 | |||
301 | for (i = start; i < end; i++) | ||
302 | clear_bit(i, info->bitmap); | ||
303 | |||
304 | info->bytes -= bytes; | ||
305 | block_group->free_space -= bytes; | ||
306 | } | ||
307 | |||
308 | static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, | ||
309 | struct btrfs_free_space *info, u64 offset, | ||
310 | u64 bytes) | ||
311 | { | ||
312 | unsigned long start, end; | ||
313 | unsigned long i; | ||
314 | |||
315 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | ||
316 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | ||
317 | BUG_ON(end > BITS_PER_BITMAP); | ||
318 | |||
319 | for (i = start; i < end; i++) | ||
320 | set_bit(i, info->bitmap); | ||
321 | |||
322 | info->bytes += bytes; | ||
323 | block_group->free_space += bytes; | ||
324 | } | ||
325 | |||
326 | static int search_bitmap(struct btrfs_block_group_cache *block_group, | ||
327 | struct btrfs_free_space *bitmap_info, u64 *offset, | ||
328 | u64 *bytes) | ||
329 | { | ||
330 | unsigned long found_bits = 0; | ||
331 | unsigned long bits, i; | ||
332 | unsigned long next_zero; | ||
333 | |||
334 | i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, | ||
335 | max_t(u64, *offset, bitmap_info->offset)); | ||
336 | bits = bytes_to_bits(*bytes, block_group->sectorsize); | ||
337 | |||
338 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); | ||
339 | i < BITS_PER_BITMAP; | ||
340 | i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) { | ||
341 | next_zero = find_next_zero_bit(bitmap_info->bitmap, | ||
342 | BITS_PER_BITMAP, i); | ||
343 | if ((next_zero - i) >= bits) { | ||
344 | found_bits = next_zero - i; | ||
345 | break; | ||
346 | } | ||
347 | i = next_zero; | ||
348 | } | ||
349 | |||
350 | if (found_bits) { | ||
351 | *offset = (u64)(i * block_group->sectorsize) + | ||
352 | bitmap_info->offset; | ||
353 | *bytes = (u64)(found_bits) * block_group->sectorsize; | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | return -1; | ||
358 | } | ||
359 | |||
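
search_bitmap() above looks for a run of at least `bits` consecutive set bits by alternating find_next_bit() and find_next_zero_bit(), then converts the winning run back into a byte offset and length. The same scan written against a plain byte array so it runs in user space (the kernel helpers work on unsigned long words, so this is an analogue rather than a copy):

    #include <stdio.h>
    #include <string.h>

    #define NBITS 64

    static int test_bit_at(const unsigned char *map, int i)
    {
        return (map[i / 8] >> (i % 8)) & 1;
    }

    /* find the first run of at least want_bits set bits; return its start or -1 */
    static int find_run(const unsigned char *map, int want_bits, int *run_len)
    {
        int i = 0;

        while (i < NBITS) {
            while (i < NBITS && !test_bit_at(map, i))   /* find_next_bit() */
                i++;
            int start = i;
            while (i < NBITS && test_bit_at(map, i))    /* find_next_zero_bit() */
                i++;
            if (i > start && i - start >= want_bits) {
                *run_len = i - start;
                return start;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned char map[NBITS / 8];
        int len = 0, start;

        memset(map, 0, sizeof(map));
        for (int i = 10; i < 13; i++)   /* 3 free sectors at bit 10 */
            map[i / 8] |= 1 << (i % 8);
        for (int i = 20; i < 30; i++)   /* 10 free sectors at bit 20 */
            map[i / 8] |= 1 << (i % 8);

        start = find_run(map, 4, &len);
        printf("first run of >= 4 bits: start %d, len %d\n", start, len); /* 20, 10 */
        return 0;
    }
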
360 | static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | ||
361 | *block_group, u64 *offset, | ||
362 | u64 *bytes, int debug) | ||
363 | { | ||
364 | struct btrfs_free_space *entry; | ||
365 | struct rb_node *node; | ||
366 | int ret; | ||
367 | |||
368 | if (!block_group->free_space_offset.rb_node) | ||
369 | return NULL; | ||
370 | |||
371 | entry = tree_search_offset(block_group, | ||
372 | offset_to_bitmap(block_group, *offset), | ||
373 | 0, 1); | ||
374 | if (!entry) | ||
375 | return NULL; | ||
376 | |||
377 | for (node = &entry->offset_index; node; node = rb_next(node)) { | ||
378 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
379 | if (entry->bytes < *bytes) | ||
380 | continue; | ||
381 | |||
382 | if (entry->bitmap) { | ||
383 | ret = search_bitmap(block_group, entry, offset, bytes); | ||
384 | if (!ret) | ||
385 | return entry; | ||
386 | continue; | ||
387 | } | ||
388 | |||
389 | *offset = entry->offset; | ||
390 | *bytes = entry->bytes; | ||
391 | return entry; | ||
392 | } | ||
393 | |||
394 | return NULL; | ||
395 | } | ||
396 | |||
397 | static void add_new_bitmap(struct btrfs_block_group_cache *block_group, | ||
398 | struct btrfs_free_space *info, u64 offset) | ||
399 | { | ||
400 | u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; | ||
401 | int max_bitmaps = (int)div64_u64(block_group->key.offset + | ||
402 | bytes_per_bg - 1, bytes_per_bg); | ||
403 | BUG_ON(block_group->total_bitmaps >= max_bitmaps); | ||
404 | |||
405 | info->offset = offset_to_bitmap(block_group, offset); | ||
406 | link_free_space(block_group, info); | ||
407 | block_group->total_bitmaps++; | ||
408 | |||
409 | recalculate_thresholds(block_group); | ||
410 | } | ||
411 | |||
412 | static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, | ||
413 | struct btrfs_free_space *bitmap_info, | ||
414 | u64 *offset, u64 *bytes) | ||
415 | { | ||
416 | u64 end; | ||
417 | |||
418 | again: | ||
419 | end = bitmap_info->offset + | ||
420 | (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; | ||
421 | |||
422 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { | ||
423 | bitmap_clear_bits(block_group, bitmap_info, *offset, | ||
424 | end - *offset + 1); | ||
425 | *bytes -= end - *offset + 1; | ||
426 | *offset = end + 1; | ||
427 | } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { | ||
428 | bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); | ||
429 | *bytes = 0; | ||
430 | } | ||
431 | |||
432 | if (*bytes) { | ||
433 | if (!bitmap_info->bytes) { | ||
434 | unlink_free_space(block_group, bitmap_info); | ||
435 | kfree(bitmap_info->bitmap); | ||
436 | kfree(bitmap_info); | ||
437 | block_group->total_bitmaps--; | ||
438 | recalculate_thresholds(block_group); | ||
439 | } | ||
440 | |||
441 | bitmap_info = tree_search_offset(block_group, | ||
442 | offset_to_bitmap(block_group, | ||
443 | *offset), | ||
444 | 1, 0); | ||
445 | if (!bitmap_info) | ||
446 | return -EINVAL; | ||
447 | |||
448 | if (!bitmap_info->bitmap) | ||
449 | return -EAGAIN; | ||
450 | |||
451 | goto again; | ||
452 | } else if (!bitmap_info->bytes) { | ||
453 | unlink_free_space(block_group, bitmap_info); | ||
454 | kfree(bitmap_info->bitmap); | ||
455 | kfree(bitmap_info); | ||
456 | block_group->total_bitmaps--; | ||
457 | recalculate_thresholds(block_group); | ||
458 | } | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
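
remove_from_bitmap() above has to handle a free range that starts inside one bitmap window but runs past its end: it clears everything up to the window boundary, advances *offset and shrinks *bytes, and then loops into the entry covering the remainder (bailing out with -EAGAIN if that entry turns out to be an extent so the caller can retry). The boundary arithmetic on its own, as a runnable sketch with the 128 MiB window size assumed earlier:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t window = 128ULL << 20;             /* bytes covered by one bitmap */
        uint64_t bitmap_start = 0;
        uint64_t end = bitmap_start + window - 1;   /* last byte it covers */
        uint64_t offset = 120ULL << 20, bytes = 16ULL << 20; /* free 120M..136M */

        if (offset + bytes - 1 > end) {
            /* clear the tail of this bitmap, then continue in the next one */
            uint64_t cleared = end - offset + 1;

            printf("clear %llu MiB here, %llu MiB left starting at %llu MiB\n",
                   (unsigned long long)(cleared >> 20),           /* 8 */
                   (unsigned long long)((bytes - cleared) >> 20), /* 8 */
                   (unsigned long long)((end + 1) >> 20));        /* 128 */
        }
        return 0;
    }
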
463 | static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | ||
464 | struct btrfs_free_space *info) | ||
465 | { | ||
466 | struct btrfs_free_space *bitmap_info; | ||
467 | int added = 0; | ||
468 | u64 bytes, offset, end; | ||
469 | int ret; | ||
470 | |||
471 | /* | ||
472 | * If we are below the extents threshold then we can add this as an | ||
473 | * extent, and don't have to deal with the bitmap | ||
474 | */ | ||
475 | if (block_group->free_extents < block_group->extents_thresh && | ||
476 | info->bytes > block_group->sectorsize * 4) | ||
477 | return 0; | ||
478 | |||
479 | /* | ||
480 | * some block groups are so tiny they can't be enveloped by a bitmap, so | ||
481 | * don't even bother to create a bitmap for this | ||
482 | */ | ||
483 | if (BITS_PER_BITMAP * block_group->sectorsize > | ||
484 | block_group->key.offset) | ||
485 | return 0; | ||
486 | |||
487 | bytes = info->bytes; | ||
488 | offset = info->offset; | ||
489 | |||
490 | again: | ||
491 | bitmap_info = tree_search_offset(block_group, | ||
492 | offset_to_bitmap(block_group, offset), | ||
493 | 1, 0); | ||
494 | if (!bitmap_info) { | ||
495 | BUG_ON(added); | ||
496 | goto new_bitmap; | ||
497 | } | ||
498 | |||
499 | end = bitmap_info->offset + | ||
500 | (u64)(BITS_PER_BITMAP * block_group->sectorsize); | ||
501 | |||
502 | if (offset >= bitmap_info->offset && offset + bytes > end) { | ||
503 | bitmap_set_bits(block_group, bitmap_info, offset, | ||
504 | end - offset); | ||
505 | bytes -= end - offset; | ||
506 | offset = end; | ||
507 | added = 0; | ||
508 | } else if (offset >= bitmap_info->offset && offset + bytes <= end) { | ||
509 | bitmap_set_bits(block_group, bitmap_info, offset, bytes); | ||
510 | bytes = 0; | ||
511 | } else { | ||
512 | BUG(); | ||
513 | } | ||
514 | |||
515 | if (!bytes) { | ||
516 | ret = 1; | ||
517 | goto out; | ||
518 | } else | ||
519 | goto again; | ||
520 | |||
521 | new_bitmap: | ||
522 | if (info && info->bitmap) { | ||
523 | add_new_bitmap(block_group, info, offset); | ||
524 | added = 1; | ||
525 | info = NULL; | ||
526 | goto again; | ||
527 | } else { | ||
528 | spin_unlock(&block_group->tree_lock); | ||
529 | |||
530 | /* no pre-allocated info, allocate a new one */ | ||
531 | if (!info) { | ||
532 | info = kzalloc(sizeof(struct btrfs_free_space), | ||
533 | GFP_NOFS); | ||
534 | if (!info) { | ||
535 | spin_lock(&block_group->tree_lock); | ||
536 | ret = -ENOMEM; | ||
537 | goto out; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | /* allocate the bitmap */ | ||
542 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | ||
543 | spin_lock(&block_group->tree_lock); | ||
544 | if (!info->bitmap) { | ||
545 | ret = -ENOMEM; | ||
546 | goto out; | ||
547 | } | ||
548 | goto again; | ||
549 | } | ||
550 | |||
551 | out: | ||
552 | if (info) { | ||
553 | if (info->bitmap) | ||
554 | kfree(info->bitmap); | ||
555 | kfree(info); | ||
556 | } | ||
204 | 557 | ||
205 | return ret; | 558 | return ret; |
206 | } | 559 | } |
@@ -208,8 +561,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, | |||
208 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 561 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, |
209 | u64 offset, u64 bytes) | 562 | u64 offset, u64 bytes) |
210 | { | 563 | { |
211 | struct btrfs_free_space *right_info; | 564 | struct btrfs_free_space *right_info = NULL; |
212 | struct btrfs_free_space *left_info; | 565 | struct btrfs_free_space *left_info = NULL; |
213 | struct btrfs_free_space *info = NULL; | 566 | struct btrfs_free_space *info = NULL; |
214 | int ret = 0; | 567 | int ret = 0; |
215 | 568 | ||
@@ -227,18 +580,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
227 | * are adding, if there is remove that struct and add a new one to | 580 | * are adding, if there is remove that struct and add a new one to |
228 | * cover the entire range | 581 | * cover the entire range |
229 | */ | 582 | */ |
230 | right_info = tree_search_offset(&block_group->free_space_offset, | 583 | right_info = tree_search_offset(block_group, offset + bytes, 0, 0); |
231 | offset+bytes, 0, 0); | 584 | if (right_info && rb_prev(&right_info->offset_index)) |
232 | left_info = tree_search_offset(&block_group->free_space_offset, | 585 | left_info = rb_entry(rb_prev(&right_info->offset_index), |
233 | offset-1, 0, 1); | 586 | struct btrfs_free_space, offset_index); |
587 | else | ||
588 | left_info = tree_search_offset(block_group, offset - 1, 0, 0); | ||
234 | 589 | ||
235 | if (right_info) { | 590 | /* |
591 | * If there was no extent directly to the left or right of this new | ||
592 | * extent then we know we're going to have to allocate a new extent, so | ||
593 | * before we do that see if we need to drop this into a bitmap | ||
594 | */ | ||
595 | if ((!left_info || left_info->bitmap) && | ||
596 | (!right_info || right_info->bitmap)) { | ||
597 | ret = insert_into_bitmap(block_group, info); | ||
598 | |||
599 | if (ret < 0) { | ||
600 | goto out; | ||
601 | } else if (ret) { | ||
602 | ret = 0; | ||
603 | goto out; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | if (right_info && !right_info->bitmap) { | ||
236 | unlink_free_space(block_group, right_info); | 608 | unlink_free_space(block_group, right_info); |
237 | info->bytes += right_info->bytes; | 609 | info->bytes += right_info->bytes; |
238 | kfree(right_info); | 610 | kfree(right_info); |
239 | } | 611 | } |
240 | 612 | ||
241 | if (left_info && left_info->offset + left_info->bytes == offset) { | 613 | if (left_info && !left_info->bitmap && |
614 | left_info->offset + left_info->bytes == offset) { | ||
242 | unlink_free_space(block_group, left_info); | 615 | unlink_free_space(block_group, left_info); |
243 | info->offset = left_info->offset; | 616 | info->offset = left_info->offset; |
244 | info->bytes += left_info->bytes; | 617 | info->bytes += left_info->bytes; |
@@ -248,11 +621,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
248 | ret = link_free_space(block_group, info); | 621 | ret = link_free_space(block_group, info); |
249 | if (ret) | 622 | if (ret) |
250 | kfree(info); | 623 | kfree(info); |
251 | 624 | out: | |
252 | spin_unlock(&block_group->tree_lock); | 625 | spin_unlock(&block_group->tree_lock); |
253 | 626 | ||
254 | if (ret) { | 627 | if (ret) { |
255 | printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); | 628 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); |
256 | BUG_ON(ret == -EEXIST); | 629 | BUG_ON(ret == -EEXIST); |
257 | } | 630 | } |
258 | 631 | ||
@@ -263,40 +636,65 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
263 | u64 offset, u64 bytes) | 636 | u64 offset, u64 bytes) |
264 | { | 637 | { |
265 | struct btrfs_free_space *info; | 638 | struct btrfs_free_space *info; |
639 | struct btrfs_free_space *next_info = NULL; | ||
266 | int ret = 0; | 640 | int ret = 0; |
267 | 641 | ||
268 | spin_lock(&block_group->tree_lock); | 642 | spin_lock(&block_group->tree_lock); |
269 | 643 | ||
270 | info = tree_search_offset(&block_group->free_space_offset, offset, 0, | 644 | again: |
271 | 1); | 645 | info = tree_search_offset(block_group, offset, 0, 0); |
272 | if (info && info->offset == offset) { | 646 | if (!info) { |
273 | if (info->bytes < bytes) { | 647 | WARN_ON(1); |
274 | printk(KERN_ERR "Found free space at %llu, size %llu," | 648 | goto out_lock; |
275 | "trying to use %llu\n", | 649 | } |
276 | (unsigned long long)info->offset, | 650 | |
277 | (unsigned long long)info->bytes, | 651 | if (info->bytes < bytes && rb_next(&info->offset_index)) { |
278 | (unsigned long long)bytes); | 652 | u64 end; |
653 | next_info = rb_entry(rb_next(&info->offset_index), | ||
654 | struct btrfs_free_space, | ||
655 | offset_index); | ||
656 | |||
657 | if (next_info->bitmap) | ||
658 | end = next_info->offset + BITS_PER_BITMAP * | ||
659 | block_group->sectorsize - 1; | ||
660 | else | ||
661 | end = next_info->offset + next_info->bytes; | ||
662 | |||
663 | if (next_info->bytes < bytes || | ||
664 | next_info->offset > offset || offset > end) { | ||
665 | printk(KERN_CRIT "Found free space at %llu, size %llu," | ||
666 | " trying to use %llu\n", | ||
667 | (unsigned long long)info->offset, | ||
668 | (unsigned long long)info->bytes, | ||
669 | (unsigned long long)bytes); | ||
279 | WARN_ON(1); | 670 | WARN_ON(1); |
280 | ret = -EINVAL; | 671 | ret = -EINVAL; |
281 | spin_unlock(&block_group->tree_lock); | 672 | goto out_lock; |
282 | goto out; | ||
283 | } | 673 | } |
284 | unlink_free_space(block_group, info); | ||
285 | 674 | ||
286 | if (info->bytes == bytes) { | 675 | info = next_info; |
287 | kfree(info); | 676 | } |
288 | spin_unlock(&block_group->tree_lock); | 677 | |
289 | goto out; | 678 | if (info->bytes == bytes) { |
679 | unlink_free_space(block_group, info); | ||
680 | if (info->bitmap) { | ||
681 | kfree(info->bitmap); | ||
682 | block_group->total_bitmaps--; | ||
290 | } | 683 | } |
684 | kfree(info); | ||
685 | goto out_lock; | ||
686 | } | ||
291 | 687 | ||
688 | if (!info->bitmap && info->offset == offset) { | ||
689 | unlink_free_space(block_group, info); | ||
292 | info->offset += bytes; | 690 | info->offset += bytes; |
293 | info->bytes -= bytes; | 691 | info->bytes -= bytes; |
692 | link_free_space(block_group, info); | ||
693 | goto out_lock; | ||
694 | } | ||
294 | 695 | ||
295 | ret = link_free_space(block_group, info); | 696 | if (!info->bitmap && info->offset <= offset && |
296 | spin_unlock(&block_group->tree_lock); | 697 | info->offset + info->bytes >= offset + bytes) { |
297 | BUG_ON(ret); | ||
298 | } else if (info && info->offset < offset && | ||
299 | info->offset + info->bytes >= offset + bytes) { | ||
300 | u64 old_start = info->offset; | 698 | u64 old_start = info->offset; |
301 | /* | 699 | /* |
302 | * we're freeing space in the middle of the info, | 700 | * we're freeing space in the middle of the info, |
@@ -312,7 +710,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
312 | info->offset = offset + bytes; | 710 | info->offset = offset + bytes; |
313 | info->bytes = old_end - info->offset; | 711 | info->bytes = old_end - info->offset; |
314 | ret = link_free_space(block_group, info); | 712 | ret = link_free_space(block_group, info); |
315 | BUG_ON(ret); | 713 | WARN_ON(ret); |
714 | if (ret) | ||
715 | goto out_lock; | ||
316 | } else { | 716 | } else { |
317 | /* the hole we're creating ends at the end | 717 | /* the hole we're creating ends at the end |
318 | * of the info struct, just free the info | 718 | * of the info struct, just free the info |
@@ -320,32 +720,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
320 | kfree(info); | 720 | kfree(info); |
321 | } | 721 | } |
322 | spin_unlock(&block_group->tree_lock); | 722 | spin_unlock(&block_group->tree_lock); |
323 | /* step two, insert a new info struct to cover anything | 723 | |
324 | * before the hole | 724 | /* step two, insert a new info struct to cover |
725 | * anything before the hole | ||
325 | */ | 726 | */ |
326 | ret = btrfs_add_free_space(block_group, old_start, | 727 | ret = btrfs_add_free_space(block_group, old_start, |
327 | offset - old_start); | 728 | offset - old_start); |
328 | BUG_ON(ret); | 729 | WARN_ON(ret); |
329 | } else { | 730 | goto out; |
330 | spin_unlock(&block_group->tree_lock); | ||
331 | if (!info) { | ||
332 | printk(KERN_ERR "couldn't find space %llu to free\n", | ||
333 | (unsigned long long)offset); | ||
334 | printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n", | ||
335 | block_group->cached, | ||
336 | (unsigned long long)block_group->key.objectid, | ||
337 | (unsigned long long)block_group->key.offset); | ||
338 | btrfs_dump_free_space(block_group, bytes); | ||
339 | } else if (info) { | ||
340 | printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, " | ||
341 | "but wanted offset=%llu bytes=%llu\n", | ||
342 | (unsigned long long)info->offset, | ||
343 | (unsigned long long)info->bytes, | ||
344 | (unsigned long long)offset, | ||
345 | (unsigned long long)bytes); | ||
346 | } | ||
347 | WARN_ON(1); | ||
348 | } | 731 | } |
732 | |||
733 | ret = remove_from_bitmap(block_group, info, &offset, &bytes); | ||
734 | if (ret == -EAGAIN) | ||
735 | goto again; | ||
736 | BUG_ON(ret); | ||
737 | out_lock: | ||
738 | spin_unlock(&block_group->tree_lock); | ||
349 | out: | 739 | out: |
350 | return ret; | 740 | return ret; |
351 | } | 741 | } |
@@ -361,10 +751,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | |||
361 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 751 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
362 | if (info->bytes >= bytes) | 752 | if (info->bytes >= bytes) |
363 | count++; | 753 | count++; |
364 | printk(KERN_ERR "entry offset %llu, bytes %llu\n", | 754 | printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n", |
365 | (unsigned long long)info->offset, | 755 | (unsigned long long)info->offset, |
366 | (unsigned long long)info->bytes); | 756 | (unsigned long long)info->bytes, |
757 | (info->bitmap) ? "yes" : "no"); | ||
367 | } | 758 | } |
759 | printk(KERN_INFO "block group has cluster?: %s\n", | ||
760 | list_empty(&block_group->cluster_list) ? "no" : "yes"); | ||
368 | printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" | 761 | printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" |
369 | "\n", count); | 762 | "\n", count); |
370 | } | 763 | } |
@@ -397,26 +790,35 @@ __btrfs_return_cluster_to_free_space( | |||
397 | { | 790 | { |
398 | struct btrfs_free_space *entry; | 791 | struct btrfs_free_space *entry; |
399 | struct rb_node *node; | 792 | struct rb_node *node; |
793 | bool bitmap; | ||
400 | 794 | ||
401 | spin_lock(&cluster->lock); | 795 | spin_lock(&cluster->lock); |
402 | if (cluster->block_group != block_group) | 796 | if (cluster->block_group != block_group) |
403 | goto out; | 797 | goto out; |
404 | 798 | ||
799 | bitmap = cluster->points_to_bitmap; | ||
800 | cluster->block_group = NULL; | ||
405 | cluster->window_start = 0; | 801 | cluster->window_start = 0; |
802 | list_del_init(&cluster->block_group_list); | ||
803 | cluster->points_to_bitmap = false; | ||
804 | |||
805 | if (bitmap) | ||
806 | goto out; | ||
807 | |||
406 | node = rb_first(&cluster->root); | 808 | node = rb_first(&cluster->root); |
407 | while(node) { | 809 | while (node) { |
408 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 810 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
409 | node = rb_next(&entry->offset_index); | 811 | node = rb_next(&entry->offset_index); |
410 | rb_erase(&entry->offset_index, &cluster->root); | 812 | rb_erase(&entry->offset_index, &cluster->root); |
411 | link_free_space(block_group, entry); | 813 | BUG_ON(entry->bitmap); |
814 | tree_insert_offset(&block_group->free_space_offset, | ||
815 | entry->offset, &entry->offset_index, 0); | ||
412 | } | 816 | } |
413 | list_del_init(&cluster->block_group_list); | ||
414 | |||
415 | btrfs_put_block_group(cluster->block_group); | ||
416 | cluster->block_group = NULL; | ||
417 | cluster->root.rb_node = NULL; | 817 | cluster->root.rb_node = NULL; |
818 | |||
418 | out: | 819 | out: |
419 | spin_unlock(&cluster->lock); | 820 | spin_unlock(&cluster->lock); |
821 | btrfs_put_block_group(block_group); | ||
420 | return 0; | 822 | return 0; |
421 | } | 823 | } |
422 | 824 | ||
@@ -425,20 +827,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
425 | struct btrfs_free_space *info; | 827 | struct btrfs_free_space *info; |
426 | struct rb_node *node; | 828 | struct rb_node *node; |
427 | struct btrfs_free_cluster *cluster; | 829 | struct btrfs_free_cluster *cluster; |
428 | struct btrfs_free_cluster *safe; | 830 | struct list_head *head; |
429 | 831 | ||
430 | spin_lock(&block_group->tree_lock); | 832 | spin_lock(&block_group->tree_lock); |
431 | 833 | while ((head = block_group->cluster_list.next) != | |
432 | list_for_each_entry_safe(cluster, safe, &block_group->cluster_list, | 834 | &block_group->cluster_list) { |
433 | block_group_list) { | 835 | cluster = list_entry(head, struct btrfs_free_cluster, |
836 | block_group_list); | ||
434 | 837 | ||
435 | WARN_ON(cluster->block_group != block_group); | 838 | WARN_ON(cluster->block_group != block_group); |
436 | __btrfs_return_cluster_to_free_space(block_group, cluster); | 839 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
840 | if (need_resched()) { | ||
841 | spin_unlock(&block_group->tree_lock); | ||
842 | cond_resched(); | ||
843 | spin_lock(&block_group->tree_lock); | ||
844 | } | ||
437 | } | 845 | } |
438 | 846 | ||
439 | while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { | 847 | while ((node = rb_last(&block_group->free_space_offset)) != NULL) { |
440 | info = rb_entry(node, struct btrfs_free_space, bytes_index); | 848 | info = rb_entry(node, struct btrfs_free_space, offset_index); |
441 | unlink_free_space(block_group, info); | 849 | unlink_free_space(block_group, info); |
850 | if (info->bitmap) | ||
851 | kfree(info->bitmap); | ||
442 | kfree(info); | 852 | kfree(info); |
443 | if (need_resched()) { | 853 | if (need_resched()) { |
444 | spin_unlock(&block_group->tree_lock); | 854 | spin_unlock(&block_group->tree_lock); |
@@ -446,6 +856,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
446 | spin_lock(&block_group->tree_lock); | 856 | spin_lock(&block_group->tree_lock); |
447 | } | 857 | } |
448 | } | 858 | } |
859 | |||
449 | spin_unlock(&block_group->tree_lock); | 860 | spin_unlock(&block_group->tree_lock); |
450 | } | 861 | } |
451 | 862 | ||
@@ -453,25 +864,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | |||
453 | u64 offset, u64 bytes, u64 empty_size) | 864 | u64 offset, u64 bytes, u64 empty_size) |
454 | { | 865 | { |
455 | struct btrfs_free_space *entry = NULL; | 866 | struct btrfs_free_space *entry = NULL; |
867 | u64 bytes_search = bytes + empty_size; | ||
456 | u64 ret = 0; | 868 | u64 ret = 0; |
457 | 869 | ||
458 | spin_lock(&block_group->tree_lock); | 870 | spin_lock(&block_group->tree_lock); |
459 | entry = tree_search_offset(&block_group->free_space_offset, offset, | 871 | entry = find_free_space(block_group, &offset, &bytes_search, 0); |
460 | bytes + empty_size, 1); | ||
461 | if (!entry) | 872 | if (!entry) |
462 | entry = tree_search_bytes(&block_group->free_space_bytes, | 873 | goto out; |
463 | offset, bytes + empty_size); | 874 | |
464 | if (entry) { | 875 | ret = offset; |
876 | if (entry->bitmap) { | ||
877 | bitmap_clear_bits(block_group, entry, offset, bytes); | ||
878 | if (!entry->bytes) { | ||
879 | unlink_free_space(block_group, entry); | ||
880 | kfree(entry->bitmap); | ||
881 | kfree(entry); | ||
882 | block_group->total_bitmaps--; | ||
883 | recalculate_thresholds(block_group); | ||
884 | } | ||
885 | } else { | ||
465 | unlink_free_space(block_group, entry); | 886 | unlink_free_space(block_group, entry); |
466 | ret = entry->offset; | ||
467 | entry->offset += bytes; | 887 | entry->offset += bytes; |
468 | entry->bytes -= bytes; | 888 | entry->bytes -= bytes; |
469 | |||
470 | if (!entry->bytes) | 889 | if (!entry->bytes) |
471 | kfree(entry); | 890 | kfree(entry); |
472 | else | 891 | else |
473 | link_free_space(block_group, entry); | 892 | link_free_space(block_group, entry); |
474 | } | 893 | } |
894 | |||
895 | out: | ||
475 | spin_unlock(&block_group->tree_lock); | 896 | spin_unlock(&block_group->tree_lock); |
476 | 897 | ||
477 | return ret; | 898 | return ret; |
@@ -517,6 +938,47 @@ int btrfs_return_cluster_to_free_space( | |||
517 | return ret; | 938 | return ret; |
518 | } | 939 | } |
519 | 940 | ||
941 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | ||
942 | struct btrfs_free_cluster *cluster, | ||
943 | u64 bytes, u64 min_start) | ||
944 | { | ||
945 | struct btrfs_free_space *entry; | ||
946 | int err; | ||
947 | u64 search_start = cluster->window_start; | ||
948 | u64 search_bytes = bytes; | ||
949 | u64 ret = 0; | ||
950 | |||
951 | spin_lock(&block_group->tree_lock); | ||
952 | spin_lock(&cluster->lock); | ||
953 | |||
954 | if (!cluster->points_to_bitmap) | ||
955 | goto out; | ||
956 | |||
957 | if (cluster->block_group != block_group) | ||
958 | goto out; | ||
959 | |||
960 | entry = tree_search_offset(block_group, search_start, 0, 0); | ||
961 | |||
962 | if (!entry || !entry->bitmap) | ||
963 | goto out; | ||
964 | |||
965 | search_start = min_start; | ||
966 | search_bytes = bytes; | ||
967 | |||
968 | err = search_bitmap(block_group, entry, &search_start, | ||
969 | &search_bytes); | ||
970 | if (err) | ||
971 | goto out; | ||
972 | |||
973 | ret = search_start; | ||
974 | bitmap_clear_bits(block_group, entry, ret, bytes); | ||
975 | out: | ||
976 | spin_unlock(&cluster->lock); | ||
977 | spin_unlock(&block_group->tree_lock); | ||
978 | |||
979 | return ret; | ||
980 | } | ||
981 | |||
520 | /* | 982 | /* |
521 | * given a cluster, try to allocate 'bytes' from it, returns 0 | 983 | * given a cluster, try to allocate 'bytes' from it, returns 0 |
522 | * if it couldn't find anything suitably large, or a logical disk offset | 984 | * if it couldn't find anything suitably large, or a logical disk offset |
@@ -530,6 +992,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
530 | struct rb_node *node; | 992 | struct rb_node *node; |
531 | u64 ret = 0; | 993 | u64 ret = 0; |
532 | 994 | ||
995 | if (cluster->points_to_bitmap) | ||
996 | return btrfs_alloc_from_bitmap(block_group, cluster, bytes, | ||
997 | min_start); | ||
998 | |||
533 | spin_lock(&cluster->lock); | 999 | spin_lock(&cluster->lock); |
534 | if (bytes > cluster->max_size) | 1000 | if (bytes > cluster->max_size) |
535 | goto out; | 1001 | goto out; |
@@ -567,9 +1033,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
567 | } | 1033 | } |
568 | out: | 1034 | out: |
569 | spin_unlock(&cluster->lock); | 1035 | spin_unlock(&cluster->lock); |
1036 | |||
570 | return ret; | 1037 | return ret; |
571 | } | 1038 | } |
572 | 1039 | ||
1040 | static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | ||
1041 | struct btrfs_free_space *entry, | ||
1042 | struct btrfs_free_cluster *cluster, | ||
1043 | u64 offset, u64 bytes, u64 min_bytes) | ||
1044 | { | ||
1045 | unsigned long next_zero; | ||
1046 | unsigned long i; | ||
1047 | unsigned long search_bits; | ||
1048 | unsigned long total_bits; | ||
1049 | unsigned long found_bits; | ||
1050 | unsigned long start = 0; | ||
1051 | unsigned long total_found = 0; | ||
1052 | bool found = false; | ||
1053 | |||
1054 | i = offset_to_bit(entry->offset, block_group->sectorsize, | ||
1055 | max_t(u64, offset, entry->offset)); | ||
1056 | search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); | ||
1057 | total_bits = bytes_to_bits(bytes, block_group->sectorsize); | ||
1058 | |||
1059 | again: | ||
1060 | found_bits = 0; | ||
1061 | for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i); | ||
1062 | i < BITS_PER_BITMAP; | ||
1063 | i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) { | ||
1064 | next_zero = find_next_zero_bit(entry->bitmap, | ||
1065 | BITS_PER_BITMAP, i); | ||
1066 | if (next_zero - i >= search_bits) { | ||
1067 | found_bits = next_zero - i; | ||
1068 | break; | ||
1069 | } | ||
1070 | i = next_zero; | ||
1071 | } | ||
1072 | |||
1073 | if (!found_bits) | ||
1074 | return -1; | ||
1075 | |||
1076 | if (!found) { | ||
1077 | start = i; | ||
1078 | found = true; | ||
1079 | } | ||
1080 | |||
1081 | total_found += found_bits; | ||
1082 | |||
1083 | if (cluster->max_size < found_bits * block_group->sectorsize) | ||
1084 | cluster->max_size = found_bits * block_group->sectorsize; | ||
1085 | |||
1086 | if (total_found < total_bits) { | ||
1087 | i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero); | ||
1088 | if (i - start > total_bits * 2) { | ||
1089 | total_found = 0; | ||
1090 | cluster->max_size = 0; | ||
1091 | found = false; | ||
1092 | } | ||
1093 | goto again; | ||
1094 | } | ||
1095 | |||
1096 | cluster->window_start = start * block_group->sectorsize + | ||
1097 | entry->offset; | ||
1098 | cluster->points_to_bitmap = true; | ||
1099 | |||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
573 | /* | 1103 | /* |
574 | * here we try to find a cluster of blocks in a block group. The goal | 1104 | * here we try to find a cluster of blocks in a block group. The goal |
575 | * is to find at least bytes free and up to empty_size + bytes free. | 1105 | * is to find at least bytes free and up to empty_size + bytes free. |
@@ -587,12 +1117,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
587 | struct btrfs_free_space *entry = NULL; | 1117 | struct btrfs_free_space *entry = NULL; |
588 | struct rb_node *node; | 1118 | struct rb_node *node; |
589 | struct btrfs_free_space *next; | 1119 | struct btrfs_free_space *next; |
590 | struct btrfs_free_space *last; | 1120 | struct btrfs_free_space *last = NULL; |
591 | u64 min_bytes; | 1121 | u64 min_bytes; |
592 | u64 window_start; | 1122 | u64 window_start; |
593 | u64 window_free; | 1123 | u64 window_free; |
594 | u64 max_extent = 0; | 1124 | u64 max_extent = 0; |
595 | int total_retries = 0; | 1125 | bool found_bitmap = false; |
596 | int ret; | 1126 | int ret; |
597 | 1127 | ||
598 | /* for metadata, allow allocates with more holes */ | 1128 | /* for metadata, allow allocates with more holes */ |
@@ -620,31 +1150,80 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
620 | goto out; | 1150 | goto out; |
621 | } | 1151 | } |
622 | again: | 1152 | again: |
623 | min_bytes = min(min_bytes, bytes + empty_size); | 1153 | entry = tree_search_offset(block_group, offset, found_bitmap, 1); |
624 | entry = tree_search_bytes(&block_group->free_space_bytes, | ||
625 | offset, min_bytes); | ||
626 | if (!entry) { | 1154 | if (!entry) { |
627 | ret = -ENOSPC; | 1155 | ret = -ENOSPC; |
628 | goto out; | 1156 | goto out; |
629 | } | 1157 | } |
1158 | |||
1159 | /* | ||
1160 | * If found_bitmap is true, we exhausted our search for extent entries, | ||
1161 | * and we just want to search all of the bitmaps that we can find, and | ||
1162 | * ignore any extent entries we find. | ||
1163 | */ | ||
1164 | while (entry->bitmap || found_bitmap || | ||
1165 | (!entry->bitmap && entry->bytes < min_bytes)) { | ||
1166 | struct rb_node *node = rb_next(&entry->offset_index); | ||
1167 | |||
1168 | if (entry->bitmap && entry->bytes > bytes + empty_size) { | ||
1169 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, | ||
1170 | offset, bytes + empty_size, | ||
1171 | min_bytes); | ||
1172 | if (!ret) | ||
1173 | goto got_it; | ||
1174 | } | ||
1175 | |||
1176 | if (!node) { | ||
1177 | ret = -ENOSPC; | ||
1178 | goto out; | ||
1179 | } | ||
1180 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1181 | } | ||
1182 | |||
1183 | /* | ||
1184 | * We already searched all the extent entries from the passed in offset | ||
1185 | * to the end and didn't find enough space for the cluster, and we also | ||
1186 | * didn't find any bitmaps that met our criteria, just go ahead and exit | ||
1187 | */ | ||
1188 | if (found_bitmap) { | ||
1189 | ret = -ENOSPC; | ||
1190 | goto out; | ||
1191 | } | ||
1192 | |||
1193 | cluster->points_to_bitmap = false; | ||
630 | window_start = entry->offset; | 1194 | window_start = entry->offset; |
631 | window_free = entry->bytes; | 1195 | window_free = entry->bytes; |
632 | last = entry; | 1196 | last = entry; |
633 | max_extent = entry->bytes; | 1197 | max_extent = entry->bytes; |
634 | 1198 | ||
635 | while(1) { | 1199 | while (1) { |
636 | /* out window is just right, lets fill it */ | 1200 | /* out window is just right, lets fill it */ |
637 | if (window_free >= bytes + empty_size) | 1201 | if (window_free >= bytes + empty_size) |
638 | break; | 1202 | break; |
639 | 1203 | ||
640 | node = rb_next(&last->offset_index); | 1204 | node = rb_next(&last->offset_index); |
641 | if (!node) { | 1205 | if (!node) { |
1206 | if (found_bitmap) | ||
1207 | goto again; | ||
642 | ret = -ENOSPC; | 1208 | ret = -ENOSPC; |
643 | goto out; | 1209 | goto out; |
644 | } | 1210 | } |
645 | next = rb_entry(node, struct btrfs_free_space, offset_index); | 1211 | next = rb_entry(node, struct btrfs_free_space, offset_index); |
646 | 1212 | ||
647 | /* | 1213 | /* |
1214 | * we found a bitmap, so if this search doesn't result in a | ||
1215 | * cluster, we know to go and search again for the bitmaps and | ||
1216 | * start looking for space there | ||
1217 | */ | ||
1218 | if (next->bitmap) { | ||
1219 | if (!found_bitmap) | ||
1220 | offset = next->offset; | ||
1221 | found_bitmap = true; | ||
1222 | last = next; | ||
1223 | continue; | ||
1224 | } | ||
1225 | |||
1226 | /* | ||
648 | * we haven't filled the empty size and the window is | 1227 | * we haven't filled the empty size and the window is |
649 | * very large. reset and try again | 1228 | * very large. reset and try again |
650 | */ | 1229 | */ |
@@ -655,19 +1234,6 @@ again: | |||
655 | window_free = entry->bytes; | 1234 | window_free = entry->bytes; |
656 | last = entry; | 1235 | last = entry; |
657 | max_extent = 0; | 1236 | max_extent = 0; |
658 | total_retries++; | ||
659 | if (total_retries % 64 == 0) { | ||
660 | if (min_bytes >= (bytes + empty_size)) { | ||
661 | ret = -ENOSPC; | ||
662 | goto out; | ||
663 | } | ||
664 | /* | ||
665 | * grow our allocation a bit, we're not having | ||
666 | * much luck | ||
667 | */ | ||
668 | min_bytes *= 2; | ||
669 | goto again; | ||
670 | } | ||
671 | } else { | 1237 | } else { |
672 | last = next; | 1238 | last = next; |
673 | window_free += next->bytes; | 1239 | window_free += next->bytes; |
@@ -685,11 +1251,19 @@ again: | |||
685 | * The cluster includes an rbtree, but only uses the offset index | 1251 | * The cluster includes an rbtree, but only uses the offset index |
686 | * of each free space cache entry. | 1252 | * of each free space cache entry. |
687 | */ | 1253 | */ |
688 | while(1) { | 1254 | while (1) { |
689 | node = rb_next(&entry->offset_index); | 1255 | node = rb_next(&entry->offset_index); |
690 | unlink_free_space(block_group, entry); | 1256 | if (entry->bitmap && node) { |
1257 | entry = rb_entry(node, struct btrfs_free_space, | ||
1258 | offset_index); | ||
1259 | continue; | ||
1260 | } else if (entry->bitmap && !node) { | ||
1261 | break; | ||
1262 | } | ||
1263 | |||
1264 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | ||
691 | ret = tree_insert_offset(&cluster->root, entry->offset, | 1265 | ret = tree_insert_offset(&cluster->root, entry->offset, |
692 | &entry->offset_index); | 1266 | &entry->offset_index, 0); |
693 | BUG_ON(ret); | 1267 | BUG_ON(ret); |
694 | 1268 | ||
695 | if (!node || entry == last) | 1269 | if (!node || entry == last) |
@@ -697,8 +1271,10 @@ again: | |||
697 | 1271 | ||
698 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 1272 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
699 | } | 1273 | } |
700 | ret = 0; | 1274 | |
701 | cluster->max_size = max_extent; | 1275 | cluster->max_size = max_extent; |
1276 | got_it: | ||
1277 | ret = 0; | ||
702 | atomic_inc(&block_group->count); | 1278 | atomic_inc(&block_group->count); |
703 | list_add_tail(&cluster->block_group_list, &block_group->cluster_list); | 1279 | list_add_tail(&cluster->block_group_list, &block_group->cluster_list); |
704 | cluster->block_group = block_group; | 1280 | cluster->block_group = block_group; |
@@ -718,6 +1294,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
718 | spin_lock_init(&cluster->refill_lock); | 1294 | spin_lock_init(&cluster->refill_lock); |
719 | cluster->root.rb_node = NULL; | 1295 | cluster->root.rb_node = NULL; |
720 | cluster->max_size = 0; | 1296 | cluster->max_size = 0; |
1297 | cluster->points_to_bitmap = false; | ||
721 | INIT_LIST_HEAD(&cluster->block_group_list); | 1298 | INIT_LIST_HEAD(&cluster->block_group_list); |
722 | cluster->block_group = NULL; | 1299 | cluster->block_group = NULL; |
723 | } | 1300 | } |
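
The cluster-setup changes above teach btrfs_find_space_cluster() about bitmap entries: extent entries are windowed first, bitmaps are skipped but remembered, and the search restarts against the first bitmap seen if no extent window is big enough. Below is a minimal userspace sketch of that two-pass idea over a sorted list of free-space entries; struct free_entry and find_extent_window() are simplified stand-ins, not the real btrfs types, and the gap handling and the bitmap pass itself are left out.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* simplified stand-in for struct btrfs_free_space */
struct free_entry {
    unsigned long long offset;
    unsigned long long bytes;
    bool is_bitmap;             /* bitmap entries are skipped on the first pass */
};

/*
 * First pass: grow a window over extent entries until it covers "needed"
 * bytes.  Bitmap entries are skipped but remembered; if the extent pass
 * fails and a bitmap was seen, the caller would retry against the bitmaps.
 * Returns the index of the first window entry, or -1 on failure.
 */
static int find_extent_window(const struct free_entry *e, size_t n,
                              unsigned long long needed, bool *saw_bitmap)
{
    size_t start = 0;
    unsigned long long window = 0;

    *saw_bitmap = false;
    for (size_t i = 0; i < n; i++) {
        if (e[i].is_bitmap) {
            *saw_bitmap = true;     /* note it, keep scanning extents */
            continue;
        }
        if (window == 0)
            start = i;
        window += e[i].bytes;
        if (window >= needed)
            return (int)start;
    }
    return -1;                      /* caller falls back to bitmaps if any were seen */
}

int main(void)
{
    struct free_entry e[] = {
        { 0,     4096,  false },
        { 8192,  0,     true  },    /* bitmap entry, skipped on pass one */
        { 65536, 16384, false },
        { 98304, 32768, false },
    };
    bool saw_bitmap;
    int idx = find_extent_window(e, 4, 40960, &saw_bitmap);

    printf("window starts at entry %d, bitmap seen: %d\n", idx, saw_bitmap);
    return 0;
}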
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 266fb8764054..890a8e79011b 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -19,6 +19,14 @@ | |||
19 | #ifndef __BTRFS_FREE_SPACE_CACHE | 19 | #ifndef __BTRFS_FREE_SPACE_CACHE |
20 | #define __BTRFS_FREE_SPACE_CACHE | 20 | #define __BTRFS_FREE_SPACE_CACHE |
21 | 21 | ||
22 | struct btrfs_free_space { | ||
23 | struct rb_node offset_index; | ||
24 | u64 offset; | ||
25 | u64 bytes; | ||
26 | unsigned long *bitmap; | ||
27 | struct list_head list; | ||
28 | }; | ||
29 | |||
22 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 30 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, |
23 | u64 bytenr, u64 size); | 31 | u64 bytenr, u64 size); |
24 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 32 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 791eab19e330..56fe83fa60c4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -2603,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
2603 | if (root->ref_cows) | 2603 | if (root->ref_cows) |
2604 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); | 2604 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); |
2605 | path = btrfs_alloc_path(); | 2605 | path = btrfs_alloc_path(); |
2606 | path->reada = -1; | ||
2607 | BUG_ON(!path); | 2606 | BUG_ON(!path); |
2607 | path->reada = -1; | ||
2608 | 2608 | ||
2609 | /* FIXME, add redo link to tree so we don't leak on crash */ | 2609 | /* FIXME, add redo link to tree so we don't leak on crash */ |
2610 | key.objectid = inode->i_ino; | 2610 | key.objectid = inode->i_ino; |
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 6d6523da0a30..0d126be22b63 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c | |||
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) | |||
309 | } | 309 | } |
310 | printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", | 310 | printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", |
311 | (unsigned long long)btrfs_header_bytenr(c), | 311 | (unsigned long long)btrfs_header_bytenr(c), |
312 | btrfs_header_level(c), nr, | 312 | level, nr, |
313 | (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); | 313 | (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); |
314 | for (i = 0; i < nr; i++) { | 314 | for (i = 0; i < nr; i++) { |
315 | btrfs_node_key_to_cpu(c, &key, i); | 315 | btrfs_node_key_to_cpu(c, &key, i); |
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) | |||
326 | btrfs_level_size(root, level - 1), | 326 | btrfs_level_size(root, level - 1), |
327 | btrfs_node_ptr_generation(c, i)); | 327 | btrfs_node_ptr_generation(c, i)); |
328 | if (btrfs_is_leaf(next) && | 328 | if (btrfs_is_leaf(next) && |
329 | btrfs_header_level(c) != 1) | 329 | level != 1) |
330 | BUG(); | 330 | BUG(); |
331 | if (btrfs_header_level(next) != | 331 | if (btrfs_header_level(next) != |
332 | btrfs_header_level(c) - 1) | 332 | level - 1) |
333 | BUG(); | 333 | BUG(); |
334 | btrfs_print_tree(root, next); | 334 | btrfs_print_tree(root, next); |
335 | free_extent_buffer(next); | 335 | free_extent_buffer(next); |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 008397934778..e71264d1c2c9 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -670,6 +670,8 @@ again: | |||
670 | err = ret; | 670 | err = ret; |
671 | goto out; | 671 | goto out; |
672 | } | 672 | } |
673 | if (ret > 0 && path2->slots[level] > 0) | ||
674 | path2->slots[level]--; | ||
673 | 675 | ||
674 | eb = path2->nodes[level]; | 676 | eb = path2->nodes[level]; |
675 | WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != | 677 | WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != |
@@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, | |||
1609 | BUG_ON(level == 0); | 1611 | BUG_ON(level == 0); |
1610 | path->lowest_level = level; | 1612 | path->lowest_level = level; |
1611 | ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); | 1613 | ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); |
1614 | path->lowest_level = 0; | ||
1612 | if (ret < 0) { | 1615 | if (ret < 0) { |
1613 | btrfs_free_path(path); | 1616 | btrfs_free_path(path); |
1614 | return ret; | 1617 | return ret; |
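
The relocation.c hunk steps path2->slots[level] back by one when btrfs_search_slot() returns a positive value, i.e. when the exact key was not found and the returned slot points at the next higher item rather than the item that covers the key. A generic sketch of that insertion-point adjustment on a plain sorted array; search() here is a stand-in for the tree search, not btrfs code.

#include <stdio.h>

/*
 * Return the index of "key" if present; otherwise return the insertion point
 * (index of the first element greater than key) and set *exact to 0.  This
 * mirrors how a tree search can land one slot past the item covering the key.
 */
static int search(const int *a, int n, int key, int *exact)
{
    int lo = 0, hi = n;

    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if (a[mid] < key)
            lo = mid + 1;
        else
            hi = mid;
    }
    *exact = (lo < n && a[lo] == key);
    return lo;
}

int main(void)
{
    int items[] = { 10, 20, 30, 40 };   /* think: key offsets of tree items */
    int exact;
    int slot = search(items, 4, 25, &exact);

    /* not an exact hit and not at the start: step back to the covering item */
    if (!exact && slot > 0)
        slot--;
    printf("key 25 is covered by item %d at slot %d\n", items[slot], slot);
    return 0;
}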
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2dbf1c1f56ee..e51d2bc532f8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -40,6 +40,14 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) | |||
40 | } | 40 | } |
41 | } | 41 | } |
42 | 42 | ||
43 | static noinline void switch_commit_root(struct btrfs_root *root) | ||
44 | { | ||
45 | down_write(&root->commit_root_sem); | ||
46 | free_extent_buffer(root->commit_root); | ||
47 | root->commit_root = btrfs_root_node(root); | ||
48 | up_write(&root->commit_root_sem); | ||
49 | } | ||
50 | |||
43 | /* | 51 | /* |
44 | * either allocate a new transaction or hop into the existing one | 52 | * either allocate a new transaction or hop into the existing one |
45 | */ | 53 | */ |
@@ -444,9 +452,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
444 | 452 | ||
445 | btrfs_write_dirty_block_groups(trans, root); | 453 | btrfs_write_dirty_block_groups(trans, root); |
446 | 454 | ||
447 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
448 | BUG_ON(ret); | ||
449 | |||
450 | while (1) { | 455 | while (1) { |
451 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); | 456 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
452 | if (old_root_bytenr == root->node->start) | 457 | if (old_root_bytenr == root->node->start) |
@@ -457,13 +462,11 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
457 | &root->root_key, | 462 | &root->root_key, |
458 | &root->root_item); | 463 | &root->root_item); |
459 | BUG_ON(ret); | 464 | BUG_ON(ret); |
460 | btrfs_write_dirty_block_groups(trans, root); | ||
461 | 465 | ||
462 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | 466 | ret = btrfs_write_dirty_block_groups(trans, root); |
463 | BUG_ON(ret); | 467 | BUG_ON(ret); |
464 | } | 468 | } |
465 | free_extent_buffer(root->commit_root); | 469 | switch_commit_root(root); |
466 | root->commit_root = btrfs_root_node(root); | ||
467 | return 0; | 470 | return 0; |
468 | } | 471 | } |
469 | 472 | ||
@@ -495,9 +498,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
495 | root = list_entry(next, struct btrfs_root, dirty_list); | 498 | root = list_entry(next, struct btrfs_root, dirty_list); |
496 | 499 | ||
497 | update_cowonly_root(trans, root); | 500 | update_cowonly_root(trans, root); |
498 | |||
499 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
500 | BUG_ON(ret); | ||
501 | } | 501 | } |
502 | return 0; | 502 | return 0; |
503 | } | 503 | } |
@@ -544,8 +544,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
544 | btrfs_update_reloc_root(trans, root); | 544 | btrfs_update_reloc_root(trans, root); |
545 | 545 | ||
546 | if (root->commit_root != root->node) { | 546 | if (root->commit_root != root->node) { |
547 | free_extent_buffer(root->commit_root); | 547 | switch_commit_root(root); |
548 | root->commit_root = btrfs_root_node(root); | ||
549 | btrfs_set_root_node(&root->root_item, | 548 | btrfs_set_root_node(&root->root_item, |
550 | root->node); | 549 | root->node); |
551 | } | 550 | } |
@@ -943,9 +942,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
943 | 942 | ||
944 | mutex_unlock(&root->fs_info->trans_mutex); | 943 | mutex_unlock(&root->fs_info->trans_mutex); |
945 | 944 | ||
946 | if (flush_on_commit || snap_pending) { | 945 | if (flush_on_commit) { |
947 | if (flush_on_commit) | 946 | btrfs_start_delalloc_inodes(root); |
948 | btrfs_start_delalloc_inodes(root); | 947 | ret = btrfs_wait_ordered_extents(root, 0); |
948 | BUG_ON(ret); | ||
949 | } else if (snap_pending) { | ||
949 | ret = btrfs_wait_ordered_extents(root, 1); | 950 | ret = btrfs_wait_ordered_extents(root, 1); |
950 | BUG_ON(ret); | 951 | BUG_ON(ret); |
951 | } | 952 | } |
@@ -1009,15 +1010,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1009 | 1010 | ||
1010 | btrfs_set_root_node(&root->fs_info->tree_root->root_item, | 1011 | btrfs_set_root_node(&root->fs_info->tree_root->root_item, |
1011 | root->fs_info->tree_root->node); | 1012 | root->fs_info->tree_root->node); |
1012 | free_extent_buffer(root->fs_info->tree_root->commit_root); | 1013 | switch_commit_root(root->fs_info->tree_root); |
1013 | root->fs_info->tree_root->commit_root = | ||
1014 | btrfs_root_node(root->fs_info->tree_root); | ||
1015 | 1014 | ||
1016 | btrfs_set_root_node(&root->fs_info->chunk_root->root_item, | 1015 | btrfs_set_root_node(&root->fs_info->chunk_root->root_item, |
1017 | root->fs_info->chunk_root->node); | 1016 | root->fs_info->chunk_root->node); |
1018 | free_extent_buffer(root->fs_info->chunk_root->commit_root); | 1017 | switch_commit_root(root->fs_info->chunk_root); |
1019 | root->fs_info->chunk_root->commit_root = | ||
1020 | btrfs_root_node(root->fs_info->chunk_root); | ||
1021 | 1018 | ||
1022 | update_super_roots(root); | 1019 | update_super_roots(root); |
1023 | 1020 | ||
@@ -1057,6 +1054,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1057 | cur_trans->commit_done = 1; | 1054 | cur_trans->commit_done = 1; |
1058 | 1055 | ||
1059 | root->fs_info->last_trans_committed = cur_trans->transid; | 1056 | root->fs_info->last_trans_committed = cur_trans->transid; |
1057 | |||
1060 | wake_up(&cur_trans->commit_wait); | 1058 | wake_up(&cur_trans->commit_wait); |
1061 | 1059 | ||
1062 | put_transaction(cur_trans); | 1060 | put_transaction(cur_trans); |
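
The new switch_commit_root() helper above replaces several open-coded "free the old commit root, take a reference on the current root node" sequences and performs the swap under commit_root_sem. A rough userspace sketch of that pointer-swap-under-a-write-lock pattern, with the semaphore modeled as a pthread rwlock and the extent buffer as a tiny refcounted struct; all names here are illustrative, not the btrfs ones.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for a refcounted extent buffer */
struct buf {
    int refs;
};

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)
{
    if (--b->refs == 0)
        free(b);
}

struct root {
    pthread_rwlock_t commit_root_sem;
    struct buf *node;          /* current live root node */
    struct buf *commit_root;   /* snapshot readers use for the last commit */
};

/* analogous to switch_commit_root(): retire the old snapshot, pin the new one */
static void switch_commit_root(struct root *r)
{
    pthread_rwlock_wrlock(&r->commit_root_sem);
    buf_put(r->commit_root);            /* drop the previous commit root */
    r->commit_root = buf_get(r->node);  /* take a new reference on the live node */
    pthread_rwlock_unlock(&r->commit_root_sem);
}

int main(void)
{
    struct root r;

    pthread_rwlock_init(&r.commit_root_sem, NULL);
    r.node = calloc(1, sizeof(*r.node));
    r.node->refs = 1;                        /* held as the live root node */
    r.commit_root = calloc(1, sizeof(*r.commit_root));
    r.commit_root->refs = 1;                 /* old snapshot from the last commit */

    switch_commit_root(&r);                  /* frees the old snapshot, pins node */
    printf("refs on live node: %d\n", r.node->refs);
    return 0;
}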
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c13922206d1b..d91b0de7c502 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |||
797 | return -ENOENT; | 797 | return -ENOENT; |
798 | 798 | ||
799 | inode = read_one_inode(root, key->objectid); | 799 | inode = read_one_inode(root, key->objectid); |
800 | BUG_ON(!dir); | 800 | BUG_ON(!inode); |
801 | 801 | ||
802 | ref_ptr = btrfs_item_ptr_offset(eb, slot); | 802 | ref_ptr = btrfs_item_ptr_offset(eb, slot); |
803 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); | 803 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3ab80e9cd767..5dbefd11b4af 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -721,7 +721,8 @@ error: | |||
721 | */ | 721 | */ |
722 | static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, | 722 | static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, |
723 | struct btrfs_device *device, | 723 | struct btrfs_device *device, |
724 | u64 num_bytes, u64 *start) | 724 | u64 num_bytes, u64 *start, |
725 | u64 *max_avail) | ||
725 | { | 726 | { |
726 | struct btrfs_key key; | 727 | struct btrfs_key key; |
727 | struct btrfs_root *root = device->dev_root; | 728 | struct btrfs_root *root = device->dev_root; |
@@ -758,9 +759,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, | |||
758 | ret = btrfs_search_slot(trans, root, &key, path, 0, 0); | 759 | ret = btrfs_search_slot(trans, root, &key, path, 0, 0); |
759 | if (ret < 0) | 760 | if (ret < 0) |
760 | goto error; | 761 | goto error; |
761 | ret = btrfs_previous_item(root, path, 0, key.type); | 762 | if (ret > 0) { |
762 | if (ret < 0) | 763 | ret = btrfs_previous_item(root, path, key.objectid, key.type); |
763 | goto error; | 764 | if (ret < 0) |
765 | goto error; | ||
766 | if (ret > 0) | ||
767 | start_found = 1; | ||
768 | } | ||
764 | l = path->nodes[0]; | 769 | l = path->nodes[0]; |
765 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); | 770 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); |
766 | while (1) { | 771 | while (1) { |
@@ -803,6 +808,10 @@ no_more_items: | |||
803 | if (last_byte < search_start) | 808 | if (last_byte < search_start) |
804 | last_byte = search_start; | 809 | last_byte = search_start; |
805 | hole_size = key.offset - last_byte; | 810 | hole_size = key.offset - last_byte; |
811 | |||
812 | if (hole_size > *max_avail) | ||
813 | *max_avail = hole_size; | ||
814 | |||
806 | if (key.offset > last_byte && | 815 | if (key.offset > last_byte && |
807 | hole_size >= num_bytes) { | 816 | hole_size >= num_bytes) { |
808 | *start = last_byte; | 817 | *start = last_byte; |
@@ -1621,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans, | |||
1621 | device->fs_devices->total_rw_bytes += diff; | 1630 | device->fs_devices->total_rw_bytes += diff; |
1622 | 1631 | ||
1623 | device->total_bytes = new_size; | 1632 | device->total_bytes = new_size; |
1633 | device->disk_total_bytes = new_size; | ||
1624 | btrfs_clear_space_info_full(device->dev_root->fs_info); | 1634 | btrfs_clear_space_info_full(device->dev_root->fs_info); |
1625 | 1635 | ||
1626 | return btrfs_update_device(trans, device); | 1636 | return btrfs_update_device(trans, device); |
@@ -2007,7 +2017,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) | |||
2007 | goto done; | 2017 | goto done; |
2008 | if (ret) { | 2018 | if (ret) { |
2009 | ret = 0; | 2019 | ret = 0; |
2010 | goto done; | 2020 | break; |
2011 | } | 2021 | } |
2012 | 2022 | ||
2013 | l = path->nodes[0]; | 2023 | l = path->nodes[0]; |
@@ -2015,7 +2025,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) | |||
2015 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); | 2025 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); |
2016 | 2026 | ||
2017 | if (key.objectid != device->devid) | 2027 | if (key.objectid != device->devid) |
2018 | goto done; | 2028 | break; |
2019 | 2029 | ||
2020 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); | 2030 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); |
2021 | length = btrfs_dev_extent_length(l, dev_extent); | 2031 | length = btrfs_dev_extent_length(l, dev_extent); |
@@ -2171,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2171 | max_chunk_size); | 2181 | max_chunk_size); |
2172 | 2182 | ||
2173 | again: | 2183 | again: |
2184 | max_avail = 0; | ||
2174 | if (!map || map->num_stripes != num_stripes) { | 2185 | if (!map || map->num_stripes != num_stripes) { |
2175 | kfree(map); | 2186 | kfree(map); |
2176 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); | 2187 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); |
@@ -2219,7 +2230,8 @@ again: | |||
2219 | 2230 | ||
2220 | if (device->in_fs_metadata && avail >= min_free) { | 2231 | if (device->in_fs_metadata && avail >= min_free) { |
2221 | ret = find_free_dev_extent(trans, device, | 2232 | ret = find_free_dev_extent(trans, device, |
2222 | min_free, &dev_offset); | 2233 | min_free, &dev_offset, |
2234 | &max_avail); | ||
2223 | if (ret == 0) { | 2235 | if (ret == 0) { |
2224 | list_move_tail(&device->dev_alloc_list, | 2236 | list_move_tail(&device->dev_alloc_list, |
2225 | &private_devs); | 2237 | &private_devs); |
@@ -2795,26 +2807,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | |||
2795 | } | 2807 | } |
2796 | } | 2808 | } |
2797 | 2809 | ||
2798 | for (i = 0; i > nr; i++) { | ||
2799 | struct btrfs_multi_bio *multi; | ||
2800 | struct btrfs_bio_stripe *stripe; | ||
2801 | int ret; | ||
2802 | |||
2803 | length = 1; | ||
2804 | ret = btrfs_map_block(map_tree, WRITE, buf[i], | ||
2805 | &length, &multi, 0); | ||
2806 | BUG_ON(ret); | ||
2807 | |||
2808 | stripe = multi->stripes; | ||
2809 | for (j = 0; j < multi->num_stripes; j++) { | ||
2810 | if (stripe->physical >= physical && | ||
2811 | physical < stripe->physical + length) | ||
2812 | break; | ||
2813 | } | ||
2814 | BUG_ON(j >= multi->num_stripes); | ||
2815 | kfree(multi); | ||
2816 | } | ||
2817 | |||
2818 | *logical = buf; | 2810 | *logical = buf; |
2819 | *naddrs = nr; | 2811 | *naddrs = nr; |
2820 | *stripe_len = map->stripe_len; | 2812 | *stripe_len = map->stripe_len; |
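
find_free_dev_extent() now reports the largest hole it encountered through the new *max_avail argument even when no hole satisfies the request, which lets __btrfs_alloc_chunk() shrink the stripe size and retry instead of failing outright. A small standalone sketch of that "remember the best hole while scanning allocated extents" idea; the device layout below is invented for illustration.

#include <stdio.h>

struct dev_extent {
    unsigned long long start;
    unsigned long long len;
};

/*
 * Scan allocated extents (sorted by start) on a device of dev_size bytes and
 * look for a hole of at least "want" bytes.  Always record the largest hole
 * seen in *max_avail so the caller can retry with a smaller request.
 * Returns 0 and sets *found on success, -1 if no hole is big enough.
 */
static int find_free_extent(const struct dev_extent *ext, int n,
                            unsigned long long dev_size,
                            unsigned long long want,
                            unsigned long long *found,
                            unsigned long long *max_avail)
{
    unsigned long long last_end = 0;

    *max_avail = 0;
    for (int i = 0; i <= n; i++) {
        unsigned long long hole_start = last_end;
        unsigned long long hole_end = (i < n) ? ext[i].start : dev_size;
        unsigned long long hole = hole_end - hole_start;

        if (hole > *max_avail)
            *max_avail = hole;
        if (hole >= want) {
            *found = hole_start;
            return 0;
        }
        if (i < n)
            last_end = ext[i].start + ext[i].len;
    }
    return -1;
}

int main(void)
{
    struct dev_extent ext[] = { { 0, 1024 }, { 2048, 512 }, { 4096, 1024 } };
    unsigned long long found, max_avail;

    if (find_free_extent(ext, 3, 8192, 4096, &found, &max_avail))
        printf("no hole >= 4096, largest hole was %llu\n", max_avail);
    else
        printf("hole at %llu\n", found);
    return 0;
}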
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9bb5c8750736..fc44d316d0bb 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2452,10 +2452,10 @@ try_mount_again: | |||
2452 | tcon->local_lease = volume_info->local_lease; | 2452 | tcon->local_lease = volume_info->local_lease; |
2453 | } | 2453 | } |
2454 | if (pSesInfo) { | 2454 | if (pSesInfo) { |
2455 | if (pSesInfo->capabilities & CAP_LARGE_FILES) { | 2455 | if (pSesInfo->capabilities & CAP_LARGE_FILES) |
2456 | sb->s_maxbytes = (u64) 1 << 63; | 2456 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
2457 | } else | 2457 | else |
2458 | sb->s_maxbytes = (u64) 1 << 31; /* 2 GB */ | 2458 | sb->s_maxbytes = MAX_NON_LFS; |
2459 | } | 2459 | } |
2460 | 2460 | ||
2461 | /* BB FIXME fix time_gran to be larger for LANMAN sessions */ | 2461 | /* BB FIXME fix time_gran to be larger for LANMAN sessions */ |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 18afe57b2461..82d83839655e 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -212,7 +212,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, | |||
212 | * junction to the new submount (ie to setup the fake directory | 212 | * junction to the new submount (ie to setup the fake directory |
213 | * which represents a DFS referral). | 213 | * which represents a DFS referral). |
214 | */ | 214 | */ |
215 | void | 215 | static void |
216 | cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) | 216 | cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) |
217 | { | 217 | { |
218 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 218 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
@@ -388,7 +388,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, | |||
388 | } | 388 | } |
389 | 389 | ||
390 | /* Fill a cifs_fattr struct with info from FILE_ALL_INFO */ | 390 | /* Fill a cifs_fattr struct with info from FILE_ALL_INFO */ |
391 | void | 391 | static void |
392 | cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, | 392 | cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, |
393 | struct cifs_sb_info *cifs_sb, bool adjust_tz) | 393 | struct cifs_sb_info *cifs_sb, bool adjust_tz) |
394 | { | 394 | { |
@@ -513,9 +513,12 @@ int cifs_get_inode_info(struct inode **pinode, | |||
513 | cifs_sb->mnt_cifs_flags & | 513 | cifs_sb->mnt_cifs_flags & |
514 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 514 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
515 | if (rc1) { | 515 | if (rc1) { |
516 | /* BB EOPNOSUPP disable SERVER_INUM? */ | ||
517 | cFYI(1, ("GetSrvInodeNum rc %d", rc1)); | 516 | cFYI(1, ("GetSrvInodeNum rc %d", rc1)); |
518 | fattr.cf_uniqueid = iunique(sb, ROOT_I); | 517 | fattr.cf_uniqueid = iunique(sb, ROOT_I); |
518 | /* disable serverino if call not supported */ | ||
519 | if (rc1 == -EINVAL) | ||
520 | cifs_sb->mnt_cifs_flags &= | ||
521 | ~CIFS_MOUNT_SERVER_INUM; | ||
519 | } | 522 | } |
520 | } else { | 523 | } else { |
521 | fattr.cf_uniqueid = iunique(sb, ROOT_I); | 524 | fattr.cf_uniqueid = iunique(sb, ROOT_I); |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index af737bb56cb7..259525c9abb8 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat, | |||
1303 | } | 1303 | } |
1304 | (*new_auth_tok)->session_key.encrypted_key_size = | 1304 | (*new_auth_tok)->session_key.encrypted_key_size = |
1305 | (body_size - (ECRYPTFS_SALT_SIZE + 5)); | 1305 | (body_size - (ECRYPTFS_SALT_SIZE + 5)); |
1306 | if ((*new_auth_tok)->session_key.encrypted_key_size | ||
1307 | > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) { | ||
1308 | printk(KERN_WARNING "Tag 3 packet contains key larger " | ||
1309 | "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n"); | ||
1310 | rc = -EINVAL; | ||
1311 | goto out_free; | ||
1312 | } | ||
1306 | if (unlikely(data[(*packet_size)++] != 0x04)) { | 1313 | if (unlikely(data[(*packet_size)++] != 0x04)) { |
1307 | printk(KERN_WARNING "Unknown version number [%d]\n", | 1314 | printk(KERN_WARNING "Unknown version number [%d]\n", |
1308 | data[(*packet_size) - 1]); | 1315 | data[(*packet_size) - 1]); |
@@ -1449,6 +1456,12 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents, | |||
1449 | rc = -EINVAL; | 1456 | rc = -EINVAL; |
1450 | goto out; | 1457 | goto out; |
1451 | } | 1458 | } |
1459 | if (unlikely((*tag_11_contents_size) > max_contents_bytes)) { | ||
1460 | printk(KERN_ERR "Literal data section in tag 11 packet exceeds " | ||
1461 | "expected size\n"); | ||
1462 | rc = -EINVAL; | ||
1463 | goto out; | ||
1464 | } | ||
1452 | if (data[(*packet_size)++] != 0x62) { | 1465 | if (data[(*packet_size)++] != 0x62) { |
1453 | printk(KERN_WARNING "Unrecognizable packet\n"); | 1466 | printk(KERN_WARNING "Unrecognizable packet\n"); |
1454 | rc = -EINVAL; | 1467 | rc = -EINVAL; |
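
Both ecryptfs hunks add the same kind of defensive check: a size field parsed from an on-disk packet is validated against the fixed-size destination before anything is copied or indexed with it. A standalone sketch of that pattern; the one-byte-length packet format and the MAX_KEY_BYTES constant below are invented for illustration, not the real Tag 3 or Tag 11 layout.

#include <stdio.h>
#include <string.h>

#define MAX_KEY_BYTES 64    /* stand-in for ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES */

/*
 * Parse a toy packet: [1 byte length][length bytes of key material].
 * The length comes from untrusted data, so it must be bounded both by the
 * fixed-size destination and by what is actually present in the packet.
 */
static int parse_key_packet(const unsigned char *pkt, size_t pkt_len,
                            unsigned char *key_out /* MAX_KEY_BYTES */,
                            size_t *key_len)
{
    size_t len;

    if (pkt_len < 1)
        return -1;

    len = pkt[0];
    if (len > MAX_KEY_BYTES) {
        fprintf(stderr, "key larger than MAX_KEY_BYTES\n");
        return -1;                       /* the new ecryptfs checks, in spirit */
    }
    if (len > pkt_len - 1) {
        fprintf(stderr, "length exceeds packet body\n");
        return -1;
    }

    memcpy(key_out, pkt + 1, len);
    *key_len = len;
    return 0;
}

int main(void)
{
    unsigned char bogus[2] = { 200, 0xAA };   /* claims 200 bytes, carries 1 */
    unsigned char key[MAX_KEY_BYTES];
    size_t key_len;

    if (parse_key_packet(bogus, sizeof(bogus), key, &key_len))
        printf("rejected malformed packet\n");
    return 0;
}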
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 3d724a95882f..373fa90c796a 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c | |||
@@ -130,8 +130,7 @@ static int ext3_readdir(struct file * filp, | |||
130 | struct buffer_head *bh = NULL; | 130 | struct buffer_head *bh = NULL; |
131 | 131 | ||
132 | map_bh.b_state = 0; | 132 | map_bh.b_state = 0; |
133 | err = ext3_get_blocks_handle(NULL, inode, blk, 1, | 133 | err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0); |
134 | &map_bh, 0, 0); | ||
135 | if (err > 0) { | 134 | if (err > 0) { |
136 | pgoff_t index = map_bh.b_blocknr >> | 135 | pgoff_t index = map_bh.b_blocknr >> |
137 | (PAGE_CACHE_SHIFT - inode->i_blkbits); | 136 | (PAGE_CACHE_SHIFT - inode->i_blkbits); |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 5f51fed5c750..b49908a167ae 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -788,7 +788,7 @@ err_out: | |||
788 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | 788 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, |
789 | sector_t iblock, unsigned long maxblocks, | 789 | sector_t iblock, unsigned long maxblocks, |
790 | struct buffer_head *bh_result, | 790 | struct buffer_head *bh_result, |
791 | int create, int extend_disksize) | 791 | int create) |
792 | { | 792 | { |
793 | int err = -EIO; | 793 | int err = -EIO; |
794 | int offsets[4]; | 794 | int offsets[4]; |
@@ -911,13 +911,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
911 | if (!err) | 911 | if (!err) |
912 | err = ext3_splice_branch(handle, inode, iblock, | 912 | err = ext3_splice_branch(handle, inode, iblock, |
913 | partial, indirect_blks, count); | 913 | partial, indirect_blks, count); |
914 | /* | ||
915 | * i_disksize growing is protected by truncate_mutex. Don't forget to | ||
916 | * protect it if you're about to implement concurrent | ||
917 | * ext3_get_block() -bzzz | ||
918 | */ | ||
919 | if (!err && extend_disksize && inode->i_size > ei->i_disksize) | ||
920 | ei->i_disksize = inode->i_size; | ||
921 | mutex_unlock(&ei->truncate_mutex); | 914 | mutex_unlock(&ei->truncate_mutex); |
922 | if (err) | 915 | if (err) |
923 | goto cleanup; | 916 | goto cleanup; |
@@ -972,7 +965,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock, | |||
972 | } | 965 | } |
973 | 966 | ||
974 | ret = ext3_get_blocks_handle(handle, inode, iblock, | 967 | ret = ext3_get_blocks_handle(handle, inode, iblock, |
975 | max_blocks, bh_result, create, 0); | 968 | max_blocks, bh_result, create); |
976 | if (ret > 0) { | 969 | if (ret > 0) { |
977 | bh_result->b_size = (ret << inode->i_blkbits); | 970 | bh_result->b_size = (ret << inode->i_blkbits); |
978 | ret = 0; | 971 | ret = 0; |
@@ -1005,7 +998,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, | |||
1005 | dummy.b_blocknr = -1000; | 998 | dummy.b_blocknr = -1000; |
1006 | buffer_trace_init(&dummy.b_history); | 999 | buffer_trace_init(&dummy.b_history); |
1007 | err = ext3_get_blocks_handle(handle, inode, block, 1, | 1000 | err = ext3_get_blocks_handle(handle, inode, block, 1, |
1008 | &dummy, create, 1); | 1001 | &dummy, create); |
1009 | /* | 1002 | /* |
1010 | * ext3_get_blocks_handle() returns number of blocks | 1003 | * ext3_get_blocks_handle() returns number of blocks |
1011 | * mapped. 0 in case of a HOLE. | 1004 | * mapped. 0 in case of a HOLE. |
@@ -1193,15 +1186,16 @@ write_begin_failed: | |||
1193 | * i_size_read because we hold i_mutex. | 1186 | * i_size_read because we hold i_mutex. |
1194 | * | 1187 | * |
1195 | * Add inode to orphan list in case we crash before truncate | 1188 | * Add inode to orphan list in case we crash before truncate |
1196 | * finishes. | 1189 | * finishes. Do this only if ext3_can_truncate() agrees so |
1190 | * that orphan processing code is happy. | ||
1197 | */ | 1191 | */ |
1198 | if (pos + len > inode->i_size) | 1192 | if (pos + len > inode->i_size && ext3_can_truncate(inode)) |
1199 | ext3_orphan_add(handle, inode); | 1193 | ext3_orphan_add(handle, inode); |
1200 | ext3_journal_stop(handle); | 1194 | ext3_journal_stop(handle); |
1201 | unlock_page(page); | 1195 | unlock_page(page); |
1202 | page_cache_release(page); | 1196 | page_cache_release(page); |
1203 | if (pos + len > inode->i_size) | 1197 | if (pos + len > inode->i_size) |
1204 | vmtruncate(inode, inode->i_size); | 1198 | ext3_truncate(inode); |
1205 | } | 1199 | } |
1206 | if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) | 1200 | if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) |
1207 | goto retry; | 1201 | goto retry; |
@@ -1287,7 +1281,7 @@ static int ext3_ordered_write_end(struct file *file, | |||
1287 | * There may be allocated blocks outside of i_size because | 1281 | * There may be allocated blocks outside of i_size because |
1288 | * we failed to copy some data. Prepare for truncate. | 1282 | * we failed to copy some data. Prepare for truncate. |
1289 | */ | 1283 | */ |
1290 | if (pos + len > inode->i_size) | 1284 | if (pos + len > inode->i_size && ext3_can_truncate(inode)) |
1291 | ext3_orphan_add(handle, inode); | 1285 | ext3_orphan_add(handle, inode); |
1292 | ret2 = ext3_journal_stop(handle); | 1286 | ret2 = ext3_journal_stop(handle); |
1293 | if (!ret) | 1287 | if (!ret) |
@@ -1296,7 +1290,7 @@ static int ext3_ordered_write_end(struct file *file, | |||
1296 | page_cache_release(page); | 1290 | page_cache_release(page); |
1297 | 1291 | ||
1298 | if (pos + len > inode->i_size) | 1292 | if (pos + len > inode->i_size) |
1299 | vmtruncate(inode, inode->i_size); | 1293 | ext3_truncate(inode); |
1300 | return ret ? ret : copied; | 1294 | return ret ? ret : copied; |
1301 | } | 1295 | } |
1302 | 1296 | ||
@@ -1315,14 +1309,14 @@ static int ext3_writeback_write_end(struct file *file, | |||
1315 | * There may be allocated blocks outside of i_size because | 1309 | * There may be allocated blocks outside of i_size because |
1316 | * we failed to copy some data. Prepare for truncate. | 1310 | * we failed to copy some data. Prepare for truncate. |
1317 | */ | 1311 | */ |
1318 | if (pos + len > inode->i_size) | 1312 | if (pos + len > inode->i_size && ext3_can_truncate(inode)) |
1319 | ext3_orphan_add(handle, inode); | 1313 | ext3_orphan_add(handle, inode); |
1320 | ret = ext3_journal_stop(handle); | 1314 | ret = ext3_journal_stop(handle); |
1321 | unlock_page(page); | 1315 | unlock_page(page); |
1322 | page_cache_release(page); | 1316 | page_cache_release(page); |
1323 | 1317 | ||
1324 | if (pos + len > inode->i_size) | 1318 | if (pos + len > inode->i_size) |
1325 | vmtruncate(inode, inode->i_size); | 1319 | ext3_truncate(inode); |
1326 | return ret ? ret : copied; | 1320 | return ret ? ret : copied; |
1327 | } | 1321 | } |
1328 | 1322 | ||
@@ -1358,7 +1352,7 @@ static int ext3_journalled_write_end(struct file *file, | |||
1358 | * There may be allocated blocks outside of i_size because | 1352 | * There may be allocated blocks outside of i_size because |
1359 | * we failed to copy some data. Prepare for truncate. | 1353 | * we failed to copy some data. Prepare for truncate. |
1360 | */ | 1354 | */ |
1361 | if (pos + len > inode->i_size) | 1355 | if (pos + len > inode->i_size && ext3_can_truncate(inode)) |
1362 | ext3_orphan_add(handle, inode); | 1356 | ext3_orphan_add(handle, inode); |
1363 | EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; | 1357 | EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; |
1364 | if (inode->i_size > EXT3_I(inode)->i_disksize) { | 1358 | if (inode->i_size > EXT3_I(inode)->i_disksize) { |
@@ -1375,7 +1369,7 @@ static int ext3_journalled_write_end(struct file *file, | |||
1375 | page_cache_release(page); | 1369 | page_cache_release(page); |
1376 | 1370 | ||
1377 | if (pos + len > inode->i_size) | 1371 | if (pos + len > inode->i_size) |
1378 | vmtruncate(inode, inode->i_size); | 1372 | ext3_truncate(inode); |
1379 | return ret ? ret : copied; | 1373 | return ret ? ret : copied; |
1380 | } | 1374 | } |
1381 | 1375 | ||
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 737f7246a4b5..f96f85092d1c 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction, | |||
287 | struct page *new_page; | 287 | struct page *new_page; |
288 | unsigned int new_offset; | 288 | unsigned int new_offset; |
289 | struct buffer_head *bh_in = jh2bh(jh_in); | 289 | struct buffer_head *bh_in = jh2bh(jh_in); |
290 | journal_t *journal = transaction->t_journal; | ||
290 | 291 | ||
291 | /* | 292 | /* |
292 | * The buffer really shouldn't be locked: only the current committing | 293 | * The buffer really shouldn't be locked: only the current committing |
@@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction, | |||
300 | J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); | 301 | J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); |
301 | 302 | ||
302 | new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); | 303 | new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); |
304 | /* keep subsequent assertions sane */ | ||
305 | new_bh->b_state = 0; | ||
306 | init_buffer(new_bh, NULL, NULL); | ||
307 | atomic_set(&new_bh->b_count, 1); | ||
308 | new_jh = journal_add_journal_head(new_bh); /* This sleeps */ | ||
303 | 309 | ||
304 | /* | 310 | /* |
305 | * If a new transaction has already done a buffer copy-out, then | 311 | * If a new transaction has already done a buffer copy-out, then |
@@ -361,14 +367,6 @@ repeat: | |||
361 | kunmap_atomic(mapped_data, KM_USER0); | 367 | kunmap_atomic(mapped_data, KM_USER0); |
362 | } | 368 | } |
363 | 369 | ||
364 | /* keep subsequent assertions sane */ | ||
365 | new_bh->b_state = 0; | ||
366 | init_buffer(new_bh, NULL, NULL); | ||
367 | atomic_set(&new_bh->b_count, 1); | ||
368 | jbd_unlock_bh_state(bh_in); | ||
369 | |||
370 | new_jh = journal_add_journal_head(new_bh); /* This sleeps */ | ||
371 | |||
372 | set_bh_page(new_bh, new_page, new_offset); | 370 | set_bh_page(new_bh, new_page, new_offset); |
373 | new_jh->b_transaction = NULL; | 371 | new_jh->b_transaction = NULL; |
374 | new_bh->b_size = jh2bh(jh_in)->b_size; | 372 | new_bh->b_size = jh2bh(jh_in)->b_size; |
@@ -385,7 +383,11 @@ repeat: | |||
385 | * copying is moved to the transaction's shadow queue. | 383 | * copying is moved to the transaction's shadow queue. |
386 | */ | 384 | */ |
387 | JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); | 385 | JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); |
388 | journal_file_buffer(jh_in, transaction, BJ_Shadow); | 386 | spin_lock(&journal->j_list_lock); |
387 | __journal_file_buffer(jh_in, transaction, BJ_Shadow); | ||
388 | spin_unlock(&journal->j_list_lock); | ||
389 | jbd_unlock_bh_state(bh_in); | ||
390 | |||
389 | JBUFFER_TRACE(new_jh, "file as BJ_IO"); | 391 | JBUFFER_TRACE(new_jh, "file as BJ_IO"); |
390 | journal_file_buffer(new_jh, transaction, BJ_IO); | 392 | journal_file_buffer(new_jh, transaction, BJ_IO); |
391 | 393 | ||
@@ -848,6 +850,12 @@ static int journal_reset(journal_t *journal) | |||
848 | 850 | ||
849 | first = be32_to_cpu(sb->s_first); | 851 | first = be32_to_cpu(sb->s_first); |
850 | last = be32_to_cpu(sb->s_maxlen); | 852 | last = be32_to_cpu(sb->s_maxlen); |
853 | if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) { | ||
854 | printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n", | ||
855 | first, last); | ||
856 | journal_fail_superblock(journal); | ||
857 | return -EINVAL; | ||
858 | } | ||
851 | 859 | ||
852 | journal->j_first = first; | 860 | journal->j_first = first; |
853 | journal->j_last = last; | 861 | journal->j_last = last; |
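
journal_reset() above now refuses to run on a journal whose superblock claims fewer than JFS_MIN_JOURNAL_BLOCKS usable blocks between s_first and s_maxlen, failing the superblock instead of continuing with a nonsensical window. A small sketch of that geometry sanity check; the constant value and struct journal_geom are stand-ins for the real journal fields.

#include <stdio.h>

#define MIN_JOURNAL_BLOCKS 1024   /* stand-in for JFS_MIN_JOURNAL_BLOCKS */

struct journal_geom {
    unsigned long first;   /* first usable block (s_first in the real code) */
    unsigned long last;    /* last usable block (s_maxlen in the real code) */
};

/* refuse to run on a journal too short to hold the minimum block count */
static int journal_check_geometry(const struct journal_geom *g)
{
    if (g->first + MIN_JOURNAL_BLOCKS > g->last + 1) {
        fprintf(stderr, "journal too short (blocks %lu-%lu)\n",
                g->first, g->last);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct journal_geom bad  = { .first = 1, .last = 16 };
    struct journal_geom good = { .first = 1, .last = 8192 };

    printf("bad:  %d\n", journal_check_geometry(&bad));
    printf("good: %d\n", journal_check_geometry(&good));
    return 0;
}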
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 73242ba7c7b1..c03ac11f74be 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
@@ -489,34 +489,15 @@ void journal_unlock_updates (journal_t *journal) | |||
489 | wake_up(&journal->j_wait_transaction_locked); | 489 | wake_up(&journal->j_wait_transaction_locked); |
490 | } | 490 | } |
491 | 491 | ||
492 | /* | 492 | static void warn_dirty_buffer(struct buffer_head *bh) |
493 | * Report any unexpected dirty buffers which turn up. Normally those | ||
494 | * indicate an error, but they can occur if the user is running (say) | ||
495 | * tune2fs to modify the live filesystem, so we need the option of | ||
496 | * continuing as gracefully as possible. # | ||
497 | * | ||
498 | * The caller should already hold the journal lock and | ||
499 | * j_list_lock spinlock: most callers will need those anyway | ||
500 | * in order to probe the buffer's journaling state safely. | ||
501 | */ | ||
502 | static void jbd_unexpected_dirty_buffer(struct journal_head *jh) | ||
503 | { | 493 | { |
504 | int jlist; | 494 | char b[BDEVNAME_SIZE]; |
505 | |||
506 | /* If this buffer is one which might reasonably be dirty | ||
507 | * --- ie. data, or not part of this journal --- then | ||
508 | * we're OK to leave it alone, but otherwise we need to | ||
509 | * move the dirty bit to the journal's own internal | ||
510 | * JBDDirty bit. */ | ||
511 | jlist = jh->b_jlist; | ||
512 | 495 | ||
513 | if (jlist == BJ_Metadata || jlist == BJ_Reserved || | 496 | printk(KERN_WARNING |
514 | jlist == BJ_Shadow || jlist == BJ_Forget) { | 497 | "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). " |
515 | struct buffer_head *bh = jh2bh(jh); | 498 | "There's a risk of filesystem corruption in case of system " |
516 | 499 | "crash.\n", | |
517 | if (test_clear_buffer_dirty(bh)) | 500 | bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); |
518 | set_buffer_jbddirty(bh); | ||
519 | } | ||
520 | } | 501 | } |
521 | 502 | ||
522 | /* | 503 | /* |
@@ -583,14 +564,16 @@ repeat: | |||
583 | if (jh->b_next_transaction) | 564 | if (jh->b_next_transaction) |
584 | J_ASSERT_JH(jh, jh->b_next_transaction == | 565 | J_ASSERT_JH(jh, jh->b_next_transaction == |
585 | transaction); | 566 | transaction); |
567 | warn_dirty_buffer(bh); | ||
586 | } | 568 | } |
587 | /* | 569 | /* |
588 | * In any case we need to clean the dirty flag and we must | 570 | * In any case we need to clean the dirty flag and we must |
589 | * do it under the buffer lock to be sure we don't race | 571 | * do it under the buffer lock to be sure we don't race |
590 | * with running write-out. | 572 | * with running write-out. |
591 | */ | 573 | */ |
592 | JBUFFER_TRACE(jh, "Unexpected dirty buffer"); | 574 | JBUFFER_TRACE(jh, "Journalling dirty buffer"); |
593 | jbd_unexpected_dirty_buffer(jh); | 575 | clear_buffer_dirty(bh); |
576 | set_buffer_jbddirty(bh); | ||
594 | } | 577 | } |
595 | 578 | ||
596 | unlock_buffer(bh); | 579 | unlock_buffer(bh); |
@@ -826,6 +809,15 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh) | |||
826 | J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); | 809 | J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); |
827 | 810 | ||
828 | if (jh->b_transaction == NULL) { | 811 | if (jh->b_transaction == NULL) { |
812 | /* | ||
813 | * Previous journal_forget() could have left the buffer | ||
814 | * with jbddirty bit set because it was being committed. When | ||
815 | * the commit finished, we've filed the buffer for | ||
816 | * checkpointing and marked it dirty. Now we are reallocating | ||
817 | * the buffer so the transaction freeing it must have | ||
818 | * committed and so it's safe to clear the dirty bit. | ||
819 | */ | ||
820 | clear_buffer_dirty(jh2bh(jh)); | ||
829 | jh->b_transaction = transaction; | 821 | jh->b_transaction = transaction; |
830 | 822 | ||
831 | /* first access by this transaction */ | 823 | /* first access by this transaction */ |
@@ -1782,8 +1774,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) | |||
1782 | 1774 | ||
1783 | if (jh->b_cp_transaction) { | 1775 | if (jh->b_cp_transaction) { |
1784 | JBUFFER_TRACE(jh, "on running+cp transaction"); | 1776 | JBUFFER_TRACE(jh, "on running+cp transaction"); |
1777 | /* | ||
1778 | * We don't want to write the buffer anymore, clear the | ||
1779 | * bit so that we don't confuse checks in | ||
1780 | * __journal_file_buffer | ||
1781 | */ | ||
1782 | clear_buffer_dirty(bh); | ||
1785 | __journal_file_buffer(jh, transaction, BJ_Forget); | 1783 | __journal_file_buffer(jh, transaction, BJ_Forget); |
1786 | clear_buffer_jbddirty(bh); | ||
1787 | may_free = 0; | 1784 | may_free = 0; |
1788 | } else { | 1785 | } else { |
1789 | JBUFFER_TRACE(jh, "on running transaction"); | 1786 | JBUFFER_TRACE(jh, "on running transaction"); |
@@ -2041,12 +2038,17 @@ void __journal_file_buffer(struct journal_head *jh, | |||
2041 | if (jh->b_transaction && jh->b_jlist == jlist) | 2038 | if (jh->b_transaction && jh->b_jlist == jlist) |
2042 | return; | 2039 | return; |
2043 | 2040 | ||
2044 | /* The following list of buffer states needs to be consistent | ||
2045 | * with __jbd_unexpected_dirty_buffer()'s handling of dirty | ||
2046 | * state. */ | ||
2047 | |||
2048 | if (jlist == BJ_Metadata || jlist == BJ_Reserved || | 2041 | if (jlist == BJ_Metadata || jlist == BJ_Reserved || |
2049 | jlist == BJ_Shadow || jlist == BJ_Forget) { | 2042 | jlist == BJ_Shadow || jlist == BJ_Forget) { |
2043 | /* | ||
2044 | * For metadata buffers, we track dirty bit in buffer_jbddirty | ||
2045 | * instead of buffer_dirty. We should not see a dirty bit set | ||
2046 | * here because we clear it in do_get_write_access but e.g. | ||
2047 | * tune2fs can modify the sb and set the dirty bit at any time | ||
2048 | * so we try to gracefully handle that. | ||
2049 | */ | ||
2050 | if (buffer_dirty(bh)) | ||
2051 | warn_dirty_buffer(bh); | ||
2050 | if (test_clear_buffer_dirty(bh) || | 2052 | if (test_clear_buffer_dirty(bh) || |
2051 | test_clear_buffer_jbddirty(bh)) | 2053 | test_clear_buffer_jbddirty(bh)) |
2052 | was_dirty = 1; | 2054 | was_dirty = 1; |
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 91fa3ad6e8c2..a29c7c3e3fb8 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c | |||
@@ -67,10 +67,8 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type) | |||
67 | acl = posix_acl_from_xattr(value, size); | 67 | acl = posix_acl_from_xattr(value, size); |
68 | } | 68 | } |
69 | kfree(value); | 69 | kfree(value); |
70 | if (!IS_ERR(acl)) { | 70 | if (!IS_ERR(acl)) |
71 | set_cached_acl(inode, type, acl); | 71 | set_cached_acl(inode, type, acl); |
72 | posix_acl_release(acl); | ||
73 | } | ||
74 | return acl; | 72 | return acl; |
75 | } | 73 | } |
76 | 74 | ||
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig index 31dac7e3b0f1..dffbb0911d02 100644 --- a/fs/notify/Kconfig +++ b/fs/notify/Kconfig | |||
@@ -1,15 +1,5 @@ | |||
1 | config FSNOTIFY | 1 | config FSNOTIFY |
2 | bool "Filesystem notification backend" | 2 | def_bool n |
3 | default y | ||
4 | ---help--- | ||
5 | fsnotify is a backend for filesystem notification. fsnotify does | ||
6 | not provide any userspace interface but does provide the basis | ||
7 | needed for other notification schemes such as dnotify, inotify, | ||
8 | and fanotify. | ||
9 | |||
10 | Say Y here to enable fsnotify suport. | ||
11 | |||
12 | If unsure, say Y. | ||
13 | 3 | ||
14 | source "fs/notify/dnotify/Kconfig" | 4 | source "fs/notify/dnotify/Kconfig" |
15 | source "fs/notify/inotify/Kconfig" | 5 | source "fs/notify/inotify/Kconfig" |
diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig index 904ff8d5405a..f9c1ca139d8f 100644 --- a/fs/notify/dnotify/Kconfig +++ b/fs/notify/dnotify/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DNOTIFY | 1 | config DNOTIFY |
2 | bool "Dnotify support" | 2 | bool "Dnotify support" |
3 | depends on FSNOTIFY | 3 | select FSNOTIFY |
4 | default y | 4 | default y |
5 | help | 5 | help |
6 | Dnotify is a directory-based per-fd file change notification system | 6 | Dnotify is a directory-based per-fd file change notification system |
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index ec2f7bd76818..037e878e03fc 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c | |||
@@ -159,7 +159,9 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const | |||
159 | if (!group->ops->should_send_event(group, to_tell, mask)) | 159 | if (!group->ops->should_send_event(group, to_tell, mask)) |
160 | continue; | 160 | continue; |
161 | if (!event) { | 161 | if (!event) { |
162 | event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie); | 162 | event = fsnotify_create_event(to_tell, mask, data, |
163 | data_is, file_name, cookie, | ||
164 | GFP_KERNEL); | ||
163 | /* shit, we OOM'd and now we can't tell, maybe | 165 | /* shit, we OOM'd and now we can't tell, maybe |
164 | * someday someone else will want to do something | 166 | * someday someone else will want to do something |
165 | * here */ | 167 | * here */ |
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig index 5356884289a1..3e56dbffe729 100644 --- a/fs/notify/inotify/Kconfig +++ b/fs/notify/inotify/Kconfig | |||
@@ -15,7 +15,7 @@ config INOTIFY | |||
15 | 15 | ||
16 | config INOTIFY_USER | 16 | config INOTIFY_USER |
17 | bool "Inotify support for userspace" | 17 | bool "Inotify support for userspace" |
18 | depends on FSNOTIFY | 18 | select FSNOTIFY |
19 | default y | 19 | default y |
20 | ---help--- | 20 | ---help--- |
21 | Say Y here to enable inotify support for userspace, including the | 21 | Say Y here to enable inotify support for userspace, including the |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index ff27a2965844..f30d9bbc2e1b 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
@@ -57,7 +57,6 @@ int inotify_max_user_watches __read_mostly; | |||
57 | 57 | ||
58 | static struct kmem_cache *inotify_inode_mark_cachep __read_mostly; | 58 | static struct kmem_cache *inotify_inode_mark_cachep __read_mostly; |
59 | struct kmem_cache *event_priv_cachep __read_mostly; | 59 | struct kmem_cache *event_priv_cachep __read_mostly; |
60 | static struct fsnotify_event *inotify_ignored_event; | ||
61 | 60 | ||
62 | /* | 61 | /* |
63 | * When inotify registers a new group it increments this and uses that | 62 | * When inotify registers a new group it increments this and uses that |
@@ -365,6 +364,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns | |||
365 | return error; | 364 | return error; |
366 | } | 365 | } |
367 | 366 | ||
367 | static void inotify_remove_from_idr(struct fsnotify_group *group, | ||
368 | struct inotify_inode_mark_entry *ientry) | ||
369 | { | ||
370 | struct idr *idr; | ||
371 | |||
372 | spin_lock(&group->inotify_data.idr_lock); | ||
373 | idr = &group->inotify_data.idr; | ||
374 | idr_remove(idr, ientry->wd); | ||
375 | spin_unlock(&group->inotify_data.idr_lock); | ||
376 | ientry->wd = -1; | ||
377 | } | ||
368 | /* | 378 | /* |
369 | * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the | 379 | * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the |
370 | * internal reference held on the mark because it is in the idr. | 380 |
@@ -373,13 +383,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, | |||
373 | struct fsnotify_group *group) | 383 | struct fsnotify_group *group) |
374 | { | 384 | { |
375 | struct inotify_inode_mark_entry *ientry; | 385 | struct inotify_inode_mark_entry *ientry; |
386 | struct fsnotify_event *ignored_event; | ||
376 | struct inotify_event_private_data *event_priv; | 387 | struct inotify_event_private_data *event_priv; |
377 | struct fsnotify_event_private_data *fsn_event_priv; | 388 | struct fsnotify_event_private_data *fsn_event_priv; |
378 | struct idr *idr; | 389 | |
390 | ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, | ||
391 | FSNOTIFY_EVENT_NONE, NULL, 0, | ||
392 | GFP_NOFS); | ||
393 | if (!ignored_event) | ||
394 | return; | ||
379 | 395 | ||
380 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); | 396 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); |
381 | 397 | ||
382 | event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL); | 398 | event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS); |
383 | if (unlikely(!event_priv)) | 399 | if (unlikely(!event_priv)) |
384 | goto skip_send_ignore; | 400 | goto skip_send_ignore; |
385 | 401 | ||
@@ -388,7 +404,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, | |||
388 | fsn_event_priv->group = group; | 404 | fsn_event_priv->group = group; |
389 | event_priv->wd = ientry->wd; | 405 | event_priv->wd = ientry->wd; |
390 | 406 | ||
391 | fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv); | 407 | fsnotify_add_notify_event(group, ignored_event, fsn_event_priv); |
392 | 408 | ||
393 | /* did the private data get added? */ | 409 | /* did the private data get added? */ |
394 | if (list_empty(&fsn_event_priv->event_list)) | 410 | if (list_empty(&fsn_event_priv->event_list)) |
@@ -396,14 +412,16 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, | |||
396 | 412 | ||
397 | skip_send_ignore: | 413 | skip_send_ignore: |
398 | 414 | ||
415 | /* matches the reference taken when the event was created */ | ||
416 | fsnotify_put_event(ignored_event); | ||
417 | |||
399 | /* remove this entry from the idr */ | 418 | /* remove this entry from the idr */ |
400 | spin_lock(&group->inotify_data.idr_lock); | 419 | inotify_remove_from_idr(group, ientry); |
401 | idr = &group->inotify_data.idr; | ||
402 | idr_remove(idr, ientry->wd); | ||
403 | spin_unlock(&group->inotify_data.idr_lock); | ||
404 | 420 | ||
405 | /* removed from idr, drop that reference */ | 421 | /* removed from idr, drop that reference */ |
406 | fsnotify_put_mark(entry); | 422 | fsnotify_put_mark(entry); |
423 | |||
424 | atomic_dec(&group->inotify_data.user->inotify_watches); | ||
407 | } | 425 | } |
408 | 426 | ||
409 | /* ding dong the mark is dead */ | 427 | /* ding dong the mark is dead */ |
@@ -418,6 +436,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod | |||
418 | { | 436 | { |
419 | struct fsnotify_mark_entry *entry = NULL; | 437 | struct fsnotify_mark_entry *entry = NULL; |
420 | struct inotify_inode_mark_entry *ientry; | 438 | struct inotify_inode_mark_entry *ientry; |
439 | struct inotify_inode_mark_entry *tmp_ientry; | ||
421 | int ret = 0; | 440 | int ret = 0; |
422 | int add = (arg & IN_MASK_ADD); | 441 | int add = (arg & IN_MASK_ADD); |
423 | __u32 mask; | 442 | __u32 mask; |
@@ -428,54 +447,66 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod | |||
428 | if (unlikely(!mask)) | 447 | if (unlikely(!mask)) |
429 | return -EINVAL; | 448 | return -EINVAL; |
430 | 449 | ||
431 | ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); | 450 | tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); |
432 | if (unlikely(!ientry)) | 451 | if (unlikely(!tmp_ientry)) |
433 | return -ENOMEM; | 452 | return -ENOMEM; |
434 | /* we set the mask at the end after attaching it */ | 453 | /* we set the mask at the end after attaching it */ |
435 | fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark); | 454 | fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark); |
436 | ientry->wd = 0; | 455 | tmp_ientry->wd = -1; |
437 | 456 | ||
438 | find_entry: | 457 | find_entry: |
439 | spin_lock(&inode->i_lock); | 458 | spin_lock(&inode->i_lock); |
440 | entry = fsnotify_find_mark_entry(group, inode); | 459 | entry = fsnotify_find_mark_entry(group, inode); |
441 | spin_unlock(&inode->i_lock); | 460 | spin_unlock(&inode->i_lock); |
442 | if (entry) { | 461 | if (entry) { |
443 | kmem_cache_free(inotify_inode_mark_cachep, ientry); | ||
444 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); | 462 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); |
445 | } else { | 463 | } else { |
446 | if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) { | 464 | ret = -ENOSPC; |
447 | ret = -ENOSPC; | 465 | if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) |
448 | goto out_err; | ||
449 | } | ||
450 | |||
451 | ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode); | ||
452 | if (ret == -EEXIST) | ||
453 | goto find_entry; | ||
454 | else if (ret) | ||
455 | goto out_err; | 466 | goto out_err; |
456 | |||
457 | entry = &ientry->fsn_entry; | ||
458 | retry: | 467 | retry: |
459 | ret = -ENOMEM; | 468 | ret = -ENOMEM; |
460 | if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) | 469 | if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) |
461 | goto out_err; | 470 | goto out_err; |
462 | 471 | ||
463 | spin_lock(&group->inotify_data.idr_lock); | 472 | spin_lock(&group->inotify_data.idr_lock); |
464 | /* if entry is added to the idr we keep the reference obtained | 473 | ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, |
465 | * through fsnotify_mark_add. remember to drop this reference | 474 | group->inotify_data.last_wd, |
466 | * when entry is removed from idr */ | 475 | &tmp_ientry->wd); |
467 | ret = idr_get_new_above(&group->inotify_data.idr, entry, | ||
468 | ++group->inotify_data.last_wd, | ||
469 | &ientry->wd); | ||
470 | spin_unlock(&group->inotify_data.idr_lock); | 476 | spin_unlock(&group->inotify_data.idr_lock); |
471 | if (ret) { | 477 | if (ret) { |
472 | if (ret == -EAGAIN) | 478 | if (ret == -EAGAIN) |
473 | goto retry; | 479 | goto retry; |
474 | goto out_err; | 480 | goto out_err; |
475 | } | 481 | } |
482 | |||
483 | ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); | ||
484 | if (ret) { | ||
485 | inotify_remove_from_idr(group, tmp_ientry); | ||
486 | if (ret == -EEXIST) | ||
487 | goto find_entry; | ||
488 | goto out_err; | ||
489 | } | ||
490 | |||
491 | /* tmp_ientry has been added to the inode, so we are all set up. | ||
492 | * now we just need to make sure tmp_ientry doesn't get freed and | ||
493 | * we need to set up entry and ientry so the generic code can | ||
494 | * do its thing. */ | ||
495 | ientry = tmp_ientry; | ||
496 | entry = &ientry->fsn_entry; | ||
497 | tmp_ientry = NULL; | ||
498 | |||
476 | atomic_inc(&group->inotify_data.user->inotify_watches); | 499 | atomic_inc(&group->inotify_data.user->inotify_watches); |
500 | |||
501 | /* update the idr hint */ | ||
502 | group->inotify_data.last_wd = ientry->wd; | ||
503 | |||
504 | /* we put the mark on the idr, take a reference */ | ||
505 | fsnotify_get_mark(entry); | ||
477 | } | 506 | } |
478 | 507 | ||
508 | ret = ientry->wd; | ||
509 | |||
479 | spin_lock(&entry->lock); | 510 | spin_lock(&entry->lock); |
480 | 511 | ||
481 | old_mask = entry->mask; | 512 | old_mask = entry->mask; |
@@ -506,14 +537,19 @@ retry: | |||
506 | fsnotify_recalc_group_mask(group); | 537 | fsnotify_recalc_group_mask(group); |
507 | } | 538 | } |
508 | 539 | ||
509 | return ientry->wd; | 540 | /* this either matches fsnotify_find_mark_entry, or init_mark_entry |
541 | * depending on which path we took... */ | ||
542 | fsnotify_put_mark(entry); | ||
510 | 543 | ||
511 | out_err: | 544 | out_err: |
512 | /* see this isn't supposed to happen, just kill the watch */ | 545 | /* could be an error, could be that we found an existing mark */ |
513 | if (entry) { | 546 | if (tmp_ientry) { |
514 | fsnotify_destroy_mark_by_entry(entry); | 547 | /* on the idr but didn't make it on the inode */ |
515 | fsnotify_put_mark(entry); | 548 | if (tmp_ientry->wd != -1) |
549 | inotify_remove_from_idr(group, tmp_ientry); | ||
550 | kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); | ||
516 | } | 551 | } |
552 | |||
517 | return ret; | 553 | return ret; |
518 | } | 554 | } |
519 | 555 | ||
@@ -721,9 +757,6 @@ static int __init inotify_user_setup(void) | |||
721 | 757 | ||
722 | inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); | 758 | inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); |
723 | event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); | 759 | event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); |
724 | inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0); | ||
725 | if (!inotify_ignored_event) | ||
726 | panic("unable to allocate the inotify ignored event\n"); | ||
727 | 760 | ||
728 | inotify_max_queued_events = 16384; | 761 | inotify_max_queued_events = 16384; |
729 | inotify_max_user_instances = 128; | 762 | inotify_max_user_instances = 128; |
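
The reworked inotify_update_watch() above reserves the idr slot for the temporary mark first, then attaches the mark to the inode, and unwinds the idr entry via inotify_remove_from_idr() if the attach fails or loses a race with an existing mark. A userspace sketch of that "reserve an id, attach, unwind on failure" ordering; the watch table is a plain array standing in for the kernel idr, and the race is simulated with a flag.

#include <stdio.h>
#include <stdlib.h>

#define MAX_WATCHES 16

struct mark {
    int wd;              /* watch descriptor, -1 while unassigned */
    int attached;        /* did the mark make it onto the "inode"? */
};

static struct mark *wd_table[MAX_WATCHES];   /* stand-in for the idr */

static int wd_alloc(struct mark *m)
{
    for (int i = 0; i < MAX_WATCHES; i++) {
        if (!wd_table[i]) {
            wd_table[i] = m;
            m->wd = i;
            return 0;
        }
    }
    return -1;           /* table full */
}

static void wd_remove(struct mark *m)
{
    if (m->wd >= 0) {
        wd_table[m->wd] = NULL;
        m->wd = -1;
    }
}

/* attach to the inode; pretend it can fail (e.g. a racing add already won) */
static int attach_to_inode(struct mark *m, int should_fail)
{
    if (should_fail)
        return -1;
    m->attached = 1;
    return 0;
}

static int add_watch(int simulate_race)
{
    struct mark *m = calloc(1, sizeof(*m));

    m->wd = -1;
    if (wd_alloc(m) < 0)
        goto out_err;
    if (attach_to_inode(m, simulate_race) < 0)
        goto out_err;           /* still in the table: must unwind */

    return m->wd;               /* success: mark stays alive in the table */

out_err:
    wd_remove(m);               /* only does work if a wd was assigned */
    free(m);
    return -1;
}

int main(void)
{
    printf("clean add: wd %d\n", add_watch(0));
    printf("raced add: wd %d\n", add_watch(1));
    return 0;
}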
diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 959b73e756fd..521368574e97 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c | |||
@@ -136,18 +136,24 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new | |||
136 | { | 136 | { |
137 | if ((old->mask == new->mask) && | 137 | if ((old->mask == new->mask) && |
138 | (old->to_tell == new->to_tell) && | 138 | (old->to_tell == new->to_tell) && |
139 | (old->data_type == new->data_type)) { | 139 | (old->data_type == new->data_type) && |
140 | (old->name_len == new->name_len)) { | ||
140 | switch (old->data_type) { | 141 | switch (old->data_type) { |
141 | case (FSNOTIFY_EVENT_INODE): | 142 | case (FSNOTIFY_EVENT_INODE): |
142 | if (old->inode == new->inode) | 143 | /* remember, after old was put on the wait_q we aren't |
144 | * allowed to look at the inode any more, only thing | ||
145 | * left to check was if the file_name is the same */ | ||
146 | if (old->name_len && | ||
147 | !strcmp(old->file_name, new->file_name)) | ||
143 | return true; | 148 | return true; |
144 | break; | 149 | break; |
145 | case (FSNOTIFY_EVENT_PATH): | 150 | case (FSNOTIFY_EVENT_PATH): |
146 | if ((old->path.mnt == new->path.mnt) && | 151 | if ((old->path.mnt == new->path.mnt) && |
147 | (old->path.dentry == new->path.dentry)) | 152 | (old->path.dentry == new->path.dentry)) |
148 | return true; | 153 | return true; |
154 | break; | ||
149 | case (FSNOTIFY_EVENT_NONE): | 155 | case (FSNOTIFY_EVENT_NONE): |
150 | return true; | 156 | return false; |
151 | }; | 157 | }; |
152 | } | 158 | } |
153 | return false; | 159 | return false; |
@@ -339,18 +345,19 @@ static void initialize_event(struct fsnotify_event *event) | |||
339 | * @name the filename, if available | 345 | * @name the filename, if available |
340 | */ | 346 | */ |
341 | struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, | 347 | struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, |
342 | int data_type, const char *name, u32 cookie) | 348 | int data_type, const char *name, u32 cookie, |
349 | gfp_t gfp) | ||
343 | { | 350 | { |
344 | struct fsnotify_event *event; | 351 | struct fsnotify_event *event; |
345 | 352 | ||
346 | event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL); | 353 | event = kmem_cache_alloc(fsnotify_event_cachep, gfp); |
347 | if (!event) | 354 | if (!event) |
348 | return NULL; | 355 | return NULL; |
349 | 356 | ||
350 | initialize_event(event); | 357 | initialize_event(event); |
351 | 358 | ||
352 | if (name) { | 359 | if (name) { |
353 | event->file_name = kstrdup(name, GFP_KERNEL); | 360 | event->file_name = kstrdup(name, gfp); |
354 | if (!event->file_name) { | 361 | if (!event->file_name) { |
355 | kmem_cache_free(fsnotify_event_cachep, event); | 362 | kmem_cache_free(fsnotify_event_cachep, event); |
356 | return NULL; | 363 | return NULL; |
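The event_compare() change makes coalescing stricter: the candidate events must also carry names of equal length and, for inode events, the same file name, since the old event's inode may not be dereferenced once it sits on the queue. A small standalone sketch of that comparison with a simplified event struct (field and function names here are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct event {
	unsigned int mask;
	size_t name_len;	/* 0 when the event carries no file name */
	const char *file_name;	/* only meaningful when name_len != 0 */
};

/* merge two queued events only if a reader could not tell them apart */
static bool events_match(const struct event *old, const struct event *new)
{
	if (old->mask != new->mask || old->name_len != new->name_len)
		return false;
	/* the inode behind the old event is off limits by now, so the
	 * file name is the only remaining thing that can be compared */
	return old->name_len && strcmp(old->file_name, new->file_name) == 0;
}

int main(void)
{
	struct event a = { 1, 4, "file" }, b = { 1, 4, "file" };

	printf("match: %d\n", events_match(&a, &b));
	return 0;
}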
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index d88d0fac9fa5..14f2d71ea3ce 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -939,8 +939,10 @@ again: | |||
939 | /* Remove from old parent's list and insert into new parent's list. */ | 939 | /* Remove from old parent's list and insert into new parent's list. */ |
940 | sysfs_unlink_sibling(sd); | 940 | sysfs_unlink_sibling(sd); |
941 | sysfs_get(new_parent_sd); | 941 | sysfs_get(new_parent_sd); |
942 | drop_nlink(old_parent->d_inode); | ||
942 | sysfs_put(sd->s_parent); | 943 | sysfs_put(sd->s_parent); |
943 | sd->s_parent = new_parent_sd; | 944 | sd->s_parent = new_parent_sd; |
945 | inc_nlink(new_parent->d_inode); | ||
944 | sysfs_link_sibling(sd); | 946 | sysfs_link_sibling(sd); |
945 | 947 | ||
946 | out_unlock: | 948 | out_unlock: |
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index 9d40e879f99e..77ff547730af 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h | |||
@@ -27,9 +27,9 @@ | |||
27 | #define pud_page_vaddr(pud) pgd_page_vaddr(pud) | 27 | #define pud_page_vaddr(pud) pgd_page_vaddr(pud) |
28 | 28 | ||
29 | #undef pud_free_tlb | 29 | #undef pud_free_tlb |
30 | #define pud_free_tlb(tlb, x) do { } while (0) | 30 | #define pud_free_tlb(tlb, x, addr) do { } while (0) |
31 | #define pud_free(mm, x) do { } while (0) | 31 | #define pud_free(mm, x) do { } while (0) |
32 | #define __pud_free_tlb(tlb, x) do { } while (0) | 32 | #define __pud_free_tlb(tlb, x, addr) do { } while (0) |
33 | 33 | ||
34 | #undef pud_addr_end | 34 | #undef pud_addr_end |
35 | #define pud_addr_end(addr, end) (end) | 35 | #define pud_addr_end(addr, end) (end) |
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index a7cdc48e8b78..725612b793ce 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h | |||
@@ -59,7 +59,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) | |||
59 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 59 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
60 | { | 60 | { |
61 | } | 61 | } |
62 | #define __pmd_free_tlb(tlb, x) do { } while (0) | 62 | #define __pmd_free_tlb(tlb, x, a) do { } while (0) |
63 | 63 | ||
64 | #undef pmd_addr_end | 64 | #undef pmd_addr_end |
65 | #define pmd_addr_end(addr, end) (end) | 65 | #define pmd_addr_end(addr, end) (end) |
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index 87cf449a6df3..810431d8351b 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h | |||
@@ -52,7 +52,7 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) | |||
52 | */ | 52 | */ |
53 | #define pud_alloc_one(mm, address) NULL | 53 | #define pud_alloc_one(mm, address) NULL |
54 | #define pud_free(mm, x) do { } while (0) | 54 | #define pud_free(mm, x) do { } while (0) |
55 | #define __pud_free_tlb(tlb, x) do { } while (0) | 55 | #define __pud_free_tlb(tlb, x, a) do { } while (0) |
56 | 56 | ||
57 | #undef pud_addr_end | 57 | #undef pud_addr_end |
58 | #define pud_addr_end(addr, end) (end) | 58 | #define pud_addr_end(addr, end) (end) |
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index f490e43a90b9..e43f9766259f 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -123,24 +123,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
123 | __tlb_remove_tlb_entry(tlb, ptep, address); \ | 123 | __tlb_remove_tlb_entry(tlb, ptep, address); \ |
124 | } while (0) | 124 | } while (0) |
125 | 125 | ||
126 | #define pte_free_tlb(tlb, ptep) \ | 126 | #define pte_free_tlb(tlb, ptep, address) \ |
127 | do { \ | 127 | do { \ |
128 | tlb->need_flush = 1; \ | 128 | tlb->need_flush = 1; \ |
129 | __pte_free_tlb(tlb, ptep); \ | 129 | __pte_free_tlb(tlb, ptep, address); \ |
130 | } while (0) | 130 | } while (0) |
131 | 131 | ||
132 | #ifndef __ARCH_HAS_4LEVEL_HACK | 132 | #ifndef __ARCH_HAS_4LEVEL_HACK |
133 | #define pud_free_tlb(tlb, pudp) \ | 133 | #define pud_free_tlb(tlb, pudp, address) \ |
134 | do { \ | 134 | do { \ |
135 | tlb->need_flush = 1; \ | 135 | tlb->need_flush = 1; \ |
136 | __pud_free_tlb(tlb, pudp); \ | 136 | __pud_free_tlb(tlb, pudp, address); \ |
137 | } while (0) | 137 | } while (0) |
138 | #endif | 138 | #endif |
139 | 139 | ||
140 | #define pmd_free_tlb(tlb, pmdp) \ | 140 | #define pmd_free_tlb(tlb, pmdp, address) \ |
141 | do { \ | 141 | do { \ |
142 | tlb->need_flush = 1; \ | 142 | tlb->need_flush = 1; \ |
143 | __pmd_free_tlb(tlb, pmdp); \ | 143 | __pmd_free_tlb(tlb, pmdp, address); \ |
144 | } while (0) | 144 | } while (0) |
145 | 145 | ||
146 | #define tlb_migrate_finish(mm) do {} while (0) | 146 | #define tlb_migrate_finish(mm) do {} while (0) |
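The generic *_free_tlb() wrappers now take the virtual address of the range whose page table is being freed and forward it to the architecture hook, keeping the do { } while (0) idiom so each wrapper still behaves like a single statement. A compilable toy version of the pattern (mmu_gather_toy, arch_pmd_free and pmd_free_tlb_toy are illustrative stand-ins, not the real kernel symbols):

#include <stdio.h>

struct mmu_gather_toy {
	int need_flush;
};

/* stand-in for the per-architecture hook, which can now use the address */
static void arch_pmd_free(struct mmu_gather_toy *tlb, void *pmdp,
			  unsigned long address)
{
	(void)tlb;
	printf("freeing pmd %p covering address 0x%lx\n", pmdp, address);
}

/* mirrors the generic wrapper: mark a flush pending, then pass the
 * table and its address down to the architecture */
#define pmd_free_tlb_toy(tlb, pmdp, address)		\
	do {						\
		(tlb)->need_flush = 1;			\
		arch_pmd_free(tlb, pmdp, address);	\
	} while (0)

int main(void)
{
	struct mmu_gather_toy tlb = { 0 };
	int fake_pmd;

	pmd_free_tlb_toy(&tlb, &fake_pmd, 0x400000UL);
	return tlb.need_flush ? 0 : 1;
}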
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 41862e9a4c20..af4b4826997e 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
@@ -506,6 +506,8 @@ typedef struct { | |||
506 | #define DRM_RADEON_GEM_WAIT_IDLE 0x24 | 506 | #define DRM_RADEON_GEM_WAIT_IDLE 0x24 |
507 | #define DRM_RADEON_CS 0x26 | 507 | #define DRM_RADEON_CS 0x26 |
508 | #define DRM_RADEON_INFO 0x27 | 508 | #define DRM_RADEON_INFO 0x27 |
509 | #define DRM_RADEON_GEM_SET_TILING 0x28 | ||
510 | #define DRM_RADEON_GEM_GET_TILING 0x29 | ||
509 | 511 | ||
510 | #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) | 512 | #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) |
511 | #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) | 513 | #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) |
@@ -544,7 +546,8 @@ typedef struct { | |||
544 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) | 546 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) |
545 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) | 547 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) |
546 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) | 548 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) |
547 | 549 | #define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) | |
550 | #define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) | ||
548 | 551 | ||
549 | typedef struct drm_radeon_init { | 552 | typedef struct drm_radeon_init { |
550 | enum { | 553 | enum { |
@@ -796,6 +799,24 @@ struct drm_radeon_gem_create { | |||
796 | uint32_t flags; | 799 | uint32_t flags; |
797 | }; | 800 | }; |
798 | 801 | ||
802 | #define RADEON_TILING_MACRO 0x1 | ||
803 | #define RADEON_TILING_MICRO 0x2 | ||
804 | #define RADEON_TILING_SWAP 0x4 | ||
805 | #define RADEON_TILING_SURFACE 0x8 /* this object requires a surface | ||
806 | * when mapped - i.e. front buffer */ | ||
807 | |||
808 | struct drm_radeon_gem_set_tiling { | ||
809 | uint32_t handle; | ||
810 | uint32_t tiling_flags; | ||
811 | uint32_t pitch; | ||
812 | }; | ||
813 | |||
814 | struct drm_radeon_gem_get_tiling { | ||
815 | uint32_t handle; | ||
816 | uint32_t tiling_flags; | ||
817 | uint32_t pitch; | ||
818 | }; | ||
819 | |||
799 | struct drm_radeon_gem_mmap { | 820 | struct drm_radeon_gem_mmap { |
800 | uint32_t handle; | 821 | uint32_t handle; |
801 | uint32_t pad; | 822 | uint32_t pad; |
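The new GEM tiling ioctls exchange a plain bit mask in tiling_flags, so the flag values added above combine and test with ordinary mask operations. A hedged sketch that reuses only the flag values and struct layout from the header change (the decode helper itself is illustrative, not part of the interface):

#include <stdint.h>
#include <stdio.h>

#define RADEON_TILING_MACRO   0x1
#define RADEON_TILING_MICRO   0x2
#define RADEON_TILING_SWAP    0x4
#define RADEON_TILING_SURFACE 0x8	/* object needs a surface when mapped */

struct drm_radeon_gem_set_tiling {
	uint32_t handle;
	uint32_t tiling_flags;
	uint32_t pitch;
};

static void describe_tiling(const struct drm_radeon_gem_set_tiling *args)
{
	printf("bo %u pitch %u:%s%s%s%s\n", args->handle, args->pitch,
	       (args->tiling_flags & RADEON_TILING_MACRO)   ? " macro"   : "",
	       (args->tiling_flags & RADEON_TILING_MICRO)   ? " micro"   : "",
	       (args->tiling_flags & RADEON_TILING_SWAP)    ? " swap"    : "",
	       (args->tiling_flags & RADEON_TILING_SURFACE) ? " surface" : "");
}

int main(void)
{
	struct drm_radeon_gem_set_tiling args = {
		.handle = 1,
		.tiling_flags = RADEON_TILING_MACRO | RADEON_TILING_MICRO,
		.pitch = 2048,
	};

	describe_tiling(&args);
	return 0;
}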
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 62ed733c52a2..a68829db381a 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -121,6 +121,7 @@ struct ttm_backend { | |||
121 | #define TTM_PAGE_FLAG_SWAPPED (1 << 4) | 121 | #define TTM_PAGE_FLAG_SWAPPED (1 << 4) |
122 | #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) | 122 | #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) |
123 | #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) | 123 | #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) |
124 | #define TTM_PAGE_FLAG_DMA32 (1 << 7) | ||
124 | 125 | ||
125 | enum ttm_caching_state { | 126 | enum ttm_caching_state { |
126 | tt_uncached, | 127 | tt_uncached, |
@@ -353,6 +354,14 @@ struct ttm_bo_driver { | |||
353 | int (*sync_obj_flush) (void *sync_obj, void *sync_arg); | 354 | int (*sync_obj_flush) (void *sync_obj, void *sync_arg); |
354 | void (*sync_obj_unref) (void **sync_obj); | 355 | void (*sync_obj_unref) (void **sync_obj); |
355 | void *(*sync_obj_ref) (void *sync_obj); | 356 | void *(*sync_obj_ref) (void *sync_obj); |
357 | |||
358 | /* hook to notify driver about a driver move so it | ||
359 | * can do tiling things */ | ||
360 | void (*move_notify)(struct ttm_buffer_object *bo, | ||
361 | struct ttm_mem_reg *new_mem); | ||
362 | /* notify the driver we are taking a fault on this BO | ||
363 | * and have reserved it */ | ||
364 | void (*fault_reserve_notify)(struct ttm_buffer_object *bo); | ||
356 | }; | 365 | }; |
357 | 366 | ||
358 | #define TTM_NUM_MEM_TYPES 8 | 367 | #define TTM_NUM_MEM_TYPES 8 |
@@ -429,6 +438,8 @@ struct ttm_bo_device { | |||
429 | */ | 438 | */ |
430 | 439 | ||
431 | struct delayed_work wq; | 440 | struct delayed_work wq; |
441 | |||
442 | bool need_dma32; | ||
432 | }; | 443 | }; |
433 | 444 | ||
434 | /** | 445 | /** |
@@ -648,7 +659,14 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); | |||
648 | extern int ttm_bo_device_init(struct ttm_bo_device *bdev, | 659 | extern int ttm_bo_device_init(struct ttm_bo_device *bdev, |
649 | struct ttm_mem_global *mem_glob, | 660 | struct ttm_mem_global *mem_glob, |
650 | struct ttm_bo_driver *driver, | 661 | struct ttm_bo_driver *driver, |
651 | uint64_t file_page_offset); | 662 | uint64_t file_page_offset, bool need_dma32); |
663 | |||
664 | /** | ||
665 | * ttm_bo_unmap_virtual | ||
666 | * | ||
667 | * @bo: tear down the virtual mappings for this BO | ||
668 | */ | ||
669 | extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | ||
652 | 670 | ||
653 | /** | 671 | /** |
654 | * ttm_bo_reserve: | 672 | * ttm_bo_reserve: |
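The two callbacks added to struct ttm_bo_driver are optional hooks a driver can fill in to hear about buffer moves and about faults taken on a reserved object. A hedged sketch of how a driver might wire them up; only the hook names and signatures come from the header change above, while the mydrv_* functions and their bodies are hypothetical:

#include <drm/ttm/ttm_bo_driver.h>

/* called when TTM moves the buffer object to a new placement, so the
 * driver can refresh any tiling or surface bookkeeping tied to it */
static void mydrv_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *new_mem)
{
	/* driver-private reaction to the move would go here */
}

/* called with the BO already reserved, just before a fault is serviced */
static void mydrv_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	/* e.g. make sure a surface register is set up for this BO */
}

static struct ttm_bo_driver mydrv_bo_driver = {
	/* mandatory hooks omitted in this sketch */
	.move_notify		= mydrv_move_notify,
	.fault_reserve_notify	= mydrv_fault_reserve_notify,
};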
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h index 889a4c7958ae..d1d433834e4f 100644 --- a/include/drm/ttm/ttm_module.h +++ b/include/drm/ttm/ttm_module.h | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
35 | 35 | ||
36 | #define TTM_PFX "[TTM]" | 36 | #define TTM_PFX "[TTM] " |
37 | 37 | ||
38 | enum ttm_global_types { | 38 | enum ttm_global_types { |
39 | TTM_GLOBAL_TTM_MEM = 0, | 39 | TTM_GLOBAL_TTM_MEM = 0, |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 0d6310657f32..655e7721580a 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -84,7 +84,7 @@ typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, | |||
84 | 84 | ||
85 | typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, | 85 | typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, |
86 | struct dm_dev *dev, | 86 | struct dm_dev *dev, |
87 | sector_t physical_start, | 87 | sector_t start, sector_t len, |
88 | void *data); | 88 | void *data); |
89 | 89 | ||
90 | typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, | 90 | typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, |
@@ -104,7 +104,7 @@ void dm_error(const char *message); | |||
104 | * Combine device limits. | 104 | * Combine device limits. |
105 | */ | 105 | */ |
106 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | 106 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, |
107 | sector_t start, void *data); | 107 | sector_t start, sector_t len, void *data); |
108 | 108 | ||
109 | struct dm_dev { | 109 | struct dm_dev { |
110 | struct block_device *bdev; | 110 | struct block_device *bdev; |
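iterate_devices_callout_fn and dm_set_device_limits() now receive both the start sector and the length of the region that the target maps on the underlying device. A hedged sketch of a callout written against the new signature; the alignment policy and the mydm_* name are made up for illustration:

#include <linux/device-mapper.h>
#include <linux/errno.h>

/* reject any underlying region that is not aligned to 4 KiB
 * (8 sectors of 512 bytes) -- purely an example policy */
static int mydm_check_alignment(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	if ((start | len) & 7)
		return -EINVAL;
	return 0;
}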
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 634a5e5aba3e..7499b3667798 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h | |||
@@ -874,7 +874,7 @@ struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); | |||
874 | struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); | 874 | struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); |
875 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | 875 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, |
876 | sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, | 876 | sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, |
877 | int create, int extend_disksize); | 877 | int create); |
878 | 878 | ||
879 | extern struct inode *ext3_iget(struct super_block *, unsigned long); | 879 | extern struct inode *ext3_iget(struct super_block *, unsigned long); |
880 | extern int ext3_write_inode (struct inode *, int); | 880 | extern int ext3_write_inode (struct inode *, int); |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 6c3de999fb34..4d6f47b51189 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
@@ -352,7 +352,7 @@ extern void fsnotify_unmount_inodes(struct list_head *list); | |||
352 | /* put here because inotify does some weird stuff when destroying watches */ | 352 | /* put here because inotify does some weird stuff when destroying watches */ |
353 | extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, | 353 | extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, |
354 | void *data, int data_is, const char *name, | 354 | void *data, int data_is, const char *name, |
355 | u32 cookie); | 355 | u32 cookie, gfp_t gfp); |
356 | 356 | ||
357 | #else | 357 | #else |
358 | 358 | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index 79b6d7fd4ac2..e5b6e33c6571 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -589,6 +589,7 @@ struct ata_device { | |||
589 | #endif | 589 | #endif |
590 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ | 590 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ |
591 | u64 n_sectors; /* size of device, if ATA */ | 591 | u64 n_sectors; /* size of device, if ATA */ |
592 | u64 n_native_sectors; /* native size, if ATA */ | ||
592 | unsigned int class; /* ATA_DEV_xxx */ | 593 | unsigned int class; /* ATA_DEV_xxx */ |
593 | unsigned long unpark_deadline; | 594 | unsigned long unpark_deadline; |
594 | 595 | ||
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index c9663c690303..53b94e025c7c 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h | |||
@@ -18,5 +18,8 @@ extern struct phy_device *of_phy_connect(struct net_device *dev, | |||
18 | struct device_node *phy_np, | 18 | struct device_node *phy_np, |
19 | void (*hndlr)(struct net_device *), | 19 | void (*hndlr)(struct net_device *), |
20 | u32 flags, phy_interface_t iface); | 20 | u32 flags, phy_interface_t iface); |
21 | extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | ||
22 | void (*hndlr)(struct net_device *), | ||
23 | phy_interface_t iface); | ||
21 | 24 | ||
22 | #endif /* __LINUX_OF_MDIO_H */ | 25 | #endif /* __LINUX_OF_MDIO_H */ |
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 2ce29831feb6..278777fa8a3a 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h | |||
@@ -224,7 +224,7 @@ void rfkill_destroy(struct rfkill *rfkill); | |||
224 | * should be blocked) so that drivers need not keep track of the soft | 224 | * should be blocked) so that drivers need not keep track of the soft |
225 | * block state -- which they might not be able to. | 225 | * block state -- which they might not be able to. |
226 | */ | 226 | */ |
227 | bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); | 227 | bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * rfkill_set_sw_state - Set the internal rfkill software block state | 230 | * rfkill_set_sw_state - Set the internal rfkill software block state |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 1488d8c81aac..e8c6c9136c97 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -394,6 +394,7 @@ extern void __do_SAK(struct tty_struct *tty); | |||
394 | extern void disassociate_ctty(int priv); | 394 | extern void disassociate_ctty(int priv); |
395 | extern void no_tty(void); | 395 | extern void no_tty(void); |
396 | extern void tty_flip_buffer_push(struct tty_struct *tty); | 396 | extern void tty_flip_buffer_push(struct tty_struct *tty); |
397 | extern void tty_flush_to_ldisc(struct tty_struct *tty); | ||
397 | extern void tty_buffer_free_all(struct tty_struct *tty); | 398 | extern void tty_buffer_free_all(struct tty_struct *tty); |
398 | extern void tty_buffer_flush(struct tty_struct *tty); | 399 | extern void tty_buffer_flush(struct tty_struct *tty); |
399 | extern void tty_buffer_init(struct tty_struct *tty); | 400 | extern void tty_buffer_init(struct tty_struct *tty); |
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 95846d988011..74f16876f38d 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
@@ -338,6 +338,7 @@ struct v4l2_pix_format { | |||
338 | /* Vendor-specific formats */ | 338 | /* Vendor-specific formats */ |
339 | #define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ | 339 | #define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ |
340 | #define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ | 340 | #define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ |
341 | #define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */ | ||
341 | #define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ | 342 | #define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ |
342 | #define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ | 343 | #define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ |
343 | #define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ | 344 | #define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ |
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h index 11a4a2d3e364..94e908c0d7a0 100644 --- a/include/media/v4l2-chip-ident.h +++ b/include/media/v4l2-chip-ident.h | |||
@@ -60,6 +60,10 @@ enum { | |||
60 | V4L2_IDENT_OV7670 = 250, | 60 | V4L2_IDENT_OV7670 = 250, |
61 | V4L2_IDENT_OV7720 = 251, | 61 | V4L2_IDENT_OV7720 = 251, |
62 | V4L2_IDENT_OV7725 = 252, | 62 | V4L2_IDENT_OV7725 = 252, |
63 | V4L2_IDENT_OV7660 = 253, | ||
64 | V4L2_IDENT_OV9650 = 254, | ||
65 | V4L2_IDENT_OV9655 = 255, | ||
66 | V4L2_IDENT_SOI968 = 256, | ||
63 | 67 | ||
64 | /* module saa7146: reserved range 300-309 */ | 68 | /* module saa7146: reserved range 300-309 */ |
65 | V4L2_IDENT_SAA7146 = 300, | 69 | V4L2_IDENT_SAA7146 = 300, |
@@ -161,6 +165,9 @@ enum { | |||
161 | /* module tw9910: just ident 9910 */ | 165 | /* module tw9910: just ident 9910 */ |
162 | V4L2_IDENT_TW9910 = 9910, | 166 | V4L2_IDENT_TW9910 = 9910, |
163 | 167 | ||
168 | /* module sn9c20x: just ident 10000 */ | ||
169 | V4L2_IDENT_SN9C20X = 10000, | ||
170 | |||
164 | /* module msp3400: reserved range 34000-34999 and 44000-44999 */ | 171 | /* module msp3400: reserved range 34000-34999 and 44000-44999 */ |
165 | V4L2_IDENT_MSPX4XX = 34000, /* generic MSPX4XX identifier, only | 172 | V4L2_IDENT_MSPX4XX = 34000, /* generic MSPX4XX identifier, only |
166 | use internally (tveeprom.c). */ | 173 | use internally (tveeprom.c). */ |
@@ -237,6 +244,11 @@ enum { | |||
237 | V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */ | 244 | V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */ |
238 | V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */ | 245 | V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */ |
239 | V4L2_IDENT_MT9T031 = 45020, | 246 | V4L2_IDENT_MT9T031 = 45020, |
247 | V4L2_IDENT_MT9V111 = 45031, | ||
248 | V4L2_IDENT_MT9V112 = 45032, | ||
249 | |||
250 | /* HV7131R CMOS sensor: just ident 46000 */ | ||
251 | V4L2_IDENT_HV7131R = 46000, | ||
240 | 252 | ||
241 | /* module cs53132a: just ident 53132 */ | 253 | /* module cs53132a: just ident 53132 */ |
242 | V4L2_IDENT_CS53l32A = 53132, | 254 | V4L2_IDENT_CS53l32A = 53132, |
diff --git a/include/net/rose.h b/include/net/rose.h index cbd5364b2c8a..5ba9f02731eb 100644 --- a/include/net/rose.h +++ b/include/net/rose.h | |||
@@ -156,7 +156,7 @@ extern int sysctl_rose_maximum_vcs; | |||
156 | extern int sysctl_rose_window_size; | 156 | extern int sysctl_rose_window_size; |
157 | extern int rosecmp(rose_address *, rose_address *); | 157 | extern int rosecmp(rose_address *, rose_address *); |
158 | extern int rosecmpm(rose_address *, rose_address *, unsigned short); | 158 | extern int rosecmpm(rose_address *, rose_address *, unsigned short); |
159 | extern const char *rose2asc(const rose_address *); | 159 | extern char *rose2asc(char *buf, const rose_address *); |
160 | extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *); | 160 | extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *); |
161 | extern void rose_kill_by_neigh(struct rose_neigh *); | 161 | extern void rose_kill_by_neigh(struct rose_neigh *); |
162 | extern unsigned int rose_new_lci(struct rose_neigh *); | 162 | extern unsigned int rose_new_lci(struct rose_neigh *); |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 9b1a7de26979..eb8751aa0418 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind); | |||
180 | * @k: thread created by kthread_create(). | 180 | * @k: thread created by kthread_create(). |
181 | * | 181 | * |
182 | * Sets kthread_should_stop() for @k to return true, wakes it, and | 182 | * Sets kthread_should_stop() for @k to return true, wakes it, and |
183 | * waits for it to exit. Your threadfn() must not call do_exit() | 183 | * waits for it to exit. This can also be called after kthread_create() |
184 | * itself if you use this function! This can also be called after | 184 | * instead of calling wake_up_process(): the thread will exit without |
185 | * kthread_create() instead of calling wake_up_process(): the thread | 185 | * calling threadfn(). |
186 | * will exit without calling threadfn(). | 186 | * |
187 | * If threadfn() may call do_exit() itself, the caller must ensure | ||
188 | * task_struct can't go away. | ||
187 | * | 189 | * |
188 | * Returns the result of threadfn(), or %-EINTR if wake_up_process() | 190 | * Returns the result of threadfn(), or %-EINTR if wake_up_process() |
189 | * was never called. | 191 | * was never called. |
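The reworded kthread_stop() comment drops the old ban on threadfn() calling do_exit() and instead only requires that the caller keep the task_struct from going away in that case. The usual pairing is unchanged; a hedged sketch of it (my_worker and the surrounding functions are illustrative, module boilerplate omitted):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int my_worker(void *data)
{
	while (!kthread_should_stop()) {
		/* one unit of work per loop iteration */
		msleep(100);
	}
	return 0;	/* this value is what kthread_stop() returns */
}

static int start_worker(void)
{
	worker = kthread_run(my_worker, NULL, "my_worker");
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void stop_worker(void)
{
	kthread_stop(worker);	/* wakes the thread and waits for it to exit */
}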
diff --git a/kernel/module.c b/kernel/module.c index 0a049837008e..fd1411403558 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, | |||
1068 | { | 1068 | { |
1069 | const unsigned long *crc; | 1069 | const unsigned long *crc; |
1070 | 1070 | ||
1071 | if (!find_symbol("module_layout", NULL, &crc, true, false)) | 1071 | if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL, |
1072 | &crc, true, false)) | ||
1072 | BUG(); | 1073 | BUG(); |
1073 | return check_version(sechdrs, versindex, "module_layout", mod, crc); | 1074 | return check_version(sechdrs, versindex, "module_layout", mod, crc); |
1074 | } | 1075 | } |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 833139ce1e22..e22c148e4b7f 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query, | |||
164 | 164 | ||
165 | if (!newflags) | 165 | if (!newflags) |
166 | dt->num_enabled--; | 166 | dt->num_enabled--; |
167 | else if (!dp-flags) | 167 | else if (!dp->flags) |
168 | dt->num_enabled++; | 168 | dt->num_enabled++; |
169 | dp->flags = newflags; | 169 | dp->flags = newflags; |
170 | if (newflags) { | 170 | if (newflags) { |
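The dynamic_debug one-liner fixes an operator-precedence slip: unary ! binds tighter than binary -, so the old `!dp-flags` compiled as `(!dp) - flags` (a different variable named flags is in scope there) instead of testing the flags member. A small standalone demonstration of the parse, with illustrative names:

#include <stdio.h>

struct ddebug_toy {
	int flags;
};

int main(void)
{
	struct ddebug_toy d = { .flags = 0 };
	struct ddebug_toy *dp = &d;
	int flags = 1;	/* another variable that happens to be in scope */

	/* intended test: is the member zero? */
	printf("!dp->flags    = %d\n", !dp->flags);

	/* buggy spelling: parsed as (!dp) - flags, i.e. 0 - 1 here */
	printf("(!dp) - flags = %d\n", !dp - flags);

	return 0;
}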
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5aabd41ffb8f..487267310a84 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1217,7 +1217,6 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) | |||
1217 | } | 1217 | } |
1218 | object = NULL; | 1218 | object = NULL; |
1219 | out: | 1219 | out: |
1220 | rcu_read_unlock(); | ||
1221 | return object; | 1220 | return object; |
1222 | } | 1221 | } |
1223 | 1222 | ||
@@ -1233,13 +1232,11 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1233 | 1232 | ||
1234 | ++(*pos); | 1233 | ++(*pos); |
1235 | 1234 | ||
1236 | rcu_read_lock(); | ||
1237 | list_for_each_continue_rcu(n, &object_list) { | 1235 | list_for_each_continue_rcu(n, &object_list) { |
1238 | next_obj = list_entry(n, struct kmemleak_object, object_list); | 1236 | next_obj = list_entry(n, struct kmemleak_object, object_list); |
1239 | if (get_object(next_obj)) | 1237 | if (get_object(next_obj)) |
1240 | break; | 1238 | break; |
1241 | } | 1239 | } |
1242 | rcu_read_unlock(); | ||
1243 | 1240 | ||
1244 | put_object(prev_obj); | 1241 | put_object(prev_obj); |
1245 | return next_obj; | 1242 | return next_obj; |
@@ -1255,6 +1252,7 @@ static void kmemleak_seq_stop(struct seq_file *seq, void *v) | |||
1255 | * kmemleak_seq_start may return ERR_PTR if the scan_mutex | 1252 | * kmemleak_seq_start may return ERR_PTR if the scan_mutex |
1256 | * waiting was interrupted, so only release it if !IS_ERR. | 1253 | * waiting was interrupted, so only release it if !IS_ERR. |
1257 | */ | 1254 | */ |
1255 | rcu_read_unlock(); | ||
1258 | mutex_unlock(&scan_mutex); | 1256 | mutex_unlock(&scan_mutex); |
1259 | if (v) | 1257 | if (v) |
1260 | put_object(v); | 1258 | put_object(v); |
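The kmemleak change stops toggling the RCU read lock inside ->start and ->next and instead holds it across the whole seq_file walk, releasing it in ->stop. A hedged sketch of that locking shape for an iterator over RCU-protected data; the my_seq_* names are illustrative and the actual list walk is elided:

#include <linux/seq_file.h>
#include <linux/rcupdate.h>

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* take the RCU read lock once; it stays held through ->next/->show */
	rcu_read_lock();
	/* a real iterator would walk its RCU-protected list up to *pos here */
	return NULL;
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++(*pos);
	/* still under the rcu_read_lock() taken in ->start */
	return NULL;
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
	rcu_read_unlock();	/* paired with the lock taken in ->start */
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= my_seq_next,
	.stop	= my_seq_stop,
	.show	= my_seq_show,
};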
diff --git a/mm/memory.c b/mm/memory.c index 65216194eb8d..aede2ce3aba4 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -135,11 +135,12 @@ void pmd_clear_bad(pmd_t *pmd) | |||
135 | * Note: this doesn't free the actual pages themselves. That | 135 | * Note: this doesn't free the actual pages themselves. That |
136 | * has been handled earlier when unmapping all the memory regions. | 136 | * has been handled earlier when unmapping all the memory regions. |
137 | */ | 137 | */ |
138 | static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd) | 138 | static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, |
139 | unsigned long addr) | ||
139 | { | 140 | { |
140 | pgtable_t token = pmd_pgtable(*pmd); | 141 | pgtable_t token = pmd_pgtable(*pmd); |
141 | pmd_clear(pmd); | 142 | pmd_clear(pmd); |
142 | pte_free_tlb(tlb, token); | 143 | pte_free_tlb(tlb, token, addr); |
143 | tlb->mm->nr_ptes--; | 144 | tlb->mm->nr_ptes--; |
144 | } | 145 | } |
145 | 146 | ||
@@ -157,7 +158,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
157 | next = pmd_addr_end(addr, end); | 158 | next = pmd_addr_end(addr, end); |
158 | if (pmd_none_or_clear_bad(pmd)) | 159 | if (pmd_none_or_clear_bad(pmd)) |
159 | continue; | 160 | continue; |
160 | free_pte_range(tlb, pmd); | 161 | free_pte_range(tlb, pmd, addr); |
161 | } while (pmd++, addr = next, addr != end); | 162 | } while (pmd++, addr = next, addr != end); |
162 | 163 | ||
163 | start &= PUD_MASK; | 164 | start &= PUD_MASK; |
@@ -173,7 +174,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
173 | 174 | ||
174 | pmd = pmd_offset(pud, start); | 175 | pmd = pmd_offset(pud, start); |
175 | pud_clear(pud); | 176 | pud_clear(pud); |
176 | pmd_free_tlb(tlb, pmd); | 177 | pmd_free_tlb(tlb, pmd, start); |
177 | } | 178 | } |
178 | 179 | ||
179 | static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | 180 | static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, |
@@ -206,7 +207,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
206 | 207 | ||
207 | pud = pud_offset(pgd, start); | 208 | pud = pud_offset(pgd, start); |
208 | pgd_clear(pgd); | 209 | pgd_clear(pgd); |
209 | pud_free_tlb(tlb, pud); | 210 | pud_free_tlb(tlb, pud, start); |
210 | } | 211 | } |
211 | 212 | ||
212 | /* | 213 | /* |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 8a96672e2c5c..eb404dc3ed6e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -424,7 +424,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
424 | err2: | 424 | err2: |
425 | br_fdb_delete_by_port(br, p, 1); | 425 | br_fdb_delete_by_port(br, p, 1); |
426 | err1: | 426 | err1: |
427 | kobject_del(&p->kobj); | 427 | kobject_put(&p->kobj); |
428 | err0: | 428 | err0: |
429 | dev_set_promiscuity(dev, -1); | 429 | dev_set_promiscuity(dev, -1); |
430 | put_back: | 430 | put_back: |
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index ecf4eb2717cb..9cb79f95bf63 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -1453,6 +1453,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) | |||
1453 | } | 1453 | } |
1454 | /* Dup */ | 1454 | /* Dup */ |
1455 | memcpy(new, orig, sizeof(struct tsap_cb)); | 1455 | memcpy(new, orig, sizeof(struct tsap_cb)); |
1456 | spin_lock_init(&new->lock); | ||
1456 | 1457 | ||
1457 | /* We don't need the old instance any more */ | 1458 | /* We don't need the old instance any more */ |
1458 | spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); | 1459 | spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); |
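irttp_dup() copies the whole control block with memcpy(), which also copies the embedded spinlock in whatever state it happened to be in; the added spin_lock_init() gives the duplicate a lock of its own. The general rule, in a short hedged sketch (struct widget and widget_dup are illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct widget {
	int value;
	spinlock_t lock;
};

static struct widget *widget_dup(const struct widget *orig, gfp_t gfp)
{
	struct widget *new = kmalloc(sizeof(*new), gfp);

	if (!new)
		return NULL;

	memcpy(new, orig, sizeof(*new));
	/* the copied lock state is meaningless (it may even look held);
	 * a byte-wise copy must always be followed by re-initialization */
	spin_lock_init(&new->lock);
	return new;
}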
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index ba2643a43c73..7836ee928983 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -83,6 +83,7 @@ endmenu | |||
83 | config MAC80211_MESH | 83 | config MAC80211_MESH |
84 | bool "Enable mac80211 mesh networking (pre-802.11s) support" | 84 | bool "Enable mac80211 mesh networking (pre-802.11s) support" |
85 | depends on MAC80211 && EXPERIMENTAL | 85 | depends on MAC80211 && EXPERIMENTAL |
86 | depends on BROKEN | ||
86 | ---help--- | 87 | ---help--- |
87 | This option enables support of Draft 802.11s mesh networking. | 88 | This option enables support of Draft 802.11s mesh networking. |
88 | The implementation is based on Draft 1.08 of the Mesh Networking | 89 | The implementation is based on Draft 1.08 of the Mesh Networking |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 3c72557df45a..479597e88583 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -175,6 +175,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
175 | int err = 0; | 175 | int err = 0; |
176 | u32 hash_idx; | 176 | u32 hash_idx; |
177 | 177 | ||
178 | might_sleep(); | ||
179 | |||
178 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) | 180 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) |
179 | /* never add ourselves as neighbours */ | 181 | /* never add ourselves as neighbours */ |
180 | return -ENOTSUPP; | 182 | return -ENOTSUPP; |
@@ -265,6 +267,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | |||
265 | int err = 0; | 267 | int err = 0; |
266 | u32 hash_idx; | 268 | u32 hash_idx; |
267 | 269 | ||
270 | might_sleep(); | ||
268 | 271 | ||
269 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) | 272 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) |
270 | /* never add ourselves as neighbours */ | 273 | /* never add ourselves as neighbours */ |
@@ -491,8 +494,10 @@ void mesh_path_tx_pending(struct mesh_path *mpath) | |||
491 | * @skb: frame to discard | 494 | * @skb: frame to discard |
492 | * @sdata: network subif the frame was to be sent through | 495 | * @sdata: network subif the frame was to be sent through |
493 | * | 496 | * |
494 | * If the frame was beign forwarded from another MP, a PERR frame will be sent | 497 | * If the frame was being forwarded from another MP, a PERR frame will be sent |
495 | * to the precursor. | 498 | * to the precursor. The precursor's address (i.e. the previous hop) was saved |
499 | * in addr1 of the frame-to-be-forwarded, and would only be overwritten once | ||
500 | * the destination is successfully resolved. | ||
496 | * | 501 | * |
497 | * Locking: the function must be called within a rcu_read_lock region | 502 | * Locking: the function must be called within a rcu_read_lock region |
498 | */ | 503 | */ |
@@ -507,7 +512,7 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
507 | u8 *ra, *da; | 512 | u8 *ra, *da; |
508 | 513 | ||
509 | da = hdr->addr3; | 514 | da = hdr->addr3; |
510 | ra = hdr->addr2; | 515 | ra = hdr->addr1; |
511 | mpath = mesh_path_lookup(da, sdata); | 516 | mpath = mesh_path_lookup(da, sdata); |
512 | if (mpath) | 517 | if (mpath) |
513 | dsn = ++mpath->dsn; | 518 | dsn = ++mpath->dsn; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index d238a8939a09..3a8922cd1038 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1455,7 +1455,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1455 | monitor_iface = UNKNOWN_ADDRESS; | 1455 | monitor_iface = UNKNOWN_ADDRESS; |
1456 | 1456 | ||
1457 | len_rthdr = ieee80211_get_radiotap_len(skb->data); | 1457 | len_rthdr = ieee80211_get_radiotap_len(skb->data); |
1458 | hdr = (struct ieee80211_hdr *)skb->data + len_rthdr; | 1458 | hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); |
1459 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 1459 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1460 | 1460 | ||
1461 | /* check the header is complete in the frame */ | 1461 | /* check the header is complete in the frame */ |
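The one-character mac80211 fix is a cast-precedence bug: a cast binds tighter than +, so `(struct ieee80211_hdr *)skb->data + len_rthdr` stepped the pointer by len_rthdr whole headers rather than len_rthdr bytes past the radiotap header. A small standalone demonstration (struct hdr is a made-up stand-in):

#include <stdio.h>

struct hdr {
	unsigned short frame_control;
	unsigned char addr[18];
};	/* 20 bytes, a stripped-down stand-in for a frame header */

int main(void)
{
	unsigned char buf[256];
	unsigned int rt_len = 8;	/* e.g. a radiotap header length */

	struct hdr *wrong = (struct hdr *)buf + rt_len;   /* buf + 8 * sizeof(struct hdr) */
	struct hdr *right = (struct hdr *)(buf + rt_len); /* buf + 8 bytes */

	printf("wrong offset: %td bytes\n", (unsigned char *)wrong - buf);
	printf("right offset: %td bytes\n", (unsigned char *)right - buf);
	return 0;
}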
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 79693fe2001e..2fc4a1724eb8 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -549,6 +549,10 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) | |||
549 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); | 549 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); |
550 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); | 550 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); |
551 | __rfkill_set_sw_state(rfkill, sw); | 551 | __rfkill_set_sw_state(rfkill, sw); |
552 | if (hw) | ||
553 | rfkill->state |= RFKILL_BLOCK_HW; | ||
554 | else | ||
555 | rfkill->state &= ~RFKILL_BLOCK_HW; | ||
552 | 556 | ||
553 | spin_unlock_irqrestore(&rfkill->lock, flags); | 557 | spin_unlock_irqrestore(&rfkill->lock, flags); |
554 | 558 | ||
@@ -648,15 +652,26 @@ static ssize_t rfkill_state_store(struct device *dev, | |||
648 | struct device_attribute *attr, | 652 | struct device_attribute *attr, |
649 | const char *buf, size_t count) | 653 | const char *buf, size_t count) |
650 | { | 654 | { |
651 | /* | 655 | struct rfkill *rfkill = to_rfkill(dev); |
652 | * The intention was that userspace can only take control over | 656 | unsigned long state; |
653 | * a given device when/if rfkill-input doesn't control it due | 657 | int err; |
654 | * to user_claim. Since user_claim is currently unsupported, | 658 | |
655 | * we never support changing the state from userspace -- this | 659 | if (!capable(CAP_NET_ADMIN)) |
656 | * can be implemented again later. | 660 | return -EPERM; |
657 | */ | 661 | |
662 | err = strict_strtoul(buf, 0, &state); | ||
663 | if (err) | ||
664 | return err; | ||
665 | |||
666 | if (state != RFKILL_USER_STATE_SOFT_BLOCKED && | ||
667 | state != RFKILL_USER_STATE_UNBLOCKED) | ||
668 | return -EINVAL; | ||
669 | |||
670 | mutex_lock(&rfkill_global_mutex); | ||
671 | rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); | ||
672 | mutex_unlock(&rfkill_global_mutex); | ||
658 | 673 | ||
659 | return -EPERM; | 674 | return err ?: count; |
660 | } | 675 | } |
661 | 676 | ||
662 | static ssize_t rfkill_claim_show(struct device *dev, | 677 | static ssize_t rfkill_claim_show(struct device *dev, |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 6bd8e93869ed..f0a76f6bca71 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -92,23 +92,21 @@ static void rose_set_lockdep_key(struct net_device *dev) | |||
92 | /* | 92 | /* |
93 | * Convert a ROSE address into text. | 93 | * Convert a ROSE address into text. |
94 | */ | 94 | */ |
95 | const char *rose2asc(const rose_address *addr) | 95 | char *rose2asc(char *buf, const rose_address *addr) |
96 | { | 96 | { |
97 | static char buffer[11]; | ||
98 | |||
99 | if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && | 97 | if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && |
100 | addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && | 98 | addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && |
101 | addr->rose_addr[4] == 0x00) { | 99 | addr->rose_addr[4] == 0x00) { |
102 | strcpy(buffer, "*"); | 100 | strcpy(buf, "*"); |
103 | } else { | 101 | } else { |
104 | sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, | 102 | sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, |
105 | addr->rose_addr[1] & 0xFF, | 103 | addr->rose_addr[1] & 0xFF, |
106 | addr->rose_addr[2] & 0xFF, | 104 | addr->rose_addr[2] & 0xFF, |
107 | addr->rose_addr[3] & 0xFF, | 105 | addr->rose_addr[3] & 0xFF, |
108 | addr->rose_addr[4] & 0xFF); | 106 | addr->rose_addr[4] & 0xFF); |
109 | } | 107 | } |
110 | 108 | ||
111 | return buffer; | 109 | return buf; |
112 | } | 110 | } |
113 | 111 | ||
114 | /* | 112 | /* |
@@ -1437,7 +1435,7 @@ static void rose_info_stop(struct seq_file *seq, void *v) | |||
1437 | 1435 | ||
1438 | static int rose_info_show(struct seq_file *seq, void *v) | 1436 | static int rose_info_show(struct seq_file *seq, void *v) |
1439 | { | 1437 | { |
1440 | char buf[11]; | 1438 | char buf[11], rsbuf[11]; |
1441 | 1439 | ||
1442 | if (v == SEQ_START_TOKEN) | 1440 | if (v == SEQ_START_TOKEN) |
1443 | seq_puts(seq, | 1441 | seq_puts(seq, |
@@ -1455,8 +1453,8 @@ static int rose_info_show(struct seq_file *seq, void *v) | |||
1455 | devname = dev->name; | 1453 | devname = dev->name; |
1456 | 1454 | ||
1457 | seq_printf(seq, "%-10s %-9s ", | 1455 | seq_printf(seq, "%-10s %-9s ", |
1458 | rose2asc(&rose->dest_addr), | 1456 | rose2asc(rsbuf, &rose->dest_addr), |
1459 | ax2asc(buf, &rose->dest_call)); | 1457 | ax2asc(buf, &rose->dest_call)); |
1460 | 1458 | ||
1461 | if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) | 1459 | if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) |
1462 | callsign = "??????-?"; | 1460 | callsign = "??????-?"; |
@@ -1465,7 +1463,7 @@ static int rose_info_show(struct seq_file *seq, void *v) | |||
1465 | 1463 | ||
1466 | seq_printf(seq, | 1464 | seq_printf(seq, |
1467 | "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", | 1465 | "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", |
1468 | rose2asc(&rose->source_addr), | 1466 | rose2asc(rsbuf, &rose->source_addr), |
1469 | callsign, | 1467 | callsign, |
1470 | devname, | 1468 | devname, |
1471 | rose->lci & 0x0FFF, | 1469 | rose->lci & 0x0FFF, |
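rose2asc() used to format into a function-local static buffer, which breaks as soon as two results are needed at once -- exactly what the /proc printer above now does with a source and a destination address in one seq_printf() call -- so it now writes into a caller-supplied buffer. A minimal standalone illustration of the same change (fmt_static and fmt_buf are made-up names):

#include <stdio.h>

/* old style: every call overwrites the one shared static buffer */
static const char *fmt_static(unsigned int v)
{
	static char buffer[16];

	snprintf(buffer, sizeof(buffer), "%05u", v);
	return buffer;
}

/* new style: the caller owns the storage, so results can coexist */
static char *fmt_buf(char *buf, size_t len, unsigned int v)
{
	snprintf(buf, len, "%05u", v);
	return buf;
}

int main(void)
{
	char a[16], b[16];

	/* both arguments print the same value with the static version */
	printf("static: %s %s\n", fmt_static(1), fmt_static(2));
	printf("buffer: %s %s\n", fmt_buf(a, sizeof(a), 1),
				  fmt_buf(b, sizeof(b), 2));
	return 0;
}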
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index a81066a1010a..9478d9b3d977 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -1104,6 +1104,7 @@ static void rose_node_stop(struct seq_file *seq, void *v) | |||
1104 | 1104 | ||
1105 | static int rose_node_show(struct seq_file *seq, void *v) | 1105 | static int rose_node_show(struct seq_file *seq, void *v) |
1106 | { | 1106 | { |
1107 | char rsbuf[11]; | ||
1107 | int i; | 1108 | int i; |
1108 | 1109 | ||
1109 | if (v == SEQ_START_TOKEN) | 1110 | if (v == SEQ_START_TOKEN) |
@@ -1112,13 +1113,13 @@ static int rose_node_show(struct seq_file *seq, void *v) | |||
1112 | const struct rose_node *rose_node = v; | 1113 | const struct rose_node *rose_node = v; |
1113 | /* if (rose_node->loopback) { | 1114 | /* if (rose_node->loopback) { |
1114 | seq_printf(seq, "%-10s %04d 1 loopback\n", | 1115 | seq_printf(seq, "%-10s %04d 1 loopback\n", |
1115 | rose2asc(&rose_node->address), | 1116 | rose2asc(rsbuf, &rose_node->address), |
1116 | rose_node->mask); | 1117 | rose_node->mask); |
1117 | } else { */ | 1118 | } else { */ |
1118 | seq_printf(seq, "%-10s %04d %d", | 1119 | seq_printf(seq, "%-10s %04d %d", |
1119 | rose2asc(&rose_node->address), | 1120 | rose2asc(rsbuf, &rose_node->address), |
1120 | rose_node->mask, | 1121 | rose_node->mask, |
1121 | rose_node->count); | 1122 | rose_node->count); |
1122 | 1123 | ||
1123 | for (i = 0; i < rose_node->count; i++) | 1124 | for (i = 0; i < rose_node->count; i++) |
1124 | seq_printf(seq, " %05d", | 1125 | seq_printf(seq, " %05d", |
@@ -1267,7 +1268,7 @@ static void rose_route_stop(struct seq_file *seq, void *v) | |||
1267 | 1268 | ||
1268 | static int rose_route_show(struct seq_file *seq, void *v) | 1269 | static int rose_route_show(struct seq_file *seq, void *v) |
1269 | { | 1270 | { |
1270 | char buf[11]; | 1271 | char buf[11], rsbuf[11]; |
1271 | 1272 | ||
1272 | if (v == SEQ_START_TOKEN) | 1273 | if (v == SEQ_START_TOKEN) |
1273 | seq_puts(seq, | 1274 | seq_puts(seq, |
@@ -1279,7 +1280,7 @@ static int rose_route_show(struct seq_file *seq, void *v) | |||
1279 | seq_printf(seq, | 1280 | seq_printf(seq, |
1280 | "%3.3X %-10s %-9s %05d ", | 1281 | "%3.3X %-10s %-9s %05d ", |
1281 | rose_route->lci1, | 1282 | rose_route->lci1, |
1282 | rose2asc(&rose_route->src_addr), | 1283 | rose2asc(rsbuf, &rose_route->src_addr), |
1283 | ax2asc(buf, &rose_route->src_call), | 1284 | ax2asc(buf, &rose_route->src_call), |
1284 | rose_route->neigh1->number); | 1285 | rose_route->neigh1->number); |
1285 | else | 1286 | else |
@@ -1289,10 +1290,10 @@ static int rose_route_show(struct seq_file *seq, void *v) | |||
1289 | if (rose_route->neigh2) | 1290 | if (rose_route->neigh2) |
1290 | seq_printf(seq, | 1291 | seq_printf(seq, |
1291 | "%3.3X %-10s %-9s %05d\n", | 1292 | "%3.3X %-10s %-9s %05d\n", |
1292 | rose_route->lci2, | 1293 | rose_route->lci2, |
1293 | rose2asc(&rose_route->dest_addr), | 1294 | rose2asc(rsbuf, &rose_route->dest_addr), |
1294 | ax2asc(buf, &rose_route->dest_call), | 1295 | ax2asc(buf, &rose_route->dest_call), |
1295 | rose_route->neigh2->number); | 1296 | rose_route->neigh2->number); |
1296 | else | 1297 | else |
1297 | seq_puts(seq, | 1298 | seq_puts(seq, |
1298 | "000 * * 00000\n"); | 1299 | "000 * * 00000\n"); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 43bdb1372cae..634496b3ed77 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -997,7 +997,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
997 | 997 | ||
998 | if (IS_ERR(hdr)) { | 998 | if (IS_ERR(hdr)) { |
999 | err = PTR_ERR(hdr); | 999 | err = PTR_ERR(hdr); |
1000 | goto out; | 1000 | goto free_msg; |
1001 | } | 1001 | } |
1002 | 1002 | ||
1003 | cookie.msg = msg; | 1003 | cookie.msg = msg; |
@@ -1011,7 +1011,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
1011 | &cookie, get_key_callback); | 1011 | &cookie, get_key_callback); |
1012 | 1012 | ||
1013 | if (err) | 1013 | if (err) |
1014 | goto out; | 1014 | goto free_msg; |
1015 | 1015 | ||
1016 | if (cookie.error) | 1016 | if (cookie.error) |
1017 | goto nla_put_failure; | 1017 | goto nla_put_failure; |
@@ -1022,6 +1022,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
1022 | 1022 | ||
1023 | nla_put_failure: | 1023 | nla_put_failure: |
1024 | err = -ENOBUFS; | 1024 | err = -ENOBUFS; |
1025 | free_msg: | ||
1025 | nlmsg_free(msg); | 1026 | nlmsg_free(msg); |
1026 | out: | 1027 | out: |
1027 | cfg80211_put_dev(drv); | 1028 | cfg80211_put_dev(drv); |
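The nl80211_get_key() fix adds a dedicated free_msg: label so that failures after the reply buffer was allocated release it before falling through to the common cleanup, while earlier failures still jump straight to out:. The layered-label idiom in a standalone hedged sketch (do_operation and its arguments are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int do_operation(int fail_early, int fail_late)
{
	char *msg = NULL;
	int err = 0;

	if (fail_early) {
		err = -1;
		goto out;		/* nothing allocated yet */
	}

	msg = malloc(128);
	if (!msg) {
		err = -1;
		goto out;
	}

	if (fail_late) {
		err = -2;
		goto free_msg;		/* msg exists and must be released */
	}

	printf("success\n");

free_msg:
	free(msg);
out:
	return err;
}

int main(void)
{
	do_operation(1, 0);
	do_operation(0, 1);
	do_operation(0, 0);
	return 0;
}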
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index f8e71b300001..9271118e1fc4 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -35,8 +35,6 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | |||
35 | else | 35 | else |
36 | nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); | 36 | nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); |
37 | 37 | ||
38 | wiphy_to_dev(request->wiphy)->scan_req = NULL; | ||
39 | |||
40 | #ifdef CONFIG_WIRELESS_EXT | 38 | #ifdef CONFIG_WIRELESS_EXT |
41 | if (!aborted) { | 39 | if (!aborted) { |
42 | memset(&wrqu, 0, sizeof(wrqu)); | 40 | memset(&wrqu, 0, sizeof(wrqu)); |
@@ -48,6 +46,7 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | |||
48 | dev_put(dev); | 46 | dev_put(dev); |
49 | 47 | ||
50 | out: | 48 | out: |
49 | wiphy_to_dev(request->wiphy)->scan_req = NULL; | ||
51 | kfree(request); | 50 | kfree(request); |
52 | } | 51 | } |
53 | EXPORT_SYMBOL(cfg80211_scan_done); | 52 | EXPORT_SYMBOL(cfg80211_scan_done); |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 333e4dd29450..72cfd47af6b8 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -233,6 +233,18 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream) | |||
233 | xrun(substream); | 233 | xrun(substream); |
234 | return -EPIPE; | 234 | return -EPIPE; |
235 | } | 235 | } |
236 | if (xrun_debug(substream, 8)) { | ||
237 | char name[16]; | ||
238 | pcm_debug_name(substream, name, sizeof(name)); | ||
239 | snd_printd("period_update: %s: pos=0x%x/0x%x/0x%x, " | ||
240 | "hwptr=0x%lx, hw_base=0x%lx, hw_intr=0x%lx\n", | ||
241 | name, (unsigned int)pos, | ||
242 | (unsigned int)runtime->period_size, | ||
243 | (unsigned int)runtime->buffer_size, | ||
244 | (unsigned long)old_hw_ptr, | ||
245 | (unsigned long)runtime->hw_ptr_base, | ||
246 | (unsigned long)runtime->hw_ptr_interrupt); | ||
247 | } | ||
236 | hw_base = runtime->hw_ptr_base; | 248 | hw_base = runtime->hw_ptr_base; |
237 | new_hw_ptr = hw_base + pos; | 249 | new_hw_ptr = hw_base + pos; |
238 | hw_ptr_interrupt = runtime->hw_ptr_interrupt + runtime->period_size; | 250 | hw_ptr_interrupt = runtime->hw_ptr_interrupt + runtime->period_size; |
@@ -244,18 +256,27 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream) | |||
244 | delta = new_hw_ptr - hw_ptr_interrupt; | 256 | delta = new_hw_ptr - hw_ptr_interrupt; |
245 | } | 257 | } |
246 | if (delta < 0) { | 258 | if (delta < 0) { |
247 | delta += runtime->buffer_size; | 259 | if (runtime->periods == 1 || new_hw_ptr < old_hw_ptr) |
260 | delta += runtime->buffer_size; | ||
248 | if (delta < 0) { | 261 | if (delta < 0) { |
249 | hw_ptr_error(substream, | 262 | hw_ptr_error(substream, |
250 | "Unexpected hw_pointer value " | 263 | "Unexpected hw_pointer value " |
251 | "(stream=%i, pos=%ld, intr_ptr=%ld)\n", | 264 | "(stream=%i, pos=%ld, intr_ptr=%ld)\n", |
252 | substream->stream, (long)pos, | 265 | substream->stream, (long)pos, |
253 | (long)hw_ptr_interrupt); | 266 | (long)hw_ptr_interrupt); |
267 | #if 1 | ||
268 | /* simply skipping the hwptr update seems more | ||
269 | * robust in some cases, e.g. on VMware with | ||
270 | * inaccurate timer source | ||
271 | */ | ||
272 | return 0; /* skip this update */ | ||
273 | #else | ||
254 | /* rebase to interrupt position */ | 274 | /* rebase to interrupt position */ |
255 | hw_base = new_hw_ptr = hw_ptr_interrupt; | 275 | hw_base = new_hw_ptr = hw_ptr_interrupt; |
256 | /* align hw_base to buffer_size */ | 276 | /* align hw_base to buffer_size */ |
257 | hw_base -= hw_base % runtime->buffer_size; | 277 | hw_base -= hw_base % runtime->buffer_size; |
258 | delta = 0; | 278 | delta = 0; |
279 | #endif | ||
259 | } else { | 280 | } else { |
260 | hw_base += runtime->buffer_size; | 281 | hw_base += runtime->buffer_size; |
261 | if (hw_base >= runtime->boundary) | 282 | if (hw_base >= runtime->boundary) |
@@ -344,6 +365,19 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream) | |||
344 | xrun(substream); | 365 | xrun(substream); |
345 | return -EPIPE; | 366 | return -EPIPE; |
346 | } | 367 | } |
368 | if (xrun_debug(substream, 16)) { | ||
369 | char name[16]; | ||
370 | pcm_debug_name(substream, name, sizeof(name)); | ||
371 | snd_printd("hw_update: %s: pos=0x%x/0x%x/0x%x, " | ||
372 | "hwptr=0x%lx, hw_base=0x%lx, hw_intr=0x%lx\n", | ||
373 | name, (unsigned int)pos, | ||
374 | (unsigned int)runtime->period_size, | ||
375 | (unsigned int)runtime->buffer_size, | ||
376 | (unsigned long)old_hw_ptr, | ||
377 | (unsigned long)runtime->hw_ptr_base, | ||
378 | (unsigned long)runtime->hw_ptr_interrupt); | ||
379 | } | ||
380 | |||
347 | hw_base = runtime->hw_ptr_base; | 381 | hw_base = runtime->hw_ptr_base; |
348 | new_hw_ptr = hw_base + pos; | 382 | new_hw_ptr = hw_base + pos; |
349 | 383 | ||
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c index a1db51b3ead8..a7f4a671f7b7 100644 --- a/sound/pci/ctxfi/ctamixer.c +++ b/sound/pci/ctxfi/ctamixer.c | |||
@@ -242,13 +242,12 @@ static int get_amixer_rsc(struct amixer_mgr *mgr, | |||
242 | 242 | ||
243 | /* Allocate mem for amixer resource */ | 243 | /* Allocate mem for amixer resource */ |
244 | amixer = kzalloc(sizeof(*amixer), GFP_KERNEL); | 244 | amixer = kzalloc(sizeof(*amixer), GFP_KERNEL); |
245 | if (NULL == amixer) { | 245 | if (!amixer) |
246 | err = -ENOMEM; | 246 | return -ENOMEM; |
247 | return err; | ||
248 | } | ||
249 | 247 | ||
250 | /* Check whether there are sufficient | 248 | /* Check whether there are sufficient |
251 | * amixer resources to meet request. */ | 249 | * amixer resources to meet request. */ |
250 | err = 0; | ||
252 | spin_lock_irqsave(&mgr->mgr_lock, flags); | 251 | spin_lock_irqsave(&mgr->mgr_lock, flags); |
253 | for (i = 0; i < desc->msr; i++) { | 252 | for (i = 0; i < desc->msr; i++) { |
254 | err = mgr_get_resource(&mgr->mgr, 1, &idx); | 253 | err = mgr_get_resource(&mgr->mgr, 1, &idx); |
@@ -397,12 +396,11 @@ static int get_sum_rsc(struct sum_mgr *mgr, | |||
397 | 396 | ||
398 | /* Allocate mem for sum resource */ | 397 | /* Allocate mem for sum resource */ |
399 | sum = kzalloc(sizeof(*sum), GFP_KERNEL); | 398 | sum = kzalloc(sizeof(*sum), GFP_KERNEL); |
400 | if (NULL == sum) { | 399 | if (!sum) |
401 | err = -ENOMEM; | 400 | return -ENOMEM; |
402 | return err; | ||
403 | } | ||
404 | 401 | ||
405 | /* Check whether there are sufficient sum resources to meet request. */ | 402 | /* Check whether there are sufficient sum resources to meet request. */ |
403 | err = 0; | ||
406 | spin_lock_irqsave(&mgr->mgr_lock, flags); | 404 | spin_lock_irqsave(&mgr->mgr_lock, flags); |
407 | for (i = 0; i < desc->msr; i++) { | 405 | for (i = 0; i < desc->msr; i++) { |
408 | err = mgr_get_resource(&mgr->mgr, 1, &idx); | 406 | err = mgr_get_resource(&mgr->mgr, 1, &idx); |
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c index e1c145d8b702..df43a5cd3938 100644 --- a/sound/pci/ctxfi/ctsrc.c +++ b/sound/pci/ctxfi/ctsrc.c | |||
@@ -724,12 +724,11 @@ static int get_srcimp_rsc(struct srcimp_mgr *mgr, | |||
724 | 724 | ||
725 | /* Allocate mem for SRCIMP resource */ | 725 | /* Allocate mem for SRCIMP resource */ |
726 | srcimp = kzalloc(sizeof(*srcimp), GFP_KERNEL); | 726 | srcimp = kzalloc(sizeof(*srcimp), GFP_KERNEL); |
727 | if (NULL == srcimp) { | 727 | if (!srcimp) |
728 | err = -ENOMEM; | 728 | return -ENOMEM; |
729 | return err; | ||
730 | } | ||
731 | 729 | ||
732 | /* Check whether there are sufficient SRCIMP resources. */ | 730 | /* Check whether there are sufficient SRCIMP resources. */ |
731 | err = 0; | ||
733 | spin_lock_irqsave(&mgr->mgr_lock, flags); | 732 | spin_lock_irqsave(&mgr->mgr_lock, flags); |
734 | for (i = 0; i < desc->msr; i++) { | 733 | for (i = 0; i < desc->msr; i++) { |
735 | err = mgr_get_resource(&mgr->mgr, 1, &idx); | 734 | err = mgr_get_resource(&mgr->mgr, 1, &idx); |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index be7d25fa7f35..3da85caf8af1 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -3754,7 +3754,7 @@ static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol, | |||
3754 | int mute = (!ucontrol->value.integer.value[0] && | 3754 | int mute = (!ucontrol->value.integer.value[0] && |
3755 | !ucontrol->value.integer.value[1]); | 3755 | !ucontrol->value.integer.value[1]); |
3756 | /* toggle GPIO1 according to the mute state */ | 3756 | /* toggle GPIO1 according to the mute state */ |
3757 | snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, | 3757 | snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, |
3758 | mute ? 0x02 : 0x0); | 3758 | mute ? 0x02 : 0x0); |
3759 | return ret; | 3759 | return ret; |
3760 | } | 3760 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7e99763ca527..8c8b273116fb 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -10631,6 +10631,18 @@ static void alc262_lenovo_3000_unsol_event(struct hda_codec *codec, | |||
10631 | alc262_lenovo_3000_automute(codec, 1); | 10631 | alc262_lenovo_3000_automute(codec, 1); |
10632 | } | 10632 | } |
10633 | 10633 | ||
10634 | static int amp_stereo_mute_update(struct hda_codec *codec, hda_nid_t nid, | ||
10635 | int dir, int idx, long *valp) | ||
10636 | { | ||
10637 | int i, change = 0; | ||
10638 | |||
10639 | for (i = 0; i < 2; i++, valp++) | ||
10640 | change |= snd_hda_codec_amp_update(codec, nid, i, dir, idx, | ||
10641 | HDA_AMP_MUTE, | ||
10642 | *valp ? 0 : HDA_AMP_MUTE); | ||
10643 | return change; | ||
10644 | } | ||
10645 | |||
10634 | /* bind hp and internal speaker mute (with plug check) */ | 10646 | /* bind hp and internal speaker mute (with plug check) */ |
10635 | static int alc262_fujitsu_master_sw_put(struct snd_kcontrol *kcontrol, | 10647 | static int alc262_fujitsu_master_sw_put(struct snd_kcontrol *kcontrol, |
10636 | struct snd_ctl_elem_value *ucontrol) | 10648 | struct snd_ctl_elem_value *ucontrol) |
@@ -10639,13 +10651,8 @@ static int alc262_fujitsu_master_sw_put(struct snd_kcontrol *kcontrol, | |||
10639 | long *valp = ucontrol->value.integer.value; | 10651 | long *valp = ucontrol->value.integer.value; |
10640 | int change; | 10652 | int change; |
10641 | 10653 | ||
10642 | change = snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0, | 10654 | change = amp_stereo_mute_update(codec, 0x14, HDA_OUTPUT, 0, valp); |
10643 | HDA_AMP_MUTE, | 10655 | change |= amp_stereo_mute_update(codec, 0x1b, HDA_OUTPUT, 0, valp); |
10644 | valp ? 0 : HDA_AMP_MUTE); | ||
10645 | change |= snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0, | ||
10646 | HDA_AMP_MUTE, | ||
10647 | valp ? 0 : HDA_AMP_MUTE); | ||
10648 | |||
10649 | if (change) | 10656 | if (change) |
10650 | alc262_fujitsu_automute(codec, 0); | 10657 | alc262_fujitsu_automute(codec, 0); |
10651 | return change; | 10658 | return change; |
@@ -10680,10 +10687,7 @@ static int alc262_lenovo_3000_master_sw_put(struct snd_kcontrol *kcontrol, | |||
10680 | long *valp = ucontrol->value.integer.value; | 10687 | long *valp = ucontrol->value.integer.value; |
10681 | int change; | 10688 | int change; |
10682 | 10689 | ||
10683 | change = snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0, | 10690 | change = amp_stereo_mute_update(codec, 0x1b, HDA_OUTPUT, 0, valp); |
10684 | HDA_AMP_MUTE, | ||
10685 | valp ? 0 : HDA_AMP_MUTE); | ||
10686 | |||
10687 | if (change) | 10691 | if (change) |
10688 | alc262_lenovo_3000_automute(codec, 0); | 10692 | alc262_lenovo_3000_automute(codec, 0); |
10689 | return change; | 10693 | return change; |
@@ -11854,12 +11858,7 @@ static int alc268_acer_master_sw_put(struct snd_kcontrol *kcontrol, | |||
11854 | long *valp = ucontrol->value.integer.value; | 11858 | long *valp = ucontrol->value.integer.value; |
11855 | int change; | 11859 | int change; |
11856 | 11860 | ||
11857 | change = snd_hda_codec_amp_update(codec, 0x14, 0, HDA_OUTPUT, 0, | 11861 | change = amp_stereo_mute_update(codec, 0x14, HDA_OUTPUT, 0, valp); |
11858 | HDA_AMP_MUTE, | ||
11859 | valp[0] ? 0 : HDA_AMP_MUTE); | ||
11860 | change |= snd_hda_codec_amp_update(codec, 0x14, 1, HDA_OUTPUT, 0, | ||
11861 | HDA_AMP_MUTE, | ||
11862 | valp[1] ? 0 : HDA_AMP_MUTE); | ||
11863 | if (change) | 11862 | if (change) |
11864 | alc268_acer_automute(codec, 0); | 11863 | alc268_acer_automute(codec, 0); |
11865 | return change; | 11864 | return change; |
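Editor's note: the new amp_stereo_mute_update() helper above consolidates three open-coded call sites and, in doing so, tests the per-channel value *valp rather than the pointer valp (which is always non-NULL). A standalone sketch of that helper shape follows; the amp-state array and update function are stand-ins, not the HDA API.

#include <stdio.h>

#define AMP_MUTE 0x80

static unsigned int amp_state[2];

/* returns nonzero if the stored state actually changed */
static int fake_amp_update(int ch, unsigned int mask, unsigned int val)
{
	unsigned int old = amp_state[ch];

	amp_state[ch] = (old & ~mask) | (val & mask);
	return amp_state[ch] != old;
}

static int stereo_mute_update(const long *valp)
{
	int i, change = 0;

	for (i = 0; i < 2; i++, valp++)
		change |= fake_amp_update(i, AMP_MUTE,
					  *valp ? 0 : AMP_MUTE);
	return change;
}

int main(void)
{
	long ctl[2] = { 1, 0 };	/* left unmuted, right muted */

	printf("changed: %d\n", stereo_mute_update(ctl));
	printf("left 0x%02x right 0x%02x\n", amp_state[0], amp_state[1]);
	return 0;
}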
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index da7f9f65c047..512f3b9b9a45 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -4066,7 +4066,7 @@ static int stac92xx_add_jack(struct hda_codec *codec, | |||
4066 | jack->nid = nid; | 4066 | jack->nid = nid; |
4067 | jack->type = type; | 4067 | jack->type = type; |
4068 | 4068 | ||
4069 | sprintf(name, "%s at %s %s Jack", | 4069 | snprintf(name, sizeof(name), "%s at %s %s Jack", |
4070 | snd_hda_get_jack_type(def_conf), | 4070 | snd_hda_get_jack_type(def_conf), |
4071 | snd_hda_get_jack_connectivity(def_conf), | 4071 | snd_hda_get_jack_connectivity(def_conf), |
4072 | snd_hda_get_jack_location(def_conf)); | 4072 | snd_hda_get_jack_location(def_conf)); |
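Editor's note: the patch_sigmatel.c hunk bounds the formatted jack name to its destination buffer by replacing sprintf() with snprintf(). A trivial standalone illustration of that change, with made-up strings:

#include <stdio.h>

int main(void)
{
	char name[32];

	/* truncates instead of overflowing if the parts are too long */
	snprintf(name, sizeof(name), "%s at %s %s Jack",
		 "Headphone", "Ext", "Left");
	printf("%s\n", name);
	return 0;
}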
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index ab099f482487..cb0d1bf34b57 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c | |||
@@ -767,6 +767,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream, | |||
767 | int codec_clk = 0, bypass_pll = 0, fsref, last_clk = 0; | 767 | int codec_clk = 0, bypass_pll = 0, fsref, last_clk = 0; |
768 | u8 data, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1; | 768 | u8 data, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1; |
769 | u16 pll_d = 1; | 769 | u16 pll_d = 1; |
770 | u8 reg; | ||
770 | 771 | ||
771 | /* select data word length */ | 772 | /* select data word length */ |
772 | data = | 773 | data = |
@@ -801,8 +802,16 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream, | |||
801 | pll_q &= 0xf; | 802 | pll_q &= 0xf; |
802 | aic3x_write(codec, AIC3X_PLL_PROGA_REG, pll_q << PLLQ_SHIFT); | 803 | aic3x_write(codec, AIC3X_PLL_PROGA_REG, pll_q << PLLQ_SHIFT); |
803 | aic3x_write(codec, AIC3X_GPIOB_REG, CODEC_CLKIN_CLKDIV); | 804 | aic3x_write(codec, AIC3X_GPIOB_REG, CODEC_CLKIN_CLKDIV); |
804 | } else | 805 | /* disable PLL if it is bypassed */ |
806 | reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG); | ||
807 | aic3x_write(codec, AIC3X_PLL_PROGA_REG, reg & ~PLL_ENABLE); | ||
808 | |||
809 | } else { | ||
805 | aic3x_write(codec, AIC3X_GPIOB_REG, CODEC_CLKIN_PLLDIV); | 810 | aic3x_write(codec, AIC3X_GPIOB_REG, CODEC_CLKIN_PLLDIV); |
811 | /* enable PLL when it is used */ | ||
812 | reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG); | ||
813 | aic3x_write(codec, AIC3X_PLL_PROGA_REG, reg | PLL_ENABLE); | ||
814 | } | ||
806 | 815 | ||
807 | /* Route Left DAC to left channel input and | 816 | /* Route Left DAC to left channel input and |
808 | * right DAC to right channel input */ | 817 | * right DAC to right channel input */ |
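Editor's note: the tlv320aic3x.c hunk reads the cached PLL programming register and clears or sets the enable bit depending on whether the PLL is bypassed, a read-modify-write on a shadowed register. A minimal sketch of that pattern follows; the bit position, cached value and names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define PLL_ENABLE 0x80

static uint8_t reg_cache = 0x12;	/* pretend P/Q programming bits */

static uint8_t pll_set_enabled(int bypass_pll)
{
	uint8_t reg = reg_cache;	/* read from the cache */

	if (bypass_pll)
		reg &= ~PLL_ENABLE;	/* PLL unused: power it down */
	else
		reg |= PLL_ENABLE;	/* PLL needed: turn it on */
	reg_cache = reg;		/* write back */
	return reg;
}

int main(void)
{
	printf("bypassed: 0x%02x\n", pll_set_enabled(1));
	printf("in use:   0x%02x\n", pll_set_enabled(0));
	return 0;
}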
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig index 523aec188ccf..73525c048e7f 100644 --- a/sound/usb/Kconfig +++ b/sound/usb/Kconfig | |||
@@ -48,6 +48,7 @@ config SND_USB_CAIAQ | |||
48 | * Native Instruments Kore Controller | 48 | * Native Instruments Kore Controller |
49 | * Native Instruments Kore Controller 2 | 49 | * Native Instruments Kore Controller 2 |
50 | * Native Instruments Audio Kontrol 1 | 50 | * Native Instruments Audio Kontrol 1 |
51 | * Native Instruments Audio 2 DJ | ||
51 | * Native Instruments Audio 4 DJ | 52 | * Native Instruments Audio 4 DJ |
52 | * Native Instruments Audio 8 DJ | 53 | * Native Instruments Audio 8 DJ |
53 | * Native Instruments Guitar Rig Session I/O | 54 | * Native Instruments Guitar Rig Session I/O |
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 8f9b60c5d74c..121af0644fd9 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c | |||
@@ -646,6 +646,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) | |||
646 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_GUITARRIGMOBILE): | 646 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_GUITARRIGMOBILE): |
647 | dev->samplerates |= SNDRV_PCM_RATE_192000; | 647 | dev->samplerates |= SNDRV_PCM_RATE_192000; |
648 | /* fall thru */ | 648 | /* fall thru */ |
649 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO2DJ): | ||
649 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): | 650 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): |
650 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): | 651 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): |
651 | dev->samplerates |= SNDRV_PCM_RATE_88200; | 652 | dev->samplerates |= SNDRV_PCM_RATE_88200; |
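Editor's note: the caiaq/audio.c hunk adds the new device as another case that falls through into the shared block below it, so it inherits the additional sample rates without duplicating code. A standalone sketch of accumulating capability bits through deliberate switch fall-through; the device IDs and rate bits here are placeholders.

#include <stdio.h>

#define RATE_192000 (1u << 0)
#define RATE_88200  (1u << 1)

enum { DEV_MOBILE = 1, DEV_2DJ, DEV_4DJ };

static unsigned int rates_for(int id)
{
	unsigned int rates = 0;

	switch (id) {
	case DEV_MOBILE:
		rates |= RATE_192000;
		/* fall through */
	case DEV_2DJ:		/* newly added device shares the rates below */
	case DEV_4DJ:
		rates |= RATE_88200;
		break;
	default:
		break;
	}
	return rates;
}

int main(void)
{
	printf("2DJ rates mask: 0x%x\n", rates_for(DEV_2DJ));
	return 0;
}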
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index de38108f0b28..83e6c1312d47 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c | |||
@@ -35,13 +35,14 @@ | |||
35 | #include "input.h" | 35 | #include "input.h" |
36 | 36 | ||
37 | MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); | 37 | MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); |
38 | MODULE_DESCRIPTION("caiaq USB audio, version 1.3.18"); | 38 | MODULE_DESCRIPTION("caiaq USB audio, version 1.3.19"); |
39 | MODULE_LICENSE("GPL"); | 39 | MODULE_LICENSE("GPL"); |
40 | MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," | 40 | MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," |
41 | "{Native Instruments, RigKontrol3}," | 41 | "{Native Instruments, RigKontrol3}," |
42 | "{Native Instruments, Kore Controller}," | 42 | "{Native Instruments, Kore Controller}," |
43 | "{Native Instruments, Kore Controller 2}," | 43 | "{Native Instruments, Kore Controller 2}," |
44 | "{Native Instruments, Audio Kontrol 1}," | 44 | "{Native Instruments, Audio Kontrol 1}," |
45 | "{Native Instruments, Audio 2 DJ}," | ||
45 | "{Native Instruments, Audio 4 DJ}," | 46 | "{Native Instruments, Audio 4 DJ}," |
46 | "{Native Instruments, Audio 8 DJ}," | 47 | "{Native Instruments, Audio 8 DJ}," |
47 | "{Native Instruments, Session I/O}," | 48 | "{Native Instruments, Session I/O}," |
@@ -121,6 +122,11 @@ static struct usb_device_id snd_usb_id_table[] = { | |||
121 | .idVendor = USB_VID_NATIVEINSTRUMENTS, | 122 | .idVendor = USB_VID_NATIVEINSTRUMENTS, |
122 | .idProduct = USB_PID_AUDIO4DJ | 123 | .idProduct = USB_PID_AUDIO4DJ |
123 | }, | 124 | }, |
125 | { | ||
126 | .match_flags = USB_DEVICE_ID_MATCH_DEVICE, | ||
127 | .idVendor = USB_VID_NATIVEINSTRUMENTS, | ||
128 | .idProduct = USB_PID_AUDIO2DJ | ||
129 | }, | ||
124 | { /* terminator */ } | 130 | { /* terminator */ } |
125 | }; | 131 | }; |
126 | 132 | ||
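Editor's note: supporting the extra caiaq device amounts to a new match entry in the sentinel-terminated ID table (plus the PID define and module description text in the neighbouring hunks). The following standalone sketch shows that table shape; the struct, vendor/product numbers and lookup are simplified illustrations, not the USB core API.

#include <stdio.h>

struct dev_id {
	unsigned short vendor;
	unsigned short product;
};

#define VID_EXAMPLE 0x17cc	/* illustrative vendor id */

static const struct dev_id id_table[] = {
	{ VID_EXAMPLE, 0x0839 },	/* existing device */
	{ VID_EXAMPLE, 0x041c },	/* newly added device */
	{ 0, 0 }			/* terminator */
};

static int id_match(unsigned short vid, unsigned short pid)
{
	const struct dev_id *id;

	for (id = id_table; id->vendor; id++)	/* stop at terminator */
		if (id->vendor == vid && id->product == pid)
			return 1;
	return 0;
}

int main(void)
{
	printf("match: %d\n", id_match(VID_EXAMPLE, 0x041c));
	return 0;
}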
diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h index ece73514854e..44e3edf88bef 100644 --- a/sound/usb/caiaq/device.h +++ b/sound/usb/caiaq/device.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define USB_PID_KORECONTROLLER 0x4711 | 10 | #define USB_PID_KORECONTROLLER 0x4711 |
11 | #define USB_PID_KORECONTROLLER2 0x4712 | 11 | #define USB_PID_KORECONTROLLER2 0x4712 |
12 | #define USB_PID_AK1 0x0815 | 12 | #define USB_PID_AK1 0x0815 |
13 | #define USB_PID_AUDIO2DJ 0x041c | ||
13 | #define USB_PID_AUDIO4DJ 0x0839 | 14 | #define USB_PID_AUDIO4DJ 0x0839 |
14 | #define USB_PID_AUDIO8DJ 0x1978 | 15 | #define USB_PID_AUDIO8DJ 0x1978 |
15 | #define USB_PID_SESSIONIO 0x1915 | 16 | #define USB_PID_SESSIONIO 0x1915 |
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c index 4bd3a7a0edc1..ec9cdf986928 100644 --- a/sound/usb/usbmixer.c +++ b/sound/usb/usbmixer.c | |||
@@ -990,20 +990,35 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc, | |||
990 | break; | 990 | break; |
991 | } | 991 | } |
992 | 992 | ||
993 | /* quirk for UDA1321/N101 */ | 993 | /* volume control quirks */ |
994 | /* note that detection between firmware 2.1.1.7 (N101) and later 2.1.1.21 */ | ||
995 | /* is not very clear from datasheets */ | ||
996 | /* I hope that the min value is -15360 for newer firmware --jk */ | ||
997 | switch (state->chip->usb_id) { | 994 | switch (state->chip->usb_id) { |
998 | case USB_ID(0x0471, 0x0101): | 995 | case USB_ID(0x0471, 0x0101): |
999 | case USB_ID(0x0471, 0x0104): | 996 | case USB_ID(0x0471, 0x0104): |
1000 | case USB_ID(0x0471, 0x0105): | 997 | case USB_ID(0x0471, 0x0105): |
1001 | case USB_ID(0x0672, 0x1041): | 998 | case USB_ID(0x0672, 0x1041): |
999 | /* quirk for UDA1321/N101. | ||
1000 | * note that detection between firmware 2.1.1.7 (N101) | ||
1001 | * and later 2.1.1.21 is not very clear from datasheets. | ||
1002 | * I hope that the min value is -15360 for newer firmware --jk | ||
1003 | */ | ||
1002 | if (!strcmp(kctl->id.name, "PCM Playback Volume") && | 1004 | if (!strcmp(kctl->id.name, "PCM Playback Volume") && |
1003 | cval->min == -15616) { | 1005 | cval->min == -15616) { |
1004 | snd_printk(KERN_INFO "using volume control quirk for the UDA1321/N101 chip\n"); | 1006 | snd_printk(KERN_INFO |
1007 | "set volume quirk for UDA1321/N101 chip\n"); | ||
1005 | cval->max = -256; | 1008 | cval->max = -256; |
1006 | } | 1009 | } |
1010 | break; | ||
1011 | |||
1012 | case USB_ID(0x046d, 0x09a4): | ||
1013 | if (!strcmp(kctl->id.name, "Mic Capture Volume")) { | ||
1014 | snd_printk(KERN_INFO | ||
1015 | "set volume quirk for QuickCam E3500\n"); | ||
1016 | cval->min = 6080; | ||
1017 | cval->max = 8768; | ||
1018 | cval->res = 192; | ||
1019 | } | ||
1020 | break; | ||
1021 | |||
1007 | } | 1022 | } |
1008 | 1023 | ||
1009 | snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", | 1024 | snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", |
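Editor's note: the usbmixer.c hunk turns the old single-purpose quirk into a per-device switch and adds an entry that overrides the reported volume range for a webcam microphone. A standalone sketch of that idea follows: after generic parsing fills in a control's range, a switch on the device ID corrects hardware known to report bogus values. The numbers mirror the patch, but the structure and names are simplified for illustration.

#include <stdio.h>
#include <string.h>

struct ctl_range {
	int min, max, res;
};

#define USB_ID(v, p) (((unsigned int)(v) << 16) | (p))

static void apply_volume_quirk(unsigned int usb_id, const char *name,
			       struct ctl_range *c)
{
	switch (usb_id) {
	case USB_ID(0x046d, 0x09a4):		/* QuickCam E3500 */
		if (!strcmp(name, "Mic Capture Volume")) {
			c->min = 6080;
			c->max = 8768;
			c->res = 192;
		}
		break;
	default:
		break;
	}
}

int main(void)
{
	struct ctl_range c = { 0, 65535, 1 };	/* bogus descriptor values */

	apply_volume_quirk(USB_ID(0x046d, 0x09a4), "Mic Capture Volume", &c);
	printf("min=%d max=%d res=%d\n", c.min, c.max, c.res);
	return 0;
}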