diff options
Diffstat (limited to 'drivers/char')
49 files changed, 5798 insertions, 2773 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3141dd3b6e53..f09fc0e2062d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -276,11 +276,19 @@ config N_HDLC | |||
276 | Allows synchronous HDLC communications with tty device drivers that | 276 | Allows synchronous HDLC communications with tty device drivers that |
277 | support synchronous HDLC such as the Microgate SyncLink adapter. | 277 | support synchronous HDLC such as the Microgate SyncLink adapter. |
278 | 278 | ||
279 | This driver can only be built as a module ( = code which can be | 279 | This driver can be built as a module ( = code which can be |
280 | inserted in and removed from the running kernel whenever you want). | 280 | inserted in and removed from the running kernel whenever you want). |
281 | The module will be called n_hdlc. If you want to do that, say M | 281 | The module will be called n_hdlc. If you want to do that, say M |
282 | here. | 282 | here. |
283 | 283 | ||
284 | config N_GSM | ||
285 | tristate "GSM MUX line discipline support (EXPERIMENTAL)" | ||
286 | depends on EXPERIMENTAL | ||
287 | depends on NET | ||
288 | help | ||
289 | This line discipline provides support for the GSM MUX protocol and | ||
290 | presents the mux as a set of 61 individual tty devices. | ||
291 | |||
284 | config RISCOM8 | 292 | config RISCOM8 |
285 | tristate "SDL RISCom/8 card support" | 293 | tristate "SDL RISCom/8 card support" |
286 | depends on SERIAL_NONSTANDARD | 294 | depends on SERIAL_NONSTANDARD |
@@ -1113,5 +1121,12 @@ config DEVPORT | |||
1113 | 1121 | ||
1114 | source "drivers/s390/char/Kconfig" | 1122 | source "drivers/s390/char/Kconfig" |
1115 | 1123 | ||
1124 | config RAMOOPS | ||
1125 | tristate "Log panic/oops to a RAM buffer" | ||
1126 | default n | ||
1127 | help | ||
1128 | This enables panic and oops messages to be logged to a circular | ||
1129 | buffer in RAM where it can be read back at some later point. | ||
1130 | |||
1116 | endmenu | 1131 | endmenu |
1117 | 1132 | ||
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index f957edf7e45d..88d6eac69754 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -40,6 +40,7 @@ obj-$(CONFIG_SYNCLINK) += synclink.o | |||
40 | obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o | 40 | obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o |
41 | obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o | 41 | obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o |
42 | obj-$(CONFIG_N_HDLC) += n_hdlc.o | 42 | obj-$(CONFIG_N_HDLC) += n_hdlc.o |
43 | obj-$(CONFIG_N_GSM) += n_gsm.o | ||
43 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o | 44 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o |
44 | obj-$(CONFIG_SX) += sx.o generic_serial.o | 45 | obj-$(CONFIG_SX) += sx.o generic_serial.o |
45 | obj-$(CONFIG_RIO) += rio/ generic_serial.o | 46 | obj-$(CONFIG_RIO) += rio/ generic_serial.o |
@@ -107,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o | |||
107 | obj-$(CONFIG_TCG_TPM) += tpm/ | 108 | obj-$(CONFIG_TCG_TPM) += tpm/ |
108 | 109 | ||
109 | obj-$(CONFIG_PS3_FLASH) += ps3flash.o | 110 | obj-$(CONFIG_PS3_FLASH) += ps3flash.o |
111 | obj-$(CONFIG_RAMOOPS) += ramoops.o | ||
110 | 112 | ||
111 | obj-$(CONFIG_JS_RTC) += js-rtc.o | 113 | obj-$(CONFIG_JS_RTC) += js-rtc.o |
112 | js-rtc-y = rtc.o | 114 | js-rtc-y = rtc.o |
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 870f12cfed93..120490949997 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h | |||
@@ -178,86 +178,6 @@ struct agp_bridge_data { | |||
178 | #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) | 178 | #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) |
179 | 179 | ||
180 | 180 | ||
181 | /* Intel registers */ | ||
182 | #define INTEL_APSIZE 0xb4 | ||
183 | #define INTEL_ATTBASE 0xb8 | ||
184 | #define INTEL_AGPCTRL 0xb0 | ||
185 | #define INTEL_NBXCFG 0x50 | ||
186 | #define INTEL_ERRSTS 0x91 | ||
187 | |||
188 | /* Intel i830 registers */ | ||
189 | #define I830_GMCH_CTRL 0x52 | ||
190 | #define I830_GMCH_ENABLED 0x4 | ||
191 | #define I830_GMCH_MEM_MASK 0x1 | ||
192 | #define I830_GMCH_MEM_64M 0x1 | ||
193 | #define I830_GMCH_MEM_128M 0 | ||
194 | #define I830_GMCH_GMS_MASK 0x70 | ||
195 | #define I830_GMCH_GMS_DISABLED 0x00 | ||
196 | #define I830_GMCH_GMS_LOCAL 0x10 | ||
197 | #define I830_GMCH_GMS_STOLEN_512 0x20 | ||
198 | #define I830_GMCH_GMS_STOLEN_1024 0x30 | ||
199 | #define I830_GMCH_GMS_STOLEN_8192 0x40 | ||
200 | #define I830_RDRAM_CHANNEL_TYPE 0x03010 | ||
201 | #define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) | ||
202 | #define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) | ||
203 | |||
204 | /* This one is for I830MP w. an external graphic card */ | ||
205 | #define INTEL_I830_ERRSTS 0x92 | ||
206 | |||
207 | /* Intel 855GM/852GM registers */ | ||
208 | #define I855_GMCH_GMS_MASK 0xF0 | ||
209 | #define I855_GMCH_GMS_STOLEN_0M 0x0 | ||
210 | #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
211 | #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
212 | #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
213 | #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
214 | #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
215 | #define I85X_CAPID 0x44 | ||
216 | #define I85X_VARIANT_MASK 0x7 | ||
217 | #define I85X_VARIANT_SHIFT 5 | ||
218 | #define I855_GME 0x0 | ||
219 | #define I855_GM 0x4 | ||
220 | #define I852_GME 0x2 | ||
221 | #define I852_GM 0x5 | ||
222 | |||
223 | /* Intel i845 registers */ | ||
224 | #define INTEL_I845_AGPM 0x51 | ||
225 | #define INTEL_I845_ERRSTS 0xc8 | ||
226 | |||
227 | /* Intel i860 registers */ | ||
228 | #define INTEL_I860_MCHCFG 0x50 | ||
229 | #define INTEL_I860_ERRSTS 0xc8 | ||
230 | |||
231 | /* Intel i810 registers */ | ||
232 | #define I810_GMADDR 0x10 | ||
233 | #define I810_MMADDR 0x14 | ||
234 | #define I810_PTE_BASE 0x10000 | ||
235 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | ||
236 | #define I810_PTE_LOCAL 0x00000002 | ||
237 | #define I810_PTE_VALID 0x00000001 | ||
238 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
239 | #define I810_SMRAM_MISCC 0x70 | ||
240 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | ||
241 | #define I810_GFX_MEM_WIN_32M 0x00010000 | ||
242 | #define I810_GMS 0x000000c0 | ||
243 | #define I810_GMS_DISABLE 0x00000000 | ||
244 | #define I810_PGETBL_CTL 0x2020 | ||
245 | #define I810_PGETBL_ENABLED 0x00000001 | ||
246 | #define I965_PGETBL_SIZE_MASK 0x0000000e | ||
247 | #define I965_PGETBL_SIZE_512KB (0 << 1) | ||
248 | #define I965_PGETBL_SIZE_256KB (1 << 1) | ||
249 | #define I965_PGETBL_SIZE_128KB (2 << 1) | ||
250 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
251 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
252 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
253 | #define G33_PGETBL_SIZE_MASK (3 << 8) | ||
254 | #define G33_PGETBL_SIZE_1M (1 << 8) | ||
255 | #define G33_PGETBL_SIZE_2M (2 << 8) | ||
256 | |||
257 | #define I810_DRAM_CTL 0x3000 | ||
258 | #define I810_DRAM_ROW_0 0x00000001 | ||
259 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | ||
260 | |||
261 | struct agp_device_ids { | 181 | struct agp_device_ids { |
262 | unsigned short device_id; /* first, to make table easier to read */ | 182 | unsigned short device_id; /* first, to make table easier to read */ |
263 | enum chipset_type chipset; | 183 | enum chipset_type chipset; |
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index d2ce68f27e4b..fd793519ea2b 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c | |||
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = { | |||
204 | .aperture_sizes = ali_generic_sizes, | 204 | .aperture_sizes = ali_generic_sizes, |
205 | .size_type = U32_APER_SIZE, | 205 | .size_type = U32_APER_SIZE, |
206 | .num_aperture_sizes = 7, | 206 | .num_aperture_sizes = 7, |
207 | .needs_scratch_page = true, | ||
207 | .configure = ali_configure, | 208 | .configure = ali_configure, |
208 | .fetch_size = ali_fetch_size, | 209 | .fetch_size = ali_fetch_size, |
209 | .cleanup = ali_cleanup, | 210 | .cleanup = ali_cleanup, |
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index a7637d72cef6..b6b1568314c8 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c | |||
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) | |||
142 | { | 142 | { |
143 | struct aper_size_info_lvl2 *value; | 143 | struct aper_size_info_lvl2 *value; |
144 | struct amd_page_map page_dir; | 144 | struct amd_page_map page_dir; |
145 | unsigned long __iomem *cur_gatt; | ||
145 | unsigned long addr; | 146 | unsigned long addr; |
146 | int retval; | 147 | int retval; |
147 | u32 temp; | 148 | u32 temp; |
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) | |||
178 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ | 179 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ |
179 | } | 180 | } |
180 | 181 | ||
182 | for (i = 0; i < value->num_entries; i++) { | ||
183 | addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; | ||
184 | cur_gatt = GET_GATT(addr); | ||
185 | writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); | ||
186 | readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ | ||
187 | } | ||
188 | |||
181 | return 0; | 189 | return 0; |
182 | } | 190 | } |
183 | 191 | ||
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = { | |||
375 | .aperture_sizes = amd_irongate_sizes, | 383 | .aperture_sizes = amd_irongate_sizes, |
376 | .size_type = LVL2_APER_SIZE, | 384 | .size_type = LVL2_APER_SIZE, |
377 | .num_aperture_sizes = 7, | 385 | .num_aperture_sizes = 7, |
386 | .needs_scratch_page = true, | ||
378 | .configure = amd_irongate_configure, | 387 | .configure = amd_irongate_configure, |
379 | .fetch_size = amd_irongate_fetch_size, | 388 | .fetch_size = amd_irongate_fetch_size, |
380 | .cleanup = amd_irongate_cleanup, | 389 | .cleanup = amd_irongate_cleanup, |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index fd50ead59c79..70312da4c968 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = { | |||
210 | .aperture_sizes = amd_8151_sizes, | 210 | .aperture_sizes = amd_8151_sizes, |
211 | .size_type = U32_APER_SIZE, | 211 | .size_type = U32_APER_SIZE, |
212 | .num_aperture_sizes = 7, | 212 | .num_aperture_sizes = 7, |
213 | .needs_scratch_page = true, | ||
213 | .configure = amd_8151_configure, | 214 | .configure = amd_8151_configure, |
214 | .fetch_size = amd64_fetch_size, | 215 | .fetch_size = amd64_fetch_size, |
215 | .cleanup = amd64_cleanup, | 216 | .cleanup = amd64_cleanup, |
@@ -383,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) | |||
383 | { | 384 | { |
384 | u32 httfea,baseaddr,enuscr; | 385 | u32 httfea,baseaddr,enuscr; |
385 | struct pci_dev *dev1; | 386 | struct pci_dev *dev1; |
386 | int i; | 387 | int i, ret; |
387 | unsigned size = amd64_fetch_size(); | 388 | unsigned size = amd64_fetch_size(); |
388 | 389 | ||
389 | dev_info(&pdev->dev, "setting up ULi AGP\n"); | 390 | dev_info(&pdev->dev, "setting up ULi AGP\n"); |
@@ -399,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) | |||
399 | 400 | ||
400 | if (i == ARRAY_SIZE(uli_sizes)) { | 401 | if (i == ARRAY_SIZE(uli_sizes)) { |
401 | dev_info(&pdev->dev, "no ULi size found for %d\n", size); | 402 | dev_info(&pdev->dev, "no ULi size found for %d\n", size); |
402 | return -ENODEV; | 403 | ret = -ENODEV; |
404 | goto put; | ||
403 | } | 405 | } |
404 | 406 | ||
405 | /* shadow x86-64 registers into ULi registers */ | 407 | /* shadow x86-64 registers into ULi registers */ |
406 | pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); | 408 | pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); |
407 | 409 | ||
408 | /* if x86-64 aperture base is beyond 4G, exit here */ | 410 | /* if x86-64 aperture base is beyond 4G, exit here */ |
409 | if ((httfea & 0x7fff) >> (32 - 25)) | 411 | if ((httfea & 0x7fff) >> (32 - 25)) { |
410 | return -ENODEV; | 412 | ret = -ENODEV; |
413 | goto put; | ||
414 | } | ||
411 | 415 | ||
412 | httfea = (httfea& 0x7fff) << 25; | 416 | httfea = (httfea& 0x7fff) << 25; |
413 | 417 | ||
@@ -419,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) | |||
419 | enuscr= httfea+ (size * 1024 * 1024) - 1; | 423 | enuscr= httfea+ (size * 1024 * 1024) - 1; |
420 | pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); | 424 | pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); |
421 | pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); | 425 | pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); |
422 | 426 | ret = 0; | |
427 | put: | ||
423 | pci_dev_put(dev1); | 428 | pci_dev_put(dev1); |
424 | return 0; | 429 | return ret; |
425 | } | 430 | } |
426 | 431 | ||
427 | 432 | ||
@@ -440,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
440 | { | 445 | { |
441 | u32 tmp, apbase, apbar, aplimit; | 446 | u32 tmp, apbase, apbar, aplimit; |
442 | struct pci_dev *dev1; | 447 | struct pci_dev *dev1; |
443 | int i; | 448 | int i, ret; |
444 | unsigned size = amd64_fetch_size(); | 449 | unsigned size = amd64_fetch_size(); |
445 | 450 | ||
446 | dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); | 451 | dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); |
@@ -457,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
457 | 462 | ||
458 | if (i == ARRAY_SIZE(nforce3_sizes)) { | 463 | if (i == ARRAY_SIZE(nforce3_sizes)) { |
459 | dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); | 464 | dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); |
460 | return -ENODEV; | 465 | ret = -ENODEV; |
466 | goto put; | ||
461 | } | 467 | } |
462 | 468 | ||
463 | pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); | 469 | pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); |
@@ -471,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
471 | /* if x86-64 aperture base is beyond 4G, exit here */ | 477 | /* if x86-64 aperture base is beyond 4G, exit here */ |
472 | if ( (apbase & 0x7fff) >> (32 - 25) ) { | 478 | if ( (apbase & 0x7fff) >> (32 - 25) ) { |
473 | dev_info(&pdev->dev, "aperture base > 4G\n"); | 479 | dev_info(&pdev->dev, "aperture base > 4G\n"); |
474 | return -ENODEV; | 480 | ret = -ENODEV; |
481 | goto put; | ||
475 | } | 482 | } |
476 | 483 | ||
477 | apbase = (apbase & 0x7fff) << 25; | 484 | apbase = (apbase & 0x7fff) << 25; |
@@ -487,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
487 | pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); | 494 | pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); |
488 | pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); | 495 | pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); |
489 | 496 | ||
497 | ret = 0; | ||
498 | put: | ||
490 | pci_dev_put(dev1); | 499 | pci_dev_put(dev1); |
491 | 500 | ||
492 | return 0; | 501 | return ret; |
493 | } | 502 | } |
494 | 503 | ||
495 | static int __devinit agp_amd64_probe(struct pci_dev *pdev, | 504 | static int __devinit agp_amd64_probe(struct pci_dev *pdev, |
@@ -499,6 +508,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, | |||
499 | u8 cap_ptr; | 508 | u8 cap_ptr; |
500 | int err; | 509 | int err; |
501 | 510 | ||
511 | /* The Highlander principle */ | ||
512 | if (agp_bridges_found) | ||
513 | return -ENODEV; | ||
514 | |||
502 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | 515 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); |
503 | if (!cap_ptr) | 516 | if (!cap_ptr) |
504 | return -ENODEV; | 517 | return -ENODEV; |
@@ -562,6 +575,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev) | |||
562 | amd64_aperture_sizes[bridge->aperture_size_idx].size); | 575 | amd64_aperture_sizes[bridge->aperture_size_idx].size); |
563 | agp_remove_bridge(bridge); | 576 | agp_remove_bridge(bridge); |
564 | agp_put_bridge(bridge); | 577 | agp_put_bridge(bridge); |
578 | |||
579 | agp_bridges_found--; | ||
565 | } | 580 | } |
566 | 581 | ||
567 | #ifdef CONFIG_PM | 582 | #ifdef CONFIG_PM |
@@ -709,6 +724,11 @@ static struct pci_device_id agp_amd64_pci_table[] = { | |||
709 | 724 | ||
710 | MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); | 725 | MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); |
711 | 726 | ||
727 | static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = { | ||
728 | { PCI_DEVICE_CLASS(0, 0) }, | ||
729 | { } | ||
730 | }; | ||
731 | |||
712 | static struct pci_driver agp_amd64_pci_driver = { | 732 | static struct pci_driver agp_amd64_pci_driver = { |
713 | .name = "agpgart-amd64", | 733 | .name = "agpgart-amd64", |
714 | .id_table = agp_amd64_pci_table, | 734 | .id_table = agp_amd64_pci_table, |
@@ -734,7 +754,6 @@ int __init agp_amd64_init(void) | |||
734 | return err; | 754 | return err; |
735 | 755 | ||
736 | if (agp_bridges_found == 0) { | 756 | if (agp_bridges_found == 0) { |
737 | struct pci_dev *dev; | ||
738 | if (!agp_try_unsupported && !agp_try_unsupported_boot) { | 757 | if (!agp_try_unsupported && !agp_try_unsupported_boot) { |
739 | printk(KERN_INFO PFX "No supported AGP bridge found.\n"); | 758 | printk(KERN_INFO PFX "No supported AGP bridge found.\n"); |
740 | #ifdef MODULE | 759 | #ifdef MODULE |
@@ -750,17 +769,10 @@ int __init agp_amd64_init(void) | |||
750 | return -ENODEV; | 769 | return -ENODEV; |
751 | 770 | ||
752 | /* Look for any AGP bridge */ | 771 | /* Look for any AGP bridge */ |
753 | dev = NULL; | 772 | agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; |
754 | err = -ENODEV; | 773 | err = driver_attach(&agp_amd64_pci_driver.driver); |
755 | for_each_pci_dev(dev) { | 774 | if (err == 0 && agp_bridges_found == 0) |
756 | if (!pci_find_capability(dev, PCI_CAP_ID_AGP)) | 775 | err = -ENODEV; |
757 | continue; | ||
758 | /* Only one bridge supported right now */ | ||
759 | if (agp_amd64_probe(dev, NULL) == 0) { | ||
760 | err = 0; | ||
761 | break; | ||
762 | } | ||
763 | } | ||
764 | } | 776 | } |
765 | return err; | 777 | return err; |
766 | } | 778 | } |
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 3b2ecbe86ebe..dc30e2243494 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c | |||
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) | |||
341 | { | 341 | { |
342 | struct aper_size_info_lvl2 *value; | 342 | struct aper_size_info_lvl2 *value; |
343 | struct ati_page_map page_dir; | 343 | struct ati_page_map page_dir; |
344 | unsigned long __iomem *cur_gatt; | ||
344 | unsigned long addr; | 345 | unsigned long addr; |
345 | int retval; | 346 | int retval; |
346 | u32 temp; | 347 | u32 temp; |
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) | |||
395 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ | 396 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ |
396 | } | 397 | } |
397 | 398 | ||
399 | for (i = 0; i < value->num_entries; i++) { | ||
400 | addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; | ||
401 | cur_gatt = GET_GATT(addr); | ||
402 | writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); | ||
403 | } | ||
404 | |||
398 | return 0; | 405 | return 0; |
399 | } | 406 | } |
400 | 407 | ||
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = { | |||
415 | .aperture_sizes = ati_generic_sizes, | 422 | .aperture_sizes = ati_generic_sizes, |
416 | .size_type = LVL2_APER_SIZE, | 423 | .size_type = LVL2_APER_SIZE, |
417 | .num_aperture_sizes = 7, | 424 | .num_aperture_sizes = 7, |
425 | .needs_scratch_page = true, | ||
418 | .configure = ati_configure, | 426 | .configure = ati_configure, |
419 | .fetch_size = ati_fetch_size, | 427 | .fetch_size = ati_fetch_size, |
420 | .cleanup = ati_cleanup, | 428 | .cleanup = ati_cleanup, |
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index 793f39ea9618..aa109cbe0e6e 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/page-flags.h> | 28 | #include <linux/page-flags.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include "agp.h" | 30 | #include "agp.h" |
31 | #include "intel-agp.h" | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * The real differences to the generic AGP code is | 34 | * The real differences to the generic AGP code is |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index aa4248efc5d8..d836a71bf06d 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -11,1531 +11,13 @@ | |||
11 | #include <linux/agp_backend.h> | 11 | #include <linux/agp_backend.h> |
12 | #include <asm/smp.h> | 12 | #include <asm/smp.h> |
13 | #include "agp.h" | 13 | #include "agp.h" |
14 | #include "intel-agp.h" | ||
15 | |||
16 | #include "intel-gtt.c" | ||
14 | 17 | ||
15 | int intel_agp_enabled; | 18 | int intel_agp_enabled; |
16 | EXPORT_SYMBOL(intel_agp_enabled); | 19 | EXPORT_SYMBOL(intel_agp_enabled); |
17 | 20 | ||
18 | /* | ||
19 | * If we have Intel graphics, we're not going to have anything other than | ||
20 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
21 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
22 | * Only newer chipsets need to bother with this, of course. | ||
23 | */ | ||
24 | #ifdef CONFIG_DMAR | ||
25 | #define USE_PCI_DMA_API 1 | ||
26 | #endif | ||
27 | |||
28 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | ||
29 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | ||
30 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | ||
31 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | ||
32 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 | ||
33 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 | ||
34 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | ||
35 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | ||
36 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | ||
37 | #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 | ||
38 | #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 | ||
39 | #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 | ||
40 | #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 | ||
41 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | ||
42 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | ||
43 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | ||
44 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 | ||
45 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 | ||
46 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 | ||
47 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 | ||
48 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | ||
49 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | ||
50 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | ||
51 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | ||
52 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | ||
53 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | ||
54 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
55 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
56 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | ||
57 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | ||
58 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | ||
59 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 | ||
60 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | ||
61 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | ||
62 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | ||
63 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | ||
64 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | ||
65 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | ||
66 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | ||
67 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | ||
68 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | ||
69 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | ||
70 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | ||
71 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | ||
72 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | ||
73 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | ||
74 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | ||
75 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | ||
76 | |||
77 | /* cover 915 and 945 variants */ | ||
78 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
79 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
80 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
81 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
82 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
83 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
84 | |||
85 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | ||
86 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ | ||
87 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | ||
88 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | ||
89 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | ||
90 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | ||
91 | |||
92 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | ||
93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | ||
94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | ||
95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
97 | |||
98 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
99 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
100 | |||
101 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
102 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
103 | |||
104 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | ||
105 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | ||
106 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | ||
107 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | ||
108 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | ||
109 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
110 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ | ||
111 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | ||
112 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | ||
113 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | ||
114 | IS_SNB) | ||
115 | |||
116 | extern int agp_memory_reserved; | ||
117 | |||
118 | |||
119 | /* Intel 815 register */ | ||
120 | #define INTEL_815_APCONT 0x51 | ||
121 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | ||
122 | |||
123 | /* Intel i820 registers */ | ||
124 | #define INTEL_I820_RDCR 0x51 | ||
125 | #define INTEL_I820_ERRSTS 0xc8 | ||
126 | |||
127 | /* Intel i840 registers */ | ||
128 | #define INTEL_I840_MCHCFG 0x50 | ||
129 | #define INTEL_I840_ERRSTS 0xc8 | ||
130 | |||
131 | /* Intel i850 registers */ | ||
132 | #define INTEL_I850_MCHCFG 0x50 | ||
133 | #define INTEL_I850_ERRSTS 0xc8 | ||
134 | |||
135 | /* intel 915G registers */ | ||
136 | #define I915_GMADDR 0x18 | ||
137 | #define I915_MMADDR 0x10 | ||
138 | #define I915_PTEADDR 0x1C | ||
139 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
140 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
141 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
142 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
143 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
144 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
145 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
146 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
147 | |||
148 | #define I915_IFPADDR 0x60 | ||
149 | |||
150 | /* Intel 965G registers */ | ||
151 | #define I965_MSAC 0x62 | ||
152 | #define I965_IFPADDR 0x70 | ||
153 | |||
154 | /* Intel 7505 registers */ | ||
155 | #define INTEL_I7505_APSIZE 0x74 | ||
156 | #define INTEL_I7505_NCAPID 0x60 | ||
157 | #define INTEL_I7505_NISTAT 0x6c | ||
158 | #define INTEL_I7505_ATTBASE 0x78 | ||
159 | #define INTEL_I7505_ERRSTS 0x42 | ||
160 | #define INTEL_I7505_AGPCTRL 0x70 | ||
161 | #define INTEL_I7505_MCHCFG 0x50 | ||
162 | |||
163 | #define SNB_GMCH_CTRL 0x50 | ||
164 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
165 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
166 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
167 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
168 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
169 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
170 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
171 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
172 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
173 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
174 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
175 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
176 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
177 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
178 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
179 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
180 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
181 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
182 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
183 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
184 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
185 | |||
186 | static const struct aper_size_info_fixed intel_i810_sizes[] = | ||
187 | { | ||
188 | {64, 16384, 4}, | ||
189 | /* The 32M mode still requires a 64k gatt */ | ||
190 | {32, 8192, 4} | ||
191 | }; | ||
192 | |||
193 | #define AGP_DCACHE_MEMORY 1 | ||
194 | #define AGP_PHYS_MEMORY 2 | ||
195 | #define INTEL_AGP_CACHED_MEMORY 3 | ||
196 | |||
197 | static struct gatt_mask intel_i810_masks[] = | ||
198 | { | ||
199 | {.mask = I810_PTE_VALID, .type = 0}, | ||
200 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, | ||
201 | {.mask = I810_PTE_VALID, .type = 0}, | ||
202 | {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, | ||
203 | .type = INTEL_AGP_CACHED_MEMORY} | ||
204 | }; | ||
205 | |||
206 | static struct _intel_private { | ||
207 | struct pci_dev *pcidev; /* device one */ | ||
208 | u8 __iomem *registers; | ||
209 | u32 __iomem *gtt; /* I915G */ | ||
210 | int num_dcache_entries; | ||
211 | /* gtt_entries is the number of gtt entries that are already mapped | ||
212 | * to stolen memory. Stolen memory is larger than the memory mapped | ||
213 | * through gtt_entries, as it includes some reserved space for the BIOS | ||
214 | * popup and for the GTT. | ||
215 | */ | ||
216 | int gtt_entries; /* i830+ */ | ||
217 | int gtt_total_size; | ||
218 | union { | ||
219 | void __iomem *i9xx_flush_page; | ||
220 | void *i8xx_flush_page; | ||
221 | }; | ||
222 | struct page *i8xx_page; | ||
223 | struct resource ifp_resource; | ||
224 | int resource_valid; | ||
225 | } intel_private; | ||
226 | |||
227 | #ifdef USE_PCI_DMA_API | ||
228 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
229 | { | ||
230 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
231 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
232 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
233 | return -EINVAL; | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) | ||
238 | { | ||
239 | pci_unmap_page(intel_private.pcidev, dma, | ||
240 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
241 | } | ||
242 | |||
/* Free the scatterlist built by intel_agp_map_memory().  A temporary
 * sg_table is reconstructed around mem->sg_list so sg_free_table() can
 * tear down the (possibly chained) list. */
static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}
255 | |||
/* Build a scatterlist over mem->pages (one PAGE_SIZE segment per page)
 * and DMA-map it.  On success mem->sg_list/num_sg are populated and 0
 * is returned; -ENOMEM if the table allocation or the mapping fails. */
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		return -ENOMEM;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg)) {
		/* mapping failed: release the list we just built */
		intel_agp_free_sglist(mem);
		return -ENOMEM;
	}
	return 0;
}
280 | |||
/* DMA-unmap and free the scatterlist attached to @mem. */
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}
289 | |||
/* Program GTT PTEs for @mem starting at entry @pg_start using the DMA
 * addresses from its scatterlist.  The final readl() flushes the
 * posted PTE writes out to the chipset (PCI posting). */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		/* Fast path: exactly one sg segment per page. */
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
					intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
						intel_private.gtt+j);
				j++;
			}
		}
	}
	readl(intel_private.gtt+j-1);	/* PCI posting */
}
325 | |||
326 | #else | ||
327 | |||
/* Non-DMA-API fallback: program GTT PTEs directly from page physical
 * addresses. */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;
	u32 cache_bits = 0;

	/* NOTE(review): cache_bits is computed for Sandybridge here but is
	 * never OR-ed into the PTE value written below — looks unfinished;
	 * confirm against the Sandybridge enabling patches. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
	{
		cache_bits = I830_PTE_SYSTEM_CACHED;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
				intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);	/* PCI posting */
}
348 | |||
349 | #endif | ||
350 | |||
351 | static int intel_i810_fetch_size(void) | ||
352 | { | ||
353 | u32 smram_miscc; | ||
354 | struct aper_size_info_fixed *values; | ||
355 | |||
356 | pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); | ||
357 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
358 | |||
359 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | ||
360 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); | ||
361 | return 0; | ||
362 | } | ||
363 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | ||
364 | agp_bridge->previous_size = | ||
365 | agp_bridge->current_size = (void *) (values + 1); | ||
366 | agp_bridge->aperture_size_idx = 1; | ||
367 | return values[1].size; | ||
368 | } else { | ||
369 | agp_bridge->previous_size = | ||
370 | agp_bridge->current_size = (void *) (values); | ||
371 | agp_bridge->aperture_size_idx = 0; | ||
372 | return values[0].size; | ||
373 | } | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
/* Bring the i810 bridge up: map the MMIO window if not yet mapped,
 * detect dedicated dcache video ram, latch the aperture base, enable
 * the page table, and fill the aperture with the scratch page.
 * Returns 0 on success, -ENOMEM if the MMIO remap fails. */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;	/* mask down to the 512K-aligned MMIO base */

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* point every aperture entry at the scratch page */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
419 | |||
/* Disable the page table and release the MMIO mapping. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
426 | |||
/* Intentional no-op: the i810 driver needs no explicit TLB flush hook. */
static void intel_i810_tlbflush(struct agp_memory *mem)
{
}
431 | |||
432 | static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | ||
433 | { | ||
434 | return; | ||
435 | } | ||
436 | |||
/* Exists to support ARGB cursors */
/* Allocate four physically-contiguous, uncached pages (order-2, below
 * 4GB) and take an extra reference on the head page.  Returns NULL on
 * allocation or caching-attribute failure. */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	/* the hardware needs the pages uncached; undo everything on failure */
	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
455 | |||
/* Counterpart of i8xx_alloc_pages(): restore write-back caching, drop
 * the extra reference, and free the order-2 block. */
static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
466 | |||
467 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
468 | int type) | ||
469 | { | ||
470 | if (type < AGP_USER_TYPES) | ||
471 | return type; | ||
472 | else if (type == AGP_USER_CACHED_MEMORY) | ||
473 | return INTEL_AGP_CACHED_MEMORY; | ||
474 | else | ||
475 | return 0; | ||
476 | } | ||
477 | |||
/* Bind @mem into the i810 GATT starting at entry @pg_start.
 * Handles dcache, physical and normal memory types.  Returns 0 on
 * success, -EBUSY if a target entry is occupied, -EINVAL otherwise.
 * NOTE(review): mem->is_flushed is set to true even on the error
 * paths (out_err falls through from out) — confirm that is intended. */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* nothing to bind: success */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* every target entry must currently be empty */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* dcache entries address the dedicated on-chip video ram */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting */
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));	/* PCI posting */
		break;
	default:
		goto out_err;
	}

	agp_bridge->driver->tlb_flush(mem);
out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
540 | |||
/* Unbind @mem from the i810 GATT by rewriting its entries with the
 * scratch page.  Always returns 0. */
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting */

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
557 | |||
558 | /* | ||
559 | * The i810/i830 requires a physical address to program its mouse | ||
560 | * pointer into hardware. | ||
561 | * However the Xserver still writes to it through the agp aperture. | ||
562 | */ | ||
563 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) | ||
564 | { | ||
565 | struct agp_memory *new; | ||
566 | struct page *page; | ||
567 | |||
568 | switch (pg_count) { | ||
569 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); | ||
570 | break; | ||
571 | case 4: | ||
572 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
573 | page = i8xx_alloc_pages(); | ||
574 | break; | ||
575 | default: | ||
576 | return NULL; | ||
577 | } | ||
578 | |||
579 | if (page == NULL) | ||
580 | return NULL; | ||
581 | |||
582 | new = agp_create_memory(pg_count); | ||
583 | if (new == NULL) | ||
584 | return NULL; | ||
585 | |||
586 | new->pages[0] = page; | ||
587 | if (pg_count == 4) { | ||
588 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
589 | new->pages[1] = new->pages[0] + 1; | ||
590 | new->pages[2] = new->pages[1] + 1; | ||
591 | new->pages[3] = new->pages[2] + 1; | ||
592 | } | ||
593 | new->page_count = pg_count; | ||
594 | new->num_scratch_pages = pg_count; | ||
595 | new->type = AGP_PHYS_MEMORY; | ||
596 | new->physical = page_to_phys(new->pages[0]); | ||
597 | return new; | ||
598 | } | ||
599 | |||
/* Type-specific allocation for the i810: dedicated-cache memory must be
 * allocated as the entire dcache in one chunk, physical memory goes
 * through the i8xx helper, and any other type is unsupported (NULL). */
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		/* dcache is only handed out as one complete allocation */
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		/* dcache has no backing system pages: drop the page array */
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}
622 | |||
/* Free memory handed out by intel_i810_alloc_by_type(); physical
 * allocations release their backing pages, the 4-page ARGB-cursor case
 * going through i8xx_destroy_pages(). */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
639 | |||
/* Combine a bus address with the per-type PTE flag bits to form a PTE
 * value. */
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
646 | |||
/* Fixed aperture configurations: {size (MB), num GTT entries, page order}. */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
655 | |||
/* Work out how many GTT entries are consumed by BIOS-stolen memory and
 * store the count in intel_private.gtt_entries.
 *
 * "size" is the reserved space (in KB) at the top of stolen memory —
 * BIOS popup plus, on chipsets whose GTT lives in stolen memory, the
 * GTT itself.  It is subtracted from the decoded stolen-memory amount
 * before converting to 4KB entries. */
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
		/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting. However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	/* Decode the stolen-memory size; the register layout differs per
	 * chipset generation. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* i830 local memory mode: size comes from the RDRAM
			 * channel-type register. */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
		       gtt_entries / KB(1), local ? "local" : "stolen");
		/* convert bytes of stolen memory into 4KB GTT entries */
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
		       "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
892 | |||
/* Tear down the i8xx chipset-flush page created by
 * intel_i830_setup_flush(). */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
902 | |||
/* Allocate and kmap the page used to force chipset write-buffer
 * flushes on i8xx (see intel_i830_chipset_flush()).  Failures leave
 * i8xx_page/i8xx_flush_page NULL. */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}
917 | |||
918 | /* The chipset_flush interface needs to get data that has already been | ||
919 | * flushed out of the CPU all the way out to main memory, because the GPU | ||
920 | * doesn't snoop those buffers. | ||
921 | * | ||
922 | * The 8xx series doesn't have the same lovely interface for flushing the | ||
923 | * chipset write buffers that the later chips do. According to the 865 | ||
924 | * specs, it's 64 octwords, or 1KB. So, to get those previous things in | ||
925 | * that buffer out, we just fill 1KB and clflush it out, on the assumption | ||
926 | * that it'll push whatever was in there out. It appears to work. | ||
927 | */ | ||
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	/* fill the 1KB window, then push it out of the CPU caches */
	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
939 | |||
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside in the GTT.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	/* no table is allocated here: the BIOS-built one is reused */
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;	/* MMIO base */

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	/* pick up the GATT bus address programmed by the BIOS */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
974 | |||
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	/* nothing to free: create_gatt_table allocated no table of its own */
	return 0;
}
982 | |||
983 | static int intel_i830_fetch_size(void) | ||
984 | { | ||
985 | u16 gmch_ctrl; | ||
986 | struct aper_size_info_fixed *values; | ||
987 | |||
988 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
989 | |||
990 | if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && | ||
991 | agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
992 | /* 855GM/852GM/865G has 128MB aperture size */ | ||
993 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | ||
994 | agp_bridge->aperture_size_idx = 0; | ||
995 | return values[0].size; | ||
996 | } | ||
997 | |||
998 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
999 | |||
1000 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | ||
1001 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | ||
1002 | agp_bridge->aperture_size_idx = 0; | ||
1003 | return values[0].size; | ||
1004 | } else { | ||
1005 | agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1); | ||
1006 | agp_bridge->aperture_size_idx = 1; | ||
1007 | return values[1].size; | ||
1008 | } | ||
1009 | |||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
/* Enable the i830 bridge: latch the aperture base, set the GMCH enable
 * bit, program the GATT base, and point every non-stolen GTT entry at
 * the scratch page.  Always returns 0. */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* skip the entries that already map stolen memory */
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
1044 | |||
/* Release the MMIO mapping set up by intel_i830_create_gatt_table(). */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
1049 | |||
/* Bind @mem into the i830+ GTT starting at entry @pg_start.  Entries
 * that map local/stolen memory may not be overwritten.  Returns 0 on
 * success, -EINVAL otherwise.
 * NOTE(review): as in the i810 variant, mem->is_flushed is set even on
 * the error paths — confirm that is intended. */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* nothing to bind: success */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));	/* PCI posting */
	agp_bridge->driver->tlb_flush(mem);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
1107 | |||
/* Unbind @mem from the i830+ GTT by rewriting its entries with the
 * scratch page; refuses to touch entries that map local/stolen
 * memory.  Returns 0 on success, -EINVAL otherwise. */
static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting */

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
1130 | |||
1131 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) | ||
1132 | { | ||
1133 | if (type == AGP_PHYS_MEMORY) | ||
1134 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
1135 | /* always return NULL for other allocation types for now */ | ||
1136 | return NULL; | ||
1137 | } | ||
1138 | |||
1139 | static int intel_alloc_chipset_flush_resource(void) | ||
1140 | { | ||
1141 | int ret; | ||
1142 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
1143 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
1144 | pcibios_align_resource, agp_bridge->dev); | ||
1145 | |||
1146 | return ret; | ||
1147 | } | ||
1148 | |||
1149 | static void intel_i915_setup_chipset_flush(void) | ||
1150 | { | ||
1151 | int ret; | ||
1152 | u32 temp; | ||
1153 | |||
1154 | pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); | ||
1155 | if (!(temp & 0x1)) { | ||
1156 | intel_alloc_chipset_flush_resource(); | ||
1157 | intel_private.resource_valid = 1; | ||
1158 | pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1159 | } else { | ||
1160 | temp &= ~1; | ||
1161 | |||
1162 | intel_private.resource_valid = 1; | ||
1163 | intel_private.ifp_resource.start = temp; | ||
1164 | intel_private.ifp_resource.end = temp + PAGE_SIZE; | ||
1165 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1166 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1167 | if (ret) | ||
1168 | intel_private.resource_valid = 0; | ||
1169 | } | ||
1170 | } | ||
1171 | |||
1172 | static void intel_i965_g33_setup_chipset_flush(void) | ||
1173 | { | ||
1174 | u32 temp_hi, temp_lo; | ||
1175 | int ret; | ||
1176 | |||
1177 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); | ||
1178 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); | ||
1179 | |||
1180 | if (!(temp_lo & 0x1)) { | ||
1181 | |||
1182 | intel_alloc_chipset_flush_resource(); | ||
1183 | |||
1184 | intel_private.resource_valid = 1; | ||
1185 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, | ||
1186 | upper_32_bits(intel_private.ifp_resource.start)); | ||
1187 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1188 | } else { | ||
1189 | u64 l64; | ||
1190 | |||
1191 | temp_lo &= ~0x1; | ||
1192 | l64 = ((u64)temp_hi << 32) | temp_lo; | ||
1193 | |||
1194 | intel_private.resource_valid = 1; | ||
1195 | intel_private.ifp_resource.start = l64; | ||
1196 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; | ||
1197 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1198 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1199 | if (ret) | ||
1200 | intel_private.resource_valid = 0; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1204 | static void intel_i9xx_setup_flush(void) | ||
1205 | { | ||
1206 | /* return if already configured */ | ||
1207 | if (intel_private.ifp_resource.start) | ||
1208 | return; | ||
1209 | |||
1210 | if (IS_SNB) | ||
1211 | return; | ||
1212 | |||
1213 | /* setup a resource for this object */ | ||
1214 | intel_private.ifp_resource.name = "Intel Flush Page"; | ||
1215 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | ||
1216 | |||
1217 | /* Setup chipset flush for 915 */ | ||
1218 | if (IS_I965 || IS_G33 || IS_G4X) { | ||
1219 | intel_i965_g33_setup_chipset_flush(); | ||
1220 | } else { | ||
1221 | intel_i915_setup_chipset_flush(); | ||
1222 | } | ||
1223 | |||
1224 | if (intel_private.ifp_resource.start) { | ||
1225 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | ||
1226 | if (!intel_private.i9xx_flush_page) | ||
1227 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); | ||
1228 | } | ||
1229 | } | ||
1230 | |||
1231 | static int intel_i915_configure(void) | ||
1232 | { | ||
1233 | struct aper_size_info_fixed *current_size; | ||
1234 | u32 temp; | ||
1235 | u16 gmch_ctrl; | ||
1236 | int i; | ||
1237 | |||
1238 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
1239 | |||
1240 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); | ||
1241 | |||
1242 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
1243 | |||
1244 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
1245 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
1246 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
1247 | |||
1248 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
1249 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
1250 | |||
1251 | if (agp_bridge->driver->needs_scratch_page) { | ||
1252 | for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { | ||
1253 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1254 | } | ||
1255 | readl(intel_private.gtt+i-1); /* PCI Posting. */ | ||
1256 | } | ||
1257 | |||
1258 | global_cache_flush(); | ||
1259 | |||
1260 | intel_i9xx_setup_flush(); | ||
1261 | |||
1262 | return 0; | ||
1263 | } | ||
1264 | |||
1265 | static void intel_i915_cleanup(void) | ||
1266 | { | ||
1267 | if (intel_private.i9xx_flush_page) | ||
1268 | iounmap(intel_private.i9xx_flush_page); | ||
1269 | if (intel_private.resource_valid) | ||
1270 | release_resource(&intel_private.ifp_resource); | ||
1271 | intel_private.ifp_resource.start = 0; | ||
1272 | intel_private.resource_valid = 0; | ||
1273 | iounmap(intel_private.gtt); | ||
1274 | iounmap(intel_private.registers); | ||
1275 | } | ||
1276 | |||
1277 | static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) | ||
1278 | { | ||
1279 | if (intel_private.i9xx_flush_page) | ||
1280 | writel(1, intel_private.i9xx_flush_page); | ||
1281 | } | ||
1282 | |||
1283 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
1284 | int type) | ||
1285 | { | ||
1286 | int num_entries; | ||
1287 | void *temp; | ||
1288 | int ret = -EINVAL; | ||
1289 | int mask_type; | ||
1290 | |||
1291 | if (mem->page_count == 0) | ||
1292 | goto out; | ||
1293 | |||
1294 | temp = agp_bridge->current_size; | ||
1295 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
1296 | |||
1297 | if (pg_start < intel_private.gtt_entries) { | ||
1298 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
1299 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
1300 | pg_start, intel_private.gtt_entries); | ||
1301 | |||
1302 | dev_info(&intel_private.pcidev->dev, | ||
1303 | "trying to insert into local/stolen memory\n"); | ||
1304 | goto out_err; | ||
1305 | } | ||
1306 | |||
1307 | if ((pg_start + mem->page_count) > num_entries) | ||
1308 | goto out_err; | ||
1309 | |||
1310 | /* The i915 can't check the GTT for entries since it's read only; | ||
1311 | * depend on the caller to make the correct offset decisions. | ||
1312 | */ | ||
1313 | |||
1314 | if (type != mem->type) | ||
1315 | goto out_err; | ||
1316 | |||
1317 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
1318 | |||
1319 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
1320 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
1321 | goto out_err; | ||
1322 | |||
1323 | if (!mem->is_flushed) | ||
1324 | global_cache_flush(); | ||
1325 | |||
1326 | intel_agp_insert_sg_entries(mem, pg_start, mask_type); | ||
1327 | agp_bridge->driver->tlb_flush(mem); | ||
1328 | |||
1329 | out: | ||
1330 | ret = 0; | ||
1331 | out_err: | ||
1332 | mem->is_flushed = true; | ||
1333 | return ret; | ||
1334 | } | ||
1335 | |||
1336 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1337 | int type) | ||
1338 | { | ||
1339 | int i; | ||
1340 | |||
1341 | if (mem->page_count == 0) | ||
1342 | return 0; | ||
1343 | |||
1344 | if (pg_start < intel_private.gtt_entries) { | ||
1345 | dev_info(&intel_private.pcidev->dev, | ||
1346 | "trying to disable local/stolen memory\n"); | ||
1347 | return -EINVAL; | ||
1348 | } | ||
1349 | |||
1350 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | ||
1351 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1352 | |||
1353 | readl(intel_private.gtt+i-1); | ||
1354 | |||
1355 | agp_bridge->driver->tlb_flush(mem); | ||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | /* Return the aperture size by just checking the resource length. The effect | ||
1360 | * described in the spec of the MSAC registers is just changing of the | ||
1361 | * resource size. | ||
1362 | */ | ||
1363 | static int intel_i9xx_fetch_size(void) | ||
1364 | { | ||
1365 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); | ||
1366 | int aper_size; /* size in megabytes */ | ||
1367 | int i; | ||
1368 | |||
1369 | aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); | ||
1370 | |||
1371 | for (i = 0; i < num_sizes; i++) { | ||
1372 | if (aper_size == intel_i830_sizes[i].size) { | ||
1373 | agp_bridge->current_size = intel_i830_sizes + i; | ||
1374 | agp_bridge->previous_size = agp_bridge->current_size; | ||
1375 | return aper_size; | ||
1376 | } | ||
1377 | } | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
1382 | /* The intel i915 automatically initializes the agp aperture during POST. | ||
1383 | * Use the memory already set aside for in the GTT. | ||
1384 | */ | ||
1385 | static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
1386 | { | ||
1387 | int page_order; | ||
1388 | struct aper_size_info_fixed *size; | ||
1389 | int num_entries; | ||
1390 | u32 temp, temp2; | ||
1391 | int gtt_map_size = 256 * 1024; | ||
1392 | |||
1393 | size = agp_bridge->current_size; | ||
1394 | page_order = size->page_order; | ||
1395 | num_entries = size->num_entries; | ||
1396 | agp_bridge->gatt_table_real = NULL; | ||
1397 | |||
1398 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1399 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); | ||
1400 | |||
1401 | if (IS_G33) | ||
1402 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | ||
1403 | intel_private.gtt = ioremap(temp2, gtt_map_size); | ||
1404 | if (!intel_private.gtt) | ||
1405 | return -ENOMEM; | ||
1406 | |||
1407 | intel_private.gtt_total_size = gtt_map_size / 4; | ||
1408 | |||
1409 | temp &= 0xfff80000; | ||
1410 | |||
1411 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1412 | if (!intel_private.registers) { | ||
1413 | iounmap(intel_private.gtt); | ||
1414 | return -ENOMEM; | ||
1415 | } | ||
1416 | |||
1417 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1418 | global_cache_flush(); /* FIXME: ? */ | ||
1419 | |||
1420 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1421 | intel_i830_init_gtt_entries(); | ||
1422 | |||
1423 | agp_bridge->gatt_table = NULL; | ||
1424 | |||
1425 | agp_bridge->gatt_bus_addr = temp; | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
1429 | |||
1430 | /* | ||
1431 | * The i965 supports 36-bit physical addresses, but to keep | ||
1432 | * the format of the GTT the same, the bits that don't fit | ||
1433 | * in a 32-bit word are shifted down to bits 4..7. | ||
1434 | * | ||
1435 | * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" | ||
1436 | * is always zero on 32-bit architectures, so no need to make | ||
1437 | * this conditional. | ||
1438 | */ | ||
1439 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | ||
1440 | dma_addr_t addr, int type) | ||
1441 | { | ||
1442 | /* Shift high bits down */ | ||
1443 | addr |= (addr >> 28) & 0xf0; | ||
1444 | |||
1445 | /* Type checking must be done elsewhere */ | ||
1446 | return addr | bridge->driver->masks[type].mask; | ||
1447 | } | ||
1448 | |||
1449 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | ||
1450 | { | ||
1451 | u16 snb_gmch_ctl; | ||
1452 | |||
1453 | switch (agp_bridge->dev->device) { | ||
1454 | case PCI_DEVICE_ID_INTEL_GM45_HB: | ||
1455 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: | ||
1456 | case PCI_DEVICE_ID_INTEL_Q45_HB: | ||
1457 | case PCI_DEVICE_ID_INTEL_G45_HB: | ||
1458 | case PCI_DEVICE_ID_INTEL_G41_HB: | ||
1459 | case PCI_DEVICE_ID_INTEL_B43_HB: | ||
1460 | case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: | ||
1461 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: | ||
1462 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: | ||
1463 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: | ||
1464 | *gtt_offset = *gtt_size = MB(2); | ||
1465 | break; | ||
1466 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | ||
1467 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | ||
1468 | *gtt_offset = MB(2); | ||
1469 | |||
1470 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
1471 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { | ||
1472 | default: | ||
1473 | case SNB_GTT_SIZE_0M: | ||
1474 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); | ||
1475 | *gtt_size = MB(0); | ||
1476 | break; | ||
1477 | case SNB_GTT_SIZE_1M: | ||
1478 | *gtt_size = MB(1); | ||
1479 | break; | ||
1480 | case SNB_GTT_SIZE_2M: | ||
1481 | *gtt_size = MB(2); | ||
1482 | break; | ||
1483 | } | ||
1484 | break; | ||
1485 | default: | ||
1486 | *gtt_offset = *gtt_size = KB(512); | ||
1487 | } | ||
1488 | } | ||
1489 | |||
1490 | /* The intel i965 automatically initializes the agp aperture during POST. | ||
1491 | * Use the memory already set aside for in the GTT. | ||
1492 | */ | ||
1493 | static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
1494 | { | ||
1495 | int page_order; | ||
1496 | struct aper_size_info_fixed *size; | ||
1497 | int num_entries; | ||
1498 | u32 temp; | ||
1499 | int gtt_offset, gtt_size; | ||
1500 | |||
1501 | size = agp_bridge->current_size; | ||
1502 | page_order = size->page_order; | ||
1503 | num_entries = size->num_entries; | ||
1504 | agp_bridge->gatt_table_real = NULL; | ||
1505 | |||
1506 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1507 | |||
1508 | temp &= 0xfff00000; | ||
1509 | |||
1510 | intel_i965_get_gtt_range(>t_offset, >t_size); | ||
1511 | |||
1512 | intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); | ||
1513 | |||
1514 | if (!intel_private.gtt) | ||
1515 | return -ENOMEM; | ||
1516 | |||
1517 | intel_private.gtt_total_size = gtt_size / 4; | ||
1518 | |||
1519 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1520 | if (!intel_private.registers) { | ||
1521 | iounmap(intel_private.gtt); | ||
1522 | return -ENOMEM; | ||
1523 | } | ||
1524 | |||
1525 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1526 | global_cache_flush(); /* FIXME: ? */ | ||
1527 | |||
1528 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1529 | intel_i830_init_gtt_entries(); | ||
1530 | |||
1531 | agp_bridge->gatt_table = NULL; | ||
1532 | |||
1533 | agp_bridge->gatt_bus_addr = temp; | ||
1534 | |||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | |||
1539 | static int intel_fetch_size(void) | 21 | static int intel_fetch_size(void) |
1540 | { | 22 | { |
1541 | int i; | 23 | int i; |
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = { | |||
1982 | .aperture_sizes = intel_generic_sizes, | 464 | .aperture_sizes = intel_generic_sizes, |
1983 | .size_type = U16_APER_SIZE, | 465 | .size_type = U16_APER_SIZE, |
1984 | .num_aperture_sizes = 7, | 466 | .num_aperture_sizes = 7, |
467 | .needs_scratch_page = true, | ||
1985 | .configure = intel_configure, | 468 | .configure = intel_configure, |
1986 | .fetch_size = intel_fetch_size, | 469 | .fetch_size = intel_fetch_size, |
1987 | .cleanup = intel_cleanup, | 470 | .cleanup = intel_cleanup, |
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = { | |||
2003 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 486 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2004 | }; | 487 | }; |
2005 | 488 | ||
2006 | static const struct agp_bridge_driver intel_810_driver = { | ||
2007 | .owner = THIS_MODULE, | ||
2008 | .aperture_sizes = intel_i810_sizes, | ||
2009 | .size_type = FIXED_APER_SIZE, | ||
2010 | .num_aperture_sizes = 2, | ||
2011 | .needs_scratch_page = true, | ||
2012 | .configure = intel_i810_configure, | ||
2013 | .fetch_size = intel_i810_fetch_size, | ||
2014 | .cleanup = intel_i810_cleanup, | ||
2015 | .tlb_flush = intel_i810_tlbflush, | ||
2016 | .mask_memory = intel_i810_mask_memory, | ||
2017 | .masks = intel_i810_masks, | ||
2018 | .agp_enable = intel_i810_agp_enable, | ||
2019 | .cache_flush = global_cache_flush, | ||
2020 | .create_gatt_table = agp_generic_create_gatt_table, | ||
2021 | .free_gatt_table = agp_generic_free_gatt_table, | ||
2022 | .insert_memory = intel_i810_insert_entries, | ||
2023 | .remove_memory = intel_i810_remove_entries, | ||
2024 | .alloc_by_type = intel_i810_alloc_by_type, | ||
2025 | .free_by_type = intel_i810_free_by_type, | ||
2026 | .agp_alloc_page = agp_generic_alloc_page, | ||
2027 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2028 | .agp_destroy_page = agp_generic_destroy_page, | ||
2029 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2030 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
2031 | }; | ||
2032 | |||
2033 | static const struct agp_bridge_driver intel_815_driver = { | 489 | static const struct agp_bridge_driver intel_815_driver = { |
2034 | .owner = THIS_MODULE, | 490 | .owner = THIS_MODULE, |
2035 | .aperture_sizes = intel_815_sizes, | 491 | .aperture_sizes = intel_815_sizes, |
2036 | .size_type = U8_APER_SIZE, | 492 | .size_type = U8_APER_SIZE, |
2037 | .num_aperture_sizes = 2, | 493 | .num_aperture_sizes = 2, |
494 | .needs_scratch_page = true, | ||
2038 | .configure = intel_815_configure, | 495 | .configure = intel_815_configure, |
2039 | .fetch_size = intel_815_fetch_size, | 496 | .fetch_size = intel_815_fetch_size, |
2040 | .cleanup = intel_8xx_cleanup, | 497 | .cleanup = intel_8xx_cleanup, |
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = { | |||
2056 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 513 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2057 | }; | 514 | }; |
2058 | 515 | ||
2059 | static const struct agp_bridge_driver intel_830_driver = { | ||
2060 | .owner = THIS_MODULE, | ||
2061 | .aperture_sizes = intel_i830_sizes, | ||
2062 | .size_type = FIXED_APER_SIZE, | ||
2063 | .num_aperture_sizes = 4, | ||
2064 | .needs_scratch_page = true, | ||
2065 | .configure = intel_i830_configure, | ||
2066 | .fetch_size = intel_i830_fetch_size, | ||
2067 | .cleanup = intel_i830_cleanup, | ||
2068 | .tlb_flush = intel_i810_tlbflush, | ||
2069 | .mask_memory = intel_i810_mask_memory, | ||
2070 | .masks = intel_i810_masks, | ||
2071 | .agp_enable = intel_i810_agp_enable, | ||
2072 | .cache_flush = global_cache_flush, | ||
2073 | .create_gatt_table = intel_i830_create_gatt_table, | ||
2074 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2075 | .insert_memory = intel_i830_insert_entries, | ||
2076 | .remove_memory = intel_i830_remove_entries, | ||
2077 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2078 | .free_by_type = intel_i810_free_by_type, | ||
2079 | .agp_alloc_page = agp_generic_alloc_page, | ||
2080 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2081 | .agp_destroy_page = agp_generic_destroy_page, | ||
2082 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2083 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2084 | .chipset_flush = intel_i830_chipset_flush, | ||
2085 | }; | ||
2086 | |||
2087 | static const struct agp_bridge_driver intel_820_driver = { | 516 | static const struct agp_bridge_driver intel_820_driver = { |
2088 | .owner = THIS_MODULE, | 517 | .owner = THIS_MODULE, |
2089 | .aperture_sizes = intel_8xx_sizes, | 518 | .aperture_sizes = intel_8xx_sizes, |
2090 | .size_type = U8_APER_SIZE, | 519 | .size_type = U8_APER_SIZE, |
2091 | .num_aperture_sizes = 7, | 520 | .num_aperture_sizes = 7, |
521 | .needs_scratch_page = true, | ||
2092 | .configure = intel_820_configure, | 522 | .configure = intel_820_configure, |
2093 | .fetch_size = intel_8xx_fetch_size, | 523 | .fetch_size = intel_8xx_fetch_size, |
2094 | .cleanup = intel_820_cleanup, | 524 | .cleanup = intel_820_cleanup, |
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = { | |||
2115 | .aperture_sizes = intel_830mp_sizes, | 545 | .aperture_sizes = intel_830mp_sizes, |
2116 | .size_type = U8_APER_SIZE, | 546 | .size_type = U8_APER_SIZE, |
2117 | .num_aperture_sizes = 4, | 547 | .num_aperture_sizes = 4, |
548 | .needs_scratch_page = true, | ||
2118 | .configure = intel_830mp_configure, | 549 | .configure = intel_830mp_configure, |
2119 | .fetch_size = intel_8xx_fetch_size, | 550 | .fetch_size = intel_8xx_fetch_size, |
2120 | .cleanup = intel_8xx_cleanup, | 551 | .cleanup = intel_8xx_cleanup, |
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = { | |||
2141 | .aperture_sizes = intel_8xx_sizes, | 572 | .aperture_sizes = intel_8xx_sizes, |
2142 | .size_type = U8_APER_SIZE, | 573 | .size_type = U8_APER_SIZE, |
2143 | .num_aperture_sizes = 7, | 574 | .num_aperture_sizes = 7, |
575 | .needs_scratch_page = true, | ||
2144 | .configure = intel_840_configure, | 576 | .configure = intel_840_configure, |
2145 | .fetch_size = intel_8xx_fetch_size, | 577 | .fetch_size = intel_8xx_fetch_size, |
2146 | .cleanup = intel_8xx_cleanup, | 578 | .cleanup = intel_8xx_cleanup, |
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = { | |||
2167 | .aperture_sizes = intel_8xx_sizes, | 599 | .aperture_sizes = intel_8xx_sizes, |
2168 | .size_type = U8_APER_SIZE, | 600 | .size_type = U8_APER_SIZE, |
2169 | .num_aperture_sizes = 7, | 601 | .num_aperture_sizes = 7, |
602 | .needs_scratch_page = true, | ||
2170 | .configure = intel_845_configure, | 603 | .configure = intel_845_configure, |
2171 | .fetch_size = intel_8xx_fetch_size, | 604 | .fetch_size = intel_8xx_fetch_size, |
2172 | .cleanup = intel_8xx_cleanup, | 605 | .cleanup = intel_8xx_cleanup, |
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = { | |||
2193 | .aperture_sizes = intel_8xx_sizes, | 626 | .aperture_sizes = intel_8xx_sizes, |
2194 | .size_type = U8_APER_SIZE, | 627 | .size_type = U8_APER_SIZE, |
2195 | .num_aperture_sizes = 7, | 628 | .num_aperture_sizes = 7, |
629 | .needs_scratch_page = true, | ||
2196 | .configure = intel_850_configure, | 630 | .configure = intel_850_configure, |
2197 | .fetch_size = intel_8xx_fetch_size, | 631 | .fetch_size = intel_8xx_fetch_size, |
2198 | .cleanup = intel_8xx_cleanup, | 632 | .cleanup = intel_8xx_cleanup, |
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = { | |||
2219 | .aperture_sizes = intel_8xx_sizes, | 653 | .aperture_sizes = intel_8xx_sizes, |
2220 | .size_type = U8_APER_SIZE, | 654 | .size_type = U8_APER_SIZE, |
2221 | .num_aperture_sizes = 7, | 655 | .num_aperture_sizes = 7, |
656 | .needs_scratch_page = true, | ||
2222 | .configure = intel_860_configure, | 657 | .configure = intel_860_configure, |
2223 | .fetch_size = intel_8xx_fetch_size, | 658 | .fetch_size = intel_8xx_fetch_size, |
2224 | .cleanup = intel_8xx_cleanup, | 659 | .cleanup = intel_8xx_cleanup, |
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = { | |||
2240 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 675 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2241 | }; | 676 | }; |
2242 | 677 | ||
2243 | static const struct agp_bridge_driver intel_915_driver = { | ||
2244 | .owner = THIS_MODULE, | ||
2245 | .aperture_sizes = intel_i830_sizes, | ||
2246 | .size_type = FIXED_APER_SIZE, | ||
2247 | .num_aperture_sizes = 4, | ||
2248 | .needs_scratch_page = true, | ||
2249 | .configure = intel_i915_configure, | ||
2250 | .fetch_size = intel_i9xx_fetch_size, | ||
2251 | .cleanup = intel_i915_cleanup, | ||
2252 | .tlb_flush = intel_i810_tlbflush, | ||
2253 | .mask_memory = intel_i810_mask_memory, | ||
2254 | .masks = intel_i810_masks, | ||
2255 | .agp_enable = intel_i810_agp_enable, | ||
2256 | .cache_flush = global_cache_flush, | ||
2257 | .create_gatt_table = intel_i915_create_gatt_table, | ||
2258 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2259 | .insert_memory = intel_i915_insert_entries, | ||
2260 | .remove_memory = intel_i915_remove_entries, | ||
2261 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2262 | .free_by_type = intel_i810_free_by_type, | ||
2263 | .agp_alloc_page = agp_generic_alloc_page, | ||
2264 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2265 | .agp_destroy_page = agp_generic_destroy_page, | ||
2266 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2267 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2268 | .chipset_flush = intel_i915_chipset_flush, | ||
2269 | #ifdef USE_PCI_DMA_API | ||
2270 | .agp_map_page = intel_agp_map_page, | ||
2271 | .agp_unmap_page = intel_agp_unmap_page, | ||
2272 | .agp_map_memory = intel_agp_map_memory, | ||
2273 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2274 | #endif | ||
2275 | }; | ||
2276 | |||
2277 | static const struct agp_bridge_driver intel_i965_driver = { | ||
2278 | .owner = THIS_MODULE, | ||
2279 | .aperture_sizes = intel_i830_sizes, | ||
2280 | .size_type = FIXED_APER_SIZE, | ||
2281 | .num_aperture_sizes = 4, | ||
2282 | .needs_scratch_page = true, | ||
2283 | .configure = intel_i915_configure, | ||
2284 | .fetch_size = intel_i9xx_fetch_size, | ||
2285 | .cleanup = intel_i915_cleanup, | ||
2286 | .tlb_flush = intel_i810_tlbflush, | ||
2287 | .mask_memory = intel_i965_mask_memory, | ||
2288 | .masks = intel_i810_masks, | ||
2289 | .agp_enable = intel_i810_agp_enable, | ||
2290 | .cache_flush = global_cache_flush, | ||
2291 | .create_gatt_table = intel_i965_create_gatt_table, | ||
2292 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2293 | .insert_memory = intel_i915_insert_entries, | ||
2294 | .remove_memory = intel_i915_remove_entries, | ||
2295 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2296 | .free_by_type = intel_i810_free_by_type, | ||
2297 | .agp_alloc_page = agp_generic_alloc_page, | ||
2298 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2299 | .agp_destroy_page = agp_generic_destroy_page, | ||
2300 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2301 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2302 | .chipset_flush = intel_i915_chipset_flush, | ||
2303 | #ifdef USE_PCI_DMA_API | ||
2304 | .agp_map_page = intel_agp_map_page, | ||
2305 | .agp_unmap_page = intel_agp_unmap_page, | ||
2306 | .agp_map_memory = intel_agp_map_memory, | ||
2307 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2308 | #endif | ||
2309 | }; | ||
2310 | |||
2311 | static const struct agp_bridge_driver intel_7505_driver = { | 678 | static const struct agp_bridge_driver intel_7505_driver = { |
2312 | .owner = THIS_MODULE, | 679 | .owner = THIS_MODULE, |
2313 | .aperture_sizes = intel_8xx_sizes, | 680 | .aperture_sizes = intel_8xx_sizes, |
2314 | .size_type = U8_APER_SIZE, | 681 | .size_type = U8_APER_SIZE, |
2315 | .num_aperture_sizes = 7, | 682 | .num_aperture_sizes = 7, |
683 | .needs_scratch_page = true, | ||
2316 | .configure = intel_7505_configure, | 684 | .configure = intel_7505_configure, |
2317 | .fetch_size = intel_8xx_fetch_size, | 685 | .fetch_size = intel_8xx_fetch_size, |
2318 | .cleanup = intel_8xx_cleanup, | 686 | .cleanup = intel_8xx_cleanup, |
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = { | |||
2334 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 702 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2335 | }; | 703 | }; |
2336 | 704 | ||
2337 | static const struct agp_bridge_driver intel_g33_driver = { | ||
2338 | .owner = THIS_MODULE, | ||
2339 | .aperture_sizes = intel_i830_sizes, | ||
2340 | .size_type = FIXED_APER_SIZE, | ||
2341 | .num_aperture_sizes = 4, | ||
2342 | .needs_scratch_page = true, | ||
2343 | .configure = intel_i915_configure, | ||
2344 | .fetch_size = intel_i9xx_fetch_size, | ||
2345 | .cleanup = intel_i915_cleanup, | ||
2346 | .tlb_flush = intel_i810_tlbflush, | ||
2347 | .mask_memory = intel_i965_mask_memory, | ||
2348 | .masks = intel_i810_masks, | ||
2349 | .agp_enable = intel_i810_agp_enable, | ||
2350 | .cache_flush = global_cache_flush, | ||
2351 | .create_gatt_table = intel_i915_create_gatt_table, | ||
2352 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2353 | .insert_memory = intel_i915_insert_entries, | ||
2354 | .remove_memory = intel_i915_remove_entries, | ||
2355 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2356 | .free_by_type = intel_i810_free_by_type, | ||
2357 | .agp_alloc_page = agp_generic_alloc_page, | ||
2358 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2359 | .agp_destroy_page = agp_generic_destroy_page, | ||
2360 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2361 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2362 | .chipset_flush = intel_i915_chipset_flush, | ||
2363 | #ifdef USE_PCI_DMA_API | ||
2364 | .agp_map_page = intel_agp_map_page, | ||
2365 | .agp_unmap_page = intel_agp_unmap_page, | ||
2366 | .agp_map_memory = intel_agp_map_memory, | ||
2367 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2368 | #endif | ||
2369 | }; | ||
2370 | |||
2371 | static int find_gmch(u16 device) | 705 | static int find_gmch(u16 device) |
2372 | { | 706 | { |
2373 | struct pci_dev *gmch_device; | 707 | struct pci_dev *gmch_device; |
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device) | |||
2392 | static const struct intel_driver_description { | 726 | static const struct intel_driver_description { |
2393 | unsigned int chip_id; | 727 | unsigned int chip_id; |
2394 | unsigned int gmch_chip_id; | 728 | unsigned int gmch_chip_id; |
2395 | unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */ | ||
2396 | char *name; | 729 | char *name; |
2397 | const struct agp_bridge_driver *driver; | 730 | const struct agp_bridge_driver *driver; |
2398 | const struct agp_bridge_driver *gmch_driver; | 731 | const struct agp_bridge_driver *gmch_driver; |
2399 | } intel_agp_chipsets[] = { | 732 | } intel_agp_chipsets[] = { |
2400 | { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, | 733 | { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL }, |
2401 | { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, | 734 | { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL }, |
2402 | { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, | 735 | { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL }, |
2403 | { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", | 736 | { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810", |
2404 | NULL, &intel_810_driver }, | 737 | NULL, &intel_810_driver }, |
2405 | { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", | 738 | { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810", |
2406 | NULL, &intel_810_driver }, | 739 | NULL, &intel_810_driver }, |
2407 | { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", | 740 | { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810", |
2408 | NULL, &intel_810_driver }, | 741 | NULL, &intel_810_driver }, |
2409 | { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", | 742 | { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815", |
2410 | &intel_815_driver, &intel_810_driver }, | 743 | &intel_815_driver, &intel_810_driver }, |
2411 | { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, | 744 | { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL }, |
2412 | { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, | 745 | { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL }, |
2413 | { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", | 746 | { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M", |
2414 | &intel_830mp_driver, &intel_830_driver }, | 747 | &intel_830mp_driver, &intel_830_driver }, |
2415 | { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, | 748 | { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL }, |
2416 | { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL }, | 749 | { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL }, |
2417 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", | 750 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M", |
2418 | &intel_845_driver, &intel_830_driver }, | 751 | &intel_845_driver, &intel_830_driver }, |
2419 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, | 752 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL }, |
2420 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", | 753 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854", |
2421 | &intel_845_driver, &intel_830_driver }, | 754 | &intel_845_driver, &intel_830_driver }, |
2422 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, | 755 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL }, |
2423 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", | 756 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", |
2424 | &intel_845_driver, &intel_830_driver }, | 757 | &intel_845_driver, &intel_830_driver }, |
2425 | { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, | 758 | { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL }, |
2426 | { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", | 759 | { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865", |
2427 | &intel_845_driver, &intel_830_driver }, | 760 | &intel_845_driver, &intel_830_driver }, |
2428 | { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, | 761 | { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL }, |
2429 | { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", | 762 | { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", |
2430 | NULL, &intel_915_driver }, | 763 | NULL, &intel_915_driver }, |
2431 | { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", | 764 | { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G", |
2432 | NULL, &intel_915_driver }, | 765 | NULL, &intel_915_driver }, |
2433 | { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", | 766 | { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", |
2434 | NULL, &intel_915_driver }, | 767 | NULL, &intel_915_driver }, |
2435 | { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", | 768 | { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G", |
2436 | NULL, &intel_915_driver }, | 769 | NULL, &intel_915_driver }, |
2437 | { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", | 770 | { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", |
2438 | NULL, &intel_915_driver }, | 771 | NULL, &intel_915_driver }, |
2439 | { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", | 772 | { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", |
2440 | NULL, &intel_915_driver }, | 773 | NULL, &intel_915_driver }, |
2441 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", | 774 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", |
2442 | NULL, &intel_i965_driver }, | 775 | NULL, &intel_i965_driver }, |
2443 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", | 776 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35", |
2444 | NULL, &intel_i965_driver }, | 777 | NULL, &intel_i965_driver }, |
2445 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", | 778 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", |
2446 | NULL, &intel_i965_driver }, | 779 | NULL, &intel_i965_driver }, |
2447 | { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", | 780 | { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G", |
2448 | NULL, &intel_i965_driver }, | 781 | NULL, &intel_i965_driver }, |
2449 | { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", | 782 | { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", |
2450 | NULL, &intel_i965_driver }, | 783 | NULL, &intel_i965_driver }, |
2451 | { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", | 784 | { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", |
2452 | NULL, &intel_i965_driver }, | 785 | NULL, &intel_i965_driver }, |
2453 | { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, | 786 | { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL }, |
2454 | { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, | 787 | { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL }, |
2455 | { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", | 788 | { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33", |
2456 | NULL, &intel_g33_driver }, | 789 | NULL, &intel_g33_driver }, |
2457 | { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", | 790 | { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", |
2458 | NULL, &intel_g33_driver }, | 791 | NULL, &intel_g33_driver }, |
2459 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", | 792 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", |
2460 | NULL, &intel_g33_driver }, | 793 | NULL, &intel_g33_driver }, |
2461 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", | 794 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", |
2462 | NULL, &intel_g33_driver }, | 795 | NULL, &intel_g33_driver }, |
2463 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", | 796 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", |
2464 | NULL, &intel_g33_driver }, | 797 | NULL, &intel_g33_driver }, |
2465 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, | 798 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, |
2466 | "GM45", NULL, &intel_i965_driver }, | 799 | "GM45", NULL, &intel_i965_driver }, |
2467 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, | 800 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, |
2468 | "Eaglelake", NULL, &intel_i965_driver }, | 801 | "Eaglelake", NULL, &intel_i965_driver }, |
2469 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, | 802 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, |
2470 | "Q45/Q43", NULL, &intel_i965_driver }, | 803 | "Q45/Q43", NULL, &intel_i965_driver }, |
2471 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, | 804 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, |
2472 | "G45/G43", NULL, &intel_i965_driver }, | 805 | "G45/G43", NULL, &intel_i965_driver }, |
2473 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, | 806 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, |
2474 | "B43", NULL, &intel_i965_driver }, | 807 | "B43", NULL, &intel_i965_driver }, |
2475 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, | 808 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, |
2476 | "G41", NULL, &intel_i965_driver }, | 809 | "G41", NULL, &intel_i965_driver }, |
2477 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, | 810 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
2478 | "HD Graphics", NULL, &intel_i965_driver }, | 811 | "HD Graphics", NULL, &intel_i965_driver }, |
2479 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 812 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2480 | "HD Graphics", NULL, &intel_i965_driver }, | 813 | "HD Graphics", NULL, &intel_i965_driver }, |
2481 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 814 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2482 | "HD Graphics", NULL, &intel_i965_driver }, | 815 | "HD Graphics", NULL, &intel_i965_driver }, |
2483 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 816 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2484 | "HD Graphics", NULL, &intel_i965_driver }, | 817 | "HD Graphics", NULL, &intel_i965_driver }, |
2485 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, | 818 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, |
2486 | "Sandybridge", NULL, &intel_i965_driver }, | 819 | "Sandybridge", NULL, &intel_i965_driver }, |
2487 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, | 820 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, |
2488 | "Sandybridge", NULL, &intel_i965_driver }, | 821 | "Sandybridge", NULL, &intel_i965_driver }, |
2489 | { 0, 0, 0, NULL, NULL, NULL } | 822 | { 0, 0, NULL, NULL, NULL } |
2490 | }; | 823 | }; |
2491 | 824 | ||
825 | static int __devinit intel_gmch_probe(struct pci_dev *pdev, | ||
826 | struct agp_bridge_data *bridge) | ||
827 | { | ||
828 | int i; | ||
829 | bridge->driver = NULL; | ||
830 | |||
831 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | ||
832 | if ((intel_agp_chipsets[i].gmch_chip_id != 0) && | ||
833 | find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { | ||
834 | bridge->driver = | ||
835 | intel_agp_chipsets[i].gmch_driver; | ||
836 | break; | ||
837 | } | ||
838 | } | ||
839 | |||
840 | if (!bridge->driver) | ||
841 | return 0; | ||
842 | |||
843 | bridge->dev_private_data = &intel_private; | ||
844 | bridge->dev = pdev; | ||
845 | |||
846 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | ||
847 | |||
848 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | ||
849 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
850 | dev_err(&intel_private.pcidev->dev, | ||
851 | "set gfx device dma mask 36bit failed!\n"); | ||
852 | else | ||
853 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
854 | DMA_BIT_MASK(36)); | ||
855 | } | ||
856 | |||
857 | return 1; | ||
858 | } | ||
859 | |||
2492 | static int __devinit agp_intel_probe(struct pci_dev *pdev, | 860 | static int __devinit agp_intel_probe(struct pci_dev *pdev, |
2493 | const struct pci_device_id *ent) | 861 | const struct pci_device_id *ent) |
2494 | { | 862 | { |
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2503 | if (!bridge) | 871 | if (!bridge) |
2504 | return -ENOMEM; | 872 | return -ENOMEM; |
2505 | 873 | ||
874 | bridge->capndx = cap_ptr; | ||
875 | |||
876 | if (intel_gmch_probe(pdev, bridge)) | ||
877 | goto found_gmch; | ||
878 | |||
2506 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | 879 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { |
2507 | /* In case that multiple models of gfx chip may | 880 | /* In case that multiple models of gfx chip may |
2508 | stand on same host bridge type, this can be | 881 | stand on same host bridge type, this can be |
2509 | sure we detect the right IGD. */ | 882 | sure we detect the right IGD. */ |
2510 | if (pdev->device == intel_agp_chipsets[i].chip_id) { | 883 | if (pdev->device == intel_agp_chipsets[i].chip_id) { |
2511 | if ((intel_agp_chipsets[i].gmch_chip_id != 0) && | 884 | bridge->driver = intel_agp_chipsets[i].driver; |
2512 | find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { | 885 | break; |
2513 | bridge->driver = | ||
2514 | intel_agp_chipsets[i].gmch_driver; | ||
2515 | break; | ||
2516 | } else if (intel_agp_chipsets[i].multi_gmch_chip) { | ||
2517 | continue; | ||
2518 | } else { | ||
2519 | bridge->driver = intel_agp_chipsets[i].driver; | ||
2520 | break; | ||
2521 | } | ||
2522 | } | 886 | } |
2523 | } | 887 | } |
2524 | 888 | ||
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2530 | return -ENODEV; | 894 | return -ENODEV; |
2531 | } | 895 | } |
2532 | 896 | ||
2533 | if (bridge->driver == NULL) { | 897 | if (!bridge->driver) { |
2534 | /* bridge has no AGP and no IGD detected */ | ||
2535 | if (cap_ptr) | 898 | if (cap_ptr) |
2536 | dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", | 899 | dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", |
2537 | intel_agp_chipsets[i].gmch_chip_id); | 900 | intel_agp_chipsets[i].gmch_chip_id); |
2538 | agp_put_bridge(bridge); | 901 | agp_put_bridge(bridge); |
2539 | return -ENODEV; | 902 | return -ENODEV; |
2540 | } | 903 | } |
2541 | 904 | ||
2542 | bridge->dev = pdev; | 905 | bridge->dev = pdev; |
2543 | bridge->capndx = cap_ptr; | 906 | bridge->dev_private_data = NULL; |
2544 | bridge->dev_private_data = &intel_private; | ||
2545 | 907 | ||
2546 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | 908 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); |
2547 | 909 | ||
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2577 | &bridge->mode); | 939 | &bridge->mode); |
2578 | } | 940 | } |
2579 | 941 | ||
2580 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | 942 | found_gmch: |
2581 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
2582 | dev_err(&intel_private.pcidev->dev, | ||
2583 | "set gfx device dma mask 36bit failed!\n"); | ||
2584 | else | ||
2585 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
2586 | DMA_BIT_MASK(36)); | ||
2587 | } | ||
2588 | |||
2589 | pci_set_drvdata(pdev, bridge); | 943 | pci_set_drvdata(pdev, bridge); |
2590 | err = agp_add_bridge(bridge); | 944 | err = agp_add_bridge(bridge); |
2591 | if (!err) | 945 | if (!err) |
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev) | |||
2611 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | 965 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); |
2612 | int ret_val; | 966 | int ret_val; |
2613 | 967 | ||
2614 | if (bridge->driver == &intel_generic_driver) | 968 | bridge->driver->configure(); |
2615 | intel_configure(); | ||
2616 | else if (bridge->driver == &intel_850_driver) | ||
2617 | intel_850_configure(); | ||
2618 | else if (bridge->driver == &intel_845_driver) | ||
2619 | intel_845_configure(); | ||
2620 | else if (bridge->driver == &intel_830mp_driver) | ||
2621 | intel_830mp_configure(); | ||
2622 | else if (bridge->driver == &intel_915_driver) | ||
2623 | intel_i915_configure(); | ||
2624 | else if (bridge->driver == &intel_830_driver) | ||
2625 | intel_i830_configure(); | ||
2626 | else if (bridge->driver == &intel_810_driver) | ||
2627 | intel_i810_configure(); | ||
2628 | else if (bridge->driver == &intel_i965_driver) | ||
2629 | intel_i915_configure(); | ||
2630 | 969 | ||
2631 | ret_val = agp_rebind_memory(); | 970 | ret_val = agp_rebind_memory(); |
2632 | if (ret_val != 0) | 971 | if (ret_val != 0) |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h new file mode 100644 index 000000000000..2547465d4658 --- /dev/null +++ b/drivers/char/agp/intel-agp.h | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * Common Intel AGPGART and GTT definitions. | ||
3 | */ | ||
4 | |||
5 | /* Intel registers */ | ||
6 | #define INTEL_APSIZE 0xb4 | ||
7 | #define INTEL_ATTBASE 0xb8 | ||
8 | #define INTEL_AGPCTRL 0xb0 | ||
9 | #define INTEL_NBXCFG 0x50 | ||
10 | #define INTEL_ERRSTS 0x91 | ||
11 | |||
12 | /* Intel i830 registers */ | ||
13 | #define I830_GMCH_CTRL 0x52 | ||
14 | #define I830_GMCH_ENABLED 0x4 | ||
15 | #define I830_GMCH_MEM_MASK 0x1 | ||
16 | #define I830_GMCH_MEM_64M 0x1 | ||
17 | #define I830_GMCH_MEM_128M 0 | ||
18 | #define I830_GMCH_GMS_MASK 0x70 | ||
19 | #define I830_GMCH_GMS_DISABLED 0x00 | ||
20 | #define I830_GMCH_GMS_LOCAL 0x10 | ||
21 | #define I830_GMCH_GMS_STOLEN_512 0x20 | ||
22 | #define I830_GMCH_GMS_STOLEN_1024 0x30 | ||
23 | #define I830_GMCH_GMS_STOLEN_8192 0x40 | ||
24 | #define I830_RDRAM_CHANNEL_TYPE 0x03010 | ||
25 | #define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) | ||
26 | #define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) | ||
27 | |||
28 | /* This one is for I830MP w. an external graphic card */ | ||
29 | #define INTEL_I830_ERRSTS 0x92 | ||
30 | |||
31 | /* Intel 855GM/852GM registers */ | ||
32 | #define I855_GMCH_GMS_MASK 0xF0 | ||
33 | #define I855_GMCH_GMS_STOLEN_0M 0x0 | ||
34 | #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
35 | #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
36 | #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
37 | #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
38 | #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
39 | #define I85X_CAPID 0x44 | ||
40 | #define I85X_VARIANT_MASK 0x7 | ||
41 | #define I85X_VARIANT_SHIFT 5 | ||
42 | #define I855_GME 0x0 | ||
43 | #define I855_GM 0x4 | ||
44 | #define I852_GME 0x2 | ||
45 | #define I852_GM 0x5 | ||
46 | |||
47 | /* Intel i845 registers */ | ||
48 | #define INTEL_I845_AGPM 0x51 | ||
49 | #define INTEL_I845_ERRSTS 0xc8 | ||
50 | |||
51 | /* Intel i860 registers */ | ||
52 | #define INTEL_I860_MCHCFG 0x50 | ||
53 | #define INTEL_I860_ERRSTS 0xc8 | ||
54 | |||
55 | /* Intel i810 registers */ | ||
56 | #define I810_GMADDR 0x10 | ||
57 | #define I810_MMADDR 0x14 | ||
58 | #define I810_PTE_BASE 0x10000 | ||
59 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | ||
60 | #define I810_PTE_LOCAL 0x00000002 | ||
61 | #define I810_PTE_VALID 0x00000001 | ||
62 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
63 | #define I810_SMRAM_MISCC 0x70 | ||
64 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | ||
65 | #define I810_GFX_MEM_WIN_32M 0x00010000 | ||
66 | #define I810_GMS 0x000000c0 | ||
67 | #define I810_GMS_DISABLE 0x00000000 | ||
68 | #define I810_PGETBL_CTL 0x2020 | ||
69 | #define I810_PGETBL_ENABLED 0x00000001 | ||
70 | #define I965_PGETBL_SIZE_MASK 0x0000000e | ||
71 | #define I965_PGETBL_SIZE_512KB (0 << 1) | ||
72 | #define I965_PGETBL_SIZE_256KB (1 << 1) | ||
73 | #define I965_PGETBL_SIZE_128KB (2 << 1) | ||
74 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
75 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
76 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
77 | #define G33_PGETBL_SIZE_MASK (3 << 8) | ||
78 | #define G33_PGETBL_SIZE_1M (1 << 8) | ||
79 | #define G33_PGETBL_SIZE_2M (2 << 8) | ||
80 | |||
81 | #define I810_DRAM_CTL 0x3000 | ||
82 | #define I810_DRAM_ROW_0 0x00000001 | ||
83 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | ||
84 | |||
85 | /* Intel 815 register */ | ||
86 | #define INTEL_815_APCONT 0x51 | ||
87 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | ||
88 | |||
89 | /* Intel i820 registers */ | ||
90 | #define INTEL_I820_RDCR 0x51 | ||
91 | #define INTEL_I820_ERRSTS 0xc8 | ||
92 | |||
93 | /* Intel i840 registers */ | ||
94 | #define INTEL_I840_MCHCFG 0x50 | ||
95 | #define INTEL_I840_ERRSTS 0xc8 | ||
96 | |||
97 | /* Intel i850 registers */ | ||
98 | #define INTEL_I850_MCHCFG 0x50 | ||
99 | #define INTEL_I850_ERRSTS 0xc8 | ||
100 | |||
101 | /* intel 915G registers */ | ||
102 | #define I915_GMADDR 0x18 | ||
103 | #define I915_MMADDR 0x10 | ||
104 | #define I915_PTEADDR 0x1C | ||
105 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
106 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
107 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
108 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
109 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
110 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
111 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
112 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
113 | |||
114 | #define I915_IFPADDR 0x60 | ||
115 | |||
116 | /* Intel 965G registers */ | ||
117 | #define I965_MSAC 0x62 | ||
118 | #define I965_IFPADDR 0x70 | ||
119 | |||
120 | /* Intel 7505 registers */ | ||
121 | #define INTEL_I7505_APSIZE 0x74 | ||
122 | #define INTEL_I7505_NCAPID 0x60 | ||
123 | #define INTEL_I7505_NISTAT 0x6c | ||
124 | #define INTEL_I7505_ATTBASE 0x78 | ||
125 | #define INTEL_I7505_ERRSTS 0x42 | ||
126 | #define INTEL_I7505_AGPCTRL 0x70 | ||
127 | #define INTEL_I7505_MCHCFG 0x50 | ||
128 | |||
129 | #define SNB_GMCH_CTRL 0x50 | ||
130 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
131 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
132 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
133 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
134 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
135 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
136 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
137 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
138 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
139 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
140 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
141 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
142 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
143 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
144 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
145 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
146 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
147 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
148 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
149 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
150 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
151 | |||
152 | /* pci devices ids */ | ||
153 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | ||
154 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | ||
155 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | ||
156 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | ||
157 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 | ||
158 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 | ||
159 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | ||
160 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | ||
161 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | ||
162 | #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 | ||
163 | #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 | ||
164 | #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 | ||
165 | #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 | ||
166 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | ||
167 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | ||
168 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | ||
169 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 | ||
170 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 | ||
171 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 | ||
172 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 | ||
173 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | ||
174 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | ||
175 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | ||
176 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | ||
177 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | ||
178 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | ||
179 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
180 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
181 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | ||
182 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | ||
183 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | ||
184 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 | ||
185 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | ||
186 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | ||
187 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | ||
188 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | ||
189 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | ||
190 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | ||
191 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | ||
192 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | ||
193 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | ||
194 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | ||
195 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | ||
196 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | ||
197 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | ||
198 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | ||
199 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | ||
200 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | ||
201 | |||
202 | /* cover 915 and 945 variants */ | ||
203 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
204 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
205 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
206 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
207 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
208 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
209 | |||
210 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | ||
211 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ | ||
212 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | ||
213 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | ||
214 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | ||
215 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | ||
216 | |||
217 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | ||
218 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | ||
219 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | ||
220 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
221 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
222 | |||
223 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
224 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
225 | |||
226 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
227 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
228 | |||
229 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | ||
230 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | ||
231 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | ||
232 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | ||
233 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | ||
234 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
235 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ | ||
236 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | ||
237 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | ||
238 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | ||
239 | IS_SNB) | ||
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c new file mode 100644 index 000000000000..e8ea6825822c --- /dev/null +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -0,0 +1,1516 @@ | |||
1 | /* | ||
2 | * Intel GTT (Graphics Translation Table) routines | ||
3 | * | ||
4 | * Caveat: This driver implements the linux agp interface, but this is far from | ||
5 | * a agp driver! GTT support ended up here for purely historical reasons: The | ||
6 | * old userspace intel graphics drivers needed an interface to map memory into | ||
7 | * the GTT. And the drm provides a default interface for graphic devices sitting | ||
8 | * on an agp port. So it made sense to fake the GTT support as an agp port to | ||
9 | * avoid having to create a new api. | ||
10 | * | ||
11 | * With gem this does not make much sense anymore, just needlessly complicates | ||
12 | * the code. But as long as the old graphics stack is still support, it's stuck | ||
13 | * here. | ||
14 | * | ||
15 | * /fairy-tale-mode off | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * If we have Intel graphics, we're not going to have anything other than | ||
20 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
21 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
22 | * Only newer chipsets need to bother with this, of course. | ||
23 | */ | ||
24 | #ifdef CONFIG_DMAR | ||
25 | #define USE_PCI_DMA_API 1 | ||
26 | #endif | ||
27 | |||
28 | static const struct aper_size_info_fixed intel_i810_sizes[] = | ||
29 | { | ||
30 | {64, 16384, 4}, | ||
31 | /* The 32M mode still requires a 64k gatt */ | ||
32 | {32, 8192, 4} | ||
33 | }; | ||
34 | |||
35 | #define AGP_DCACHE_MEMORY 1 | ||
36 | #define AGP_PHYS_MEMORY 2 | ||
37 | #define INTEL_AGP_CACHED_MEMORY 3 | ||
38 | |||
39 | static struct gatt_mask intel_i810_masks[] = | ||
40 | { | ||
41 | {.mask = I810_PTE_VALID, .type = 0}, | ||
42 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, | ||
43 | {.mask = I810_PTE_VALID, .type = 0}, | ||
44 | {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, | ||
45 | .type = INTEL_AGP_CACHED_MEMORY} | ||
46 | }; | ||
47 | |||
48 | static struct _intel_private { | ||
49 | struct pci_dev *pcidev; /* device one */ | ||
50 | u8 __iomem *registers; | ||
51 | u32 __iomem *gtt; /* I915G */ | ||
52 | int num_dcache_entries; | ||
53 | /* gtt_entries is the number of gtt entries that are already mapped | ||
54 | * to stolen memory. Stolen memory is larger than the memory mapped | ||
55 | * through gtt_entries, as it includes some reserved space for the BIOS | ||
56 | * popup and for the GTT. | ||
57 | */ | ||
58 | int gtt_entries; /* i830+ */ | ||
59 | int gtt_total_size; | ||
60 | union { | ||
61 | void __iomem *i9xx_flush_page; | ||
62 | void *i8xx_flush_page; | ||
63 | }; | ||
64 | struct page *i8xx_page; | ||
65 | struct resource ifp_resource; | ||
66 | int resource_valid; | ||
67 | } intel_private; | ||
68 | |||
69 | #ifdef USE_PCI_DMA_API | ||
70 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
71 | { | ||
72 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
73 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
74 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
75 | return -EINVAL; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) | ||
80 | { | ||
81 | pci_unmap_page(intel_private.pcidev, dma, | ||
82 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
83 | } | ||
84 | |||
85 | static void intel_agp_free_sglist(struct agp_memory *mem) | ||
86 | { | ||
87 | struct sg_table st; | ||
88 | |||
89 | st.sgl = mem->sg_list; | ||
90 | st.orig_nents = st.nents = mem->page_count; | ||
91 | |||
92 | sg_free_table(&st); | ||
93 | |||
94 | mem->sg_list = NULL; | ||
95 | mem->num_sg = 0; | ||
96 | } | ||
97 | |||
98 | static int intel_agp_map_memory(struct agp_memory *mem) | ||
99 | { | ||
100 | struct sg_table st; | ||
101 | struct scatterlist *sg; | ||
102 | int i; | ||
103 | |||
104 | DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); | ||
105 | |||
106 | if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) | ||
107 | return -ENOMEM; | ||
108 | |||
109 | mem->sg_list = sg = st.sgl; | ||
110 | |||
111 | for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) | ||
112 | sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); | ||
113 | |||
114 | mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, | ||
115 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
116 | if (unlikely(!mem->num_sg)) { | ||
117 | intel_agp_free_sglist(mem); | ||
118 | return -ENOMEM; | ||
119 | } | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static void intel_agp_unmap_memory(struct agp_memory *mem) | ||
124 | { | ||
125 | DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); | ||
126 | |||
127 | pci_unmap_sg(intel_private.pcidev, mem->sg_list, | ||
128 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
129 | intel_agp_free_sglist(mem); | ||
130 | } | ||
131 | |||
132 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
133 | off_t pg_start, int mask_type) | ||
134 | { | ||
135 | struct scatterlist *sg; | ||
136 | int i, j; | ||
137 | |||
138 | j = pg_start; | ||
139 | |||
140 | WARN_ON(!mem->num_sg); | ||
141 | |||
142 | if (mem->num_sg == mem->page_count) { | ||
143 | for_each_sg(mem->sg_list, sg, mem->page_count, i) { | ||
144 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
145 | sg_dma_address(sg), mask_type), | ||
146 | intel_private.gtt+j); | ||
147 | j++; | ||
148 | } | ||
149 | } else { | ||
150 | /* sg may merge pages, but we have to separate | ||
151 | * per-page addr for GTT */ | ||
152 | unsigned int len, m; | ||
153 | |||
154 | for_each_sg(mem->sg_list, sg, mem->num_sg, i) { | ||
155 | len = sg_dma_len(sg) / PAGE_SIZE; | ||
156 | for (m = 0; m < len; m++) { | ||
157 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
158 | sg_dma_address(sg) + m * PAGE_SIZE, | ||
159 | mask_type), | ||
160 | intel_private.gtt+j); | ||
161 | j++; | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | readl(intel_private.gtt+j-1); | ||
166 | } | ||
167 | |||
168 | #else | ||
169 | |||
170 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
171 | off_t pg_start, int mask_type) | ||
172 | { | ||
173 | int i, j; | ||
174 | u32 cache_bits = 0; | ||
175 | |||
176 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
177 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
178 | { | ||
179 | cache_bits = I830_PTE_SYSTEM_CACHED; | ||
180 | } | ||
181 | |||
182 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
183 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
184 | page_to_phys(mem->pages[i]), mask_type), | ||
185 | intel_private.gtt+j); | ||
186 | } | ||
187 | |||
188 | readl(intel_private.gtt+j-1); | ||
189 | } | ||
190 | |||
191 | #endif | ||
192 | |||
193 | static int intel_i810_fetch_size(void) | ||
194 | { | ||
195 | u32 smram_miscc; | ||
196 | struct aper_size_info_fixed *values; | ||
197 | |||
198 | pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); | ||
199 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
200 | |||
201 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | ||
202 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); | ||
203 | return 0; | ||
204 | } | ||
205 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | ||
206 | agp_bridge->current_size = (void *) (values + 1); | ||
207 | agp_bridge->aperture_size_idx = 1; | ||
208 | return values[1].size; | ||
209 | } else { | ||
210 | agp_bridge->current_size = (void *) (values); | ||
211 | agp_bridge->aperture_size_idx = 0; | ||
212 | return values[0].size; | ||
213 | } | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int intel_i810_configure(void) | ||
219 | { | ||
220 | struct aper_size_info_fixed *current_size; | ||
221 | u32 temp; | ||
222 | int i; | ||
223 | |||
224 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
225 | |||
226 | if (!intel_private.registers) { | ||
227 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); | ||
228 | temp &= 0xfff80000; | ||
229 | |||
230 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
231 | if (!intel_private.registers) { | ||
232 | dev_err(&intel_private.pcidev->dev, | ||
233 | "can't remap memory\n"); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | if ((readl(intel_private.registers+I810_DRAM_CTL) | ||
239 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { | ||
240 | /* This will need to be dynamically assigned */ | ||
241 | dev_info(&intel_private.pcidev->dev, | ||
242 | "detected 4MB dedicated video ram\n"); | ||
243 | intel_private.num_dcache_entries = 1024; | ||
244 | } | ||
245 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | ||
246 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
247 | writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
248 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
249 | |||
250 | if (agp_bridge->driver->needs_scratch_page) { | ||
251 | for (i = 0; i < current_size->num_entries; i++) { | ||
252 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
253 | } | ||
254 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ | ||
255 | } | ||
256 | global_cache_flush(); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static void intel_i810_cleanup(void) | ||
261 | { | ||
262 | writel(0, intel_private.registers+I810_PGETBL_CTL); | ||
263 | readl(intel_private.registers); /* PCI Posting. */ | ||
264 | iounmap(intel_private.registers); | ||
265 | } | ||
266 | |||
267 | static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | ||
268 | { | ||
269 | return; | ||
270 | } | ||
271 | |||
272 | /* Exists to support ARGB cursors */ | ||
273 | static struct page *i8xx_alloc_pages(void) | ||
274 | { | ||
275 | struct page *page; | ||
276 | |||
277 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); | ||
278 | if (page == NULL) | ||
279 | return NULL; | ||
280 | |||
281 | if (set_pages_uc(page, 4) < 0) { | ||
282 | set_pages_wb(page, 4); | ||
283 | __free_pages(page, 2); | ||
284 | return NULL; | ||
285 | } | ||
286 | get_page(page); | ||
287 | atomic_inc(&agp_bridge->current_memory_agp); | ||
288 | return page; | ||
289 | } | ||
290 | |||
291 | static void i8xx_destroy_pages(struct page *page) | ||
292 | { | ||
293 | if (page == NULL) | ||
294 | return; | ||
295 | |||
296 | set_pages_wb(page, 4); | ||
297 | put_page(page); | ||
298 | __free_pages(page, 2); | ||
299 | atomic_dec(&agp_bridge->current_memory_agp); | ||
300 | } | ||
301 | |||
302 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
303 | int type) | ||
304 | { | ||
305 | if (type < AGP_USER_TYPES) | ||
306 | return type; | ||
307 | else if (type == AGP_USER_CACHED_MEMORY) | ||
308 | return INTEL_AGP_CACHED_MEMORY; | ||
309 | else | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
314 | int type) | ||
315 | { | ||
316 | int i, j, num_entries; | ||
317 | void *temp; | ||
318 | int ret = -EINVAL; | ||
319 | int mask_type; | ||
320 | |||
321 | if (mem->page_count == 0) | ||
322 | goto out; | ||
323 | |||
324 | temp = agp_bridge->current_size; | ||
325 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
326 | |||
327 | if ((pg_start + mem->page_count) > num_entries) | ||
328 | goto out_err; | ||
329 | |||
330 | |||
331 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { | ||
332 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { | ||
333 | ret = -EBUSY; | ||
334 | goto out_err; | ||
335 | } | ||
336 | } | ||
337 | |||
338 | if (type != mem->type) | ||
339 | goto out_err; | ||
340 | |||
341 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
342 | |||
343 | switch (mask_type) { | ||
344 | case AGP_DCACHE_MEMORY: | ||
345 | if (!mem->is_flushed) | ||
346 | global_cache_flush(); | ||
347 | for (i = pg_start; i < (pg_start + mem->page_count); i++) { | ||
348 | writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, | ||
349 | intel_private.registers+I810_PTE_BASE+(i*4)); | ||
350 | } | ||
351 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
352 | break; | ||
353 | case AGP_PHYS_MEMORY: | ||
354 | case AGP_NORMAL_MEMORY: | ||
355 | if (!mem->is_flushed) | ||
356 | global_cache_flush(); | ||
357 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
358 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
359 | page_to_phys(mem->pages[i]), mask_type), | ||
360 | intel_private.registers+I810_PTE_BASE+(j*4)); | ||
361 | } | ||
362 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
363 | break; | ||
364 | default: | ||
365 | goto out_err; | ||
366 | } | ||
367 | |||
368 | out: | ||
369 | ret = 0; | ||
370 | out_err: | ||
371 | mem->is_flushed = true; | ||
372 | return ret; | ||
373 | } | ||
374 | |||
375 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
376 | int type) | ||
377 | { | ||
378 | int i; | ||
379 | |||
380 | if (mem->page_count == 0) | ||
381 | return 0; | ||
382 | |||
383 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
384 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
385 | } | ||
386 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * The i810/i830 requires a physical address to program its mouse | ||
393 | * pointer into hardware. | ||
394 | * However the Xserver still writes to it through the agp aperture. | ||
395 | */ | ||
396 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) | ||
397 | { | ||
398 | struct agp_memory *new; | ||
399 | struct page *page; | ||
400 | |||
401 | switch (pg_count) { | ||
402 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); | ||
403 | break; | ||
404 | case 4: | ||
405 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
406 | page = i8xx_alloc_pages(); | ||
407 | break; | ||
408 | default: | ||
409 | return NULL; | ||
410 | } | ||
411 | |||
412 | if (page == NULL) | ||
413 | return NULL; | ||
414 | |||
415 | new = agp_create_memory(pg_count); | ||
416 | if (new == NULL) | ||
417 | return NULL; | ||
418 | |||
419 | new->pages[0] = page; | ||
420 | if (pg_count == 4) { | ||
421 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
422 | new->pages[1] = new->pages[0] + 1; | ||
423 | new->pages[2] = new->pages[1] + 1; | ||
424 | new->pages[3] = new->pages[2] + 1; | ||
425 | } | ||
426 | new->page_count = pg_count; | ||
427 | new->num_scratch_pages = pg_count; | ||
428 | new->type = AGP_PHYS_MEMORY; | ||
429 | new->physical = page_to_phys(new->pages[0]); | ||
430 | return new; | ||
431 | } | ||
432 | |||
433 | static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) | ||
434 | { | ||
435 | struct agp_memory *new; | ||
436 | |||
437 | if (type == AGP_DCACHE_MEMORY) { | ||
438 | if (pg_count != intel_private.num_dcache_entries) | ||
439 | return NULL; | ||
440 | |||
441 | new = agp_create_memory(1); | ||
442 | if (new == NULL) | ||
443 | return NULL; | ||
444 | |||
445 | new->type = AGP_DCACHE_MEMORY; | ||
446 | new->page_count = pg_count; | ||
447 | new->num_scratch_pages = 0; | ||
448 | agp_free_page_array(new); | ||
449 | return new; | ||
450 | } | ||
451 | if (type == AGP_PHYS_MEMORY) | ||
452 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
453 | return NULL; | ||
454 | } | ||
455 | |||
456 | static void intel_i810_free_by_type(struct agp_memory *curr) | ||
457 | { | ||
458 | agp_free_key(curr->key); | ||
459 | if (curr->type == AGP_PHYS_MEMORY) { | ||
460 | if (curr->page_count == 4) | ||
461 | i8xx_destroy_pages(curr->pages[0]); | ||
462 | else { | ||
463 | agp_bridge->driver->agp_destroy_page(curr->pages[0], | ||
464 | AGP_PAGE_DESTROY_UNMAP); | ||
465 | agp_bridge->driver->agp_destroy_page(curr->pages[0], | ||
466 | AGP_PAGE_DESTROY_FREE); | ||
467 | } | ||
468 | agp_free_page_array(curr); | ||
469 | } | ||
470 | kfree(curr); | ||
471 | } | ||
472 | |||
473 | static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, | ||
474 | dma_addr_t addr, int type) | ||
475 | { | ||
476 | /* Type checking must be done elsewhere */ | ||
477 | return addr | bridge->driver->masks[type].mask; | ||
478 | } | ||
479 | |||
480 | static struct aper_size_info_fixed intel_i830_sizes[] = | ||
481 | { | ||
482 | {128, 32768, 5}, | ||
483 | /* The 64M mode still requires a 128k gatt */ | ||
484 | {64, 16384, 5}, | ||
485 | {256, 65536, 6}, | ||
486 | {512, 131072, 7}, | ||
487 | }; | ||
488 | |||
489 | static void intel_i830_init_gtt_entries(void) | ||
490 | { | ||
491 | u16 gmch_ctrl; | ||
492 | int gtt_entries = 0; | ||
493 | u8 rdct; | ||
494 | int local = 0; | ||
495 | static const int ddt[4] = { 0, 16, 32, 64 }; | ||
496 | int size; /* reserved space (in kb) at the top of stolen memory */ | ||
497 | |||
498 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
499 | |||
500 | if (IS_I965) { | ||
501 | u32 pgetbl_ctl; | ||
502 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); | ||
503 | |||
504 | /* The 965 has a field telling us the size of the GTT, | ||
505 | * which may be larger than what is necessary to map the | ||
506 | * aperture. | ||
507 | */ | ||
508 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { | ||
509 | case I965_PGETBL_SIZE_128KB: | ||
510 | size = 128; | ||
511 | break; | ||
512 | case I965_PGETBL_SIZE_256KB: | ||
513 | size = 256; | ||
514 | break; | ||
515 | case I965_PGETBL_SIZE_512KB: | ||
516 | size = 512; | ||
517 | break; | ||
518 | case I965_PGETBL_SIZE_1MB: | ||
519 | size = 1024; | ||
520 | break; | ||
521 | case I965_PGETBL_SIZE_2MB: | ||
522 | size = 2048; | ||
523 | break; | ||
524 | case I965_PGETBL_SIZE_1_5MB: | ||
525 | size = 1024 + 512; | ||
526 | break; | ||
527 | default: | ||
528 | dev_info(&intel_private.pcidev->dev, | ||
529 | "unknown page table size, assuming 512KB\n"); | ||
530 | size = 512; | ||
531 | } | ||
532 | size += 4; /* add in BIOS popup space */ | ||
533 | } else if (IS_G33 && !IS_PINEVIEW) { | ||
534 | /* G33's GTT size defined in gmch_ctrl */ | ||
535 | switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { | ||
536 | case G33_PGETBL_SIZE_1M: | ||
537 | size = 1024; | ||
538 | break; | ||
539 | case G33_PGETBL_SIZE_2M: | ||
540 | size = 2048; | ||
541 | break; | ||
542 | default: | ||
543 | dev_info(&agp_bridge->dev->dev, | ||
544 | "unknown page table size 0x%x, assuming 512KB\n", | ||
545 | (gmch_ctrl & G33_PGETBL_SIZE_MASK)); | ||
546 | size = 512; | ||
547 | } | ||
548 | size += 4; | ||
549 | } else if (IS_G4X || IS_PINEVIEW) { | ||
550 | /* On 4 series hardware, GTT stolen is separate from graphics | ||
551 | * stolen, ignore it in stolen gtt entries counting. However, | ||
552 | * 4KB of the stolen memory doesn't get mapped to the GTT. | ||
553 | */ | ||
554 | size = 4; | ||
555 | } else { | ||
556 | /* On previous hardware, the GTT size was just what was | ||
557 | * required to map the aperture. | ||
558 | */ | ||
559 | size = agp_bridge->driver->fetch_size() + 4; | ||
560 | } | ||
561 | |||
562 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || | ||
563 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
564 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { | ||
565 | case I830_GMCH_GMS_STOLEN_512: | ||
566 | gtt_entries = KB(512) - KB(size); | ||
567 | break; | ||
568 | case I830_GMCH_GMS_STOLEN_1024: | ||
569 | gtt_entries = MB(1) - KB(size); | ||
570 | break; | ||
571 | case I830_GMCH_GMS_STOLEN_8192: | ||
572 | gtt_entries = MB(8) - KB(size); | ||
573 | break; | ||
574 | case I830_GMCH_GMS_LOCAL: | ||
575 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); | ||
576 | gtt_entries = (I830_RDRAM_ND(rdct) + 1) * | ||
577 | MB(ddt[I830_RDRAM_DDT(rdct)]); | ||
578 | local = 1; | ||
579 | break; | ||
580 | default: | ||
581 | gtt_entries = 0; | ||
582 | break; | ||
583 | } | ||
584 | } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
585 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { | ||
586 | /* | ||
587 | * SandyBridge has new memory control reg at 0x50.w | ||
588 | */ | ||
589 | u16 snb_gmch_ctl; | ||
590 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
591 | switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { | ||
592 | case SNB_GMCH_GMS_STOLEN_32M: | ||
593 | gtt_entries = MB(32) - KB(size); | ||
594 | break; | ||
595 | case SNB_GMCH_GMS_STOLEN_64M: | ||
596 | gtt_entries = MB(64) - KB(size); | ||
597 | break; | ||
598 | case SNB_GMCH_GMS_STOLEN_96M: | ||
599 | gtt_entries = MB(96) - KB(size); | ||
600 | break; | ||
601 | case SNB_GMCH_GMS_STOLEN_128M: | ||
602 | gtt_entries = MB(128) - KB(size); | ||
603 | break; | ||
604 | case SNB_GMCH_GMS_STOLEN_160M: | ||
605 | gtt_entries = MB(160) - KB(size); | ||
606 | break; | ||
607 | case SNB_GMCH_GMS_STOLEN_192M: | ||
608 | gtt_entries = MB(192) - KB(size); | ||
609 | break; | ||
610 | case SNB_GMCH_GMS_STOLEN_224M: | ||
611 | gtt_entries = MB(224) - KB(size); | ||
612 | break; | ||
613 | case SNB_GMCH_GMS_STOLEN_256M: | ||
614 | gtt_entries = MB(256) - KB(size); | ||
615 | break; | ||
616 | case SNB_GMCH_GMS_STOLEN_288M: | ||
617 | gtt_entries = MB(288) - KB(size); | ||
618 | break; | ||
619 | case SNB_GMCH_GMS_STOLEN_320M: | ||
620 | gtt_entries = MB(320) - KB(size); | ||
621 | break; | ||
622 | case SNB_GMCH_GMS_STOLEN_352M: | ||
623 | gtt_entries = MB(352) - KB(size); | ||
624 | break; | ||
625 | case SNB_GMCH_GMS_STOLEN_384M: | ||
626 | gtt_entries = MB(384) - KB(size); | ||
627 | break; | ||
628 | case SNB_GMCH_GMS_STOLEN_416M: | ||
629 | gtt_entries = MB(416) - KB(size); | ||
630 | break; | ||
631 | case SNB_GMCH_GMS_STOLEN_448M: | ||
632 | gtt_entries = MB(448) - KB(size); | ||
633 | break; | ||
634 | case SNB_GMCH_GMS_STOLEN_480M: | ||
635 | gtt_entries = MB(480) - KB(size); | ||
636 | break; | ||
637 | case SNB_GMCH_GMS_STOLEN_512M: | ||
638 | gtt_entries = MB(512) - KB(size); | ||
639 | break; | ||
640 | } | ||
641 | } else { | ||
642 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { | ||
643 | case I855_GMCH_GMS_STOLEN_1M: | ||
644 | gtt_entries = MB(1) - KB(size); | ||
645 | break; | ||
646 | case I855_GMCH_GMS_STOLEN_4M: | ||
647 | gtt_entries = MB(4) - KB(size); | ||
648 | break; | ||
649 | case I855_GMCH_GMS_STOLEN_8M: | ||
650 | gtt_entries = MB(8) - KB(size); | ||
651 | break; | ||
652 | case I855_GMCH_GMS_STOLEN_16M: | ||
653 | gtt_entries = MB(16) - KB(size); | ||
654 | break; | ||
655 | case I855_GMCH_GMS_STOLEN_32M: | ||
656 | gtt_entries = MB(32) - KB(size); | ||
657 | break; | ||
658 | case I915_GMCH_GMS_STOLEN_48M: | ||
659 | /* Check it's really I915G */ | ||
660 | if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) | ||
661 | gtt_entries = MB(48) - KB(size); | ||
662 | else | ||
663 | gtt_entries = 0; | ||
664 | break; | ||
665 | case I915_GMCH_GMS_STOLEN_64M: | ||
666 | /* Check it's really I915G */ | ||
667 | if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) | ||
668 | gtt_entries = MB(64) - KB(size); | ||
669 | else | ||
670 | gtt_entries = 0; | ||
671 | break; | ||
672 | case G33_GMCH_GMS_STOLEN_128M: | ||
673 | if (IS_G33 || IS_I965 || IS_G4X) | ||
674 | gtt_entries = MB(128) - KB(size); | ||
675 | else | ||
676 | gtt_entries = 0; | ||
677 | break; | ||
678 | case G33_GMCH_GMS_STOLEN_256M: | ||
679 | if (IS_G33 || IS_I965 || IS_G4X) | ||
680 | gtt_entries = MB(256) - KB(size); | ||
681 | else | ||
682 | gtt_entries = 0; | ||
683 | break; | ||
684 | case INTEL_GMCH_GMS_STOLEN_96M: | ||
685 | if (IS_I965 || IS_G4X) | ||
686 | gtt_entries = MB(96) - KB(size); | ||
687 | else | ||
688 | gtt_entries = 0; | ||
689 | break; | ||
690 | case INTEL_GMCH_GMS_STOLEN_160M: | ||
691 | if (IS_I965 || IS_G4X) | ||
692 | gtt_entries = MB(160) - KB(size); | ||
693 | else | ||
694 | gtt_entries = 0; | ||
695 | break; | ||
696 | case INTEL_GMCH_GMS_STOLEN_224M: | ||
697 | if (IS_I965 || IS_G4X) | ||
698 | gtt_entries = MB(224) - KB(size); | ||
699 | else | ||
700 | gtt_entries = 0; | ||
701 | break; | ||
702 | case INTEL_GMCH_GMS_STOLEN_352M: | ||
703 | if (IS_I965 || IS_G4X) | ||
704 | gtt_entries = MB(352) - KB(size); | ||
705 | else | ||
706 | gtt_entries = 0; | ||
707 | break; | ||
708 | default: | ||
709 | gtt_entries = 0; | ||
710 | break; | ||
711 | } | ||
712 | } | ||
713 | if (gtt_entries > 0) { | ||
714 | dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", | ||
715 | gtt_entries / KB(1), local ? "local" : "stolen"); | ||
716 | gtt_entries /= KB(4); | ||
717 | } else { | ||
718 | dev_info(&agp_bridge->dev->dev, | ||
719 | "no pre-allocated video memory detected\n"); | ||
720 | gtt_entries = 0; | ||
721 | } | ||
722 | |||
723 | intel_private.gtt_entries = gtt_entries; | ||
724 | } | ||
725 | |||
726 | static void intel_i830_fini_flush(void) | ||
727 | { | ||
728 | kunmap(intel_private.i8xx_page); | ||
729 | intel_private.i8xx_flush_page = NULL; | ||
730 | unmap_page_from_agp(intel_private.i8xx_page); | ||
731 | |||
732 | __free_page(intel_private.i8xx_page); | ||
733 | intel_private.i8xx_page = NULL; | ||
734 | } | ||
735 | |||
736 | static void intel_i830_setup_flush(void) | ||
737 | { | ||
738 | /* return if we've already set the flush mechanism up */ | ||
739 | if (intel_private.i8xx_page) | ||
740 | return; | ||
741 | |||
742 | intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); | ||
743 | if (!intel_private.i8xx_page) | ||
744 | return; | ||
745 | |||
746 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | ||
747 | if (!intel_private.i8xx_flush_page) | ||
748 | intel_i830_fini_flush(); | ||
749 | } | ||
750 | |||
751 | /* The chipset_flush interface needs to get data that has already been | ||
752 | * flushed out of the CPU all the way out to main memory, because the GPU | ||
753 | * doesn't snoop those buffers. | ||
754 | * | ||
755 | * The 8xx series doesn't have the same lovely interface for flushing the | ||
756 | * chipset write buffers that the later chips do. According to the 865 | ||
757 | * specs, it's 64 octwords, or 1KB. So, to get those previous things in | ||
758 | * that buffer out, we just fill 1KB and clflush it out, on the assumption | ||
759 | * that it'll push whatever was in there out. It appears to work. | ||
760 | */ | ||
761 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | ||
762 | { | ||
763 | unsigned int *pg = intel_private.i8xx_flush_page; | ||
764 | |||
765 | memset(pg, 0, 1024); | ||
766 | |||
767 | if (cpu_has_clflush) | ||
768 | clflush_cache_range(pg, 1024); | ||
769 | else if (wbinvd_on_all_cpus() != 0) | ||
770 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
771 | } | ||
772 | |||
773 | /* The intel i830 automatically initializes the agp aperture during POST. | ||
774 | * Use the memory already set aside for in the GTT. | ||
775 | */ | ||
776 | static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) | ||
777 | { | ||
778 | int page_order; | ||
779 | struct aper_size_info_fixed *size; | ||
780 | int num_entries; | ||
781 | u32 temp; | ||
782 | |||
783 | size = agp_bridge->current_size; | ||
784 | page_order = size->page_order; | ||
785 | num_entries = size->num_entries; | ||
786 | agp_bridge->gatt_table_real = NULL; | ||
787 | |||
788 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); | ||
789 | temp &= 0xfff80000; | ||
790 | |||
791 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
792 | if (!intel_private.registers) | ||
793 | return -ENOMEM; | ||
794 | |||
795 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
796 | global_cache_flush(); /* FIXME: ?? */ | ||
797 | |||
798 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
799 | intel_i830_init_gtt_entries(); | ||
800 | |||
801 | agp_bridge->gatt_table = NULL; | ||
802 | |||
803 | agp_bridge->gatt_bus_addr = temp; | ||
804 | |||
805 | return 0; | ||
806 | } | ||
807 | |||
808 | /* Return the gatt table to a sane state. Use the top of stolen | ||
809 | * memory for the GTT. | ||
810 | */ | ||
811 | static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) | ||
812 | { | ||
813 | return 0; | ||
814 | } | ||
815 | |||
816 | static int intel_i830_fetch_size(void) | ||
817 | { | ||
818 | u16 gmch_ctrl; | ||
819 | struct aper_size_info_fixed *values; | ||
820 | |||
821 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
822 | |||
823 | if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && | ||
824 | agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
825 | /* 855GM/852GM/865G has 128MB aperture size */ | ||
826 | agp_bridge->current_size = (void *) values; | ||
827 | agp_bridge->aperture_size_idx = 0; | ||
828 | return values[0].size; | ||
829 | } | ||
830 | |||
831 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
832 | |||
833 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | ||
834 | agp_bridge->current_size = (void *) values; | ||
835 | agp_bridge->aperture_size_idx = 0; | ||
836 | return values[0].size; | ||
837 | } else { | ||
838 | agp_bridge->current_size = (void *) (values + 1); | ||
839 | agp_bridge->aperture_size_idx = 1; | ||
840 | return values[1].size; | ||
841 | } | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | static int intel_i830_configure(void) | ||
847 | { | ||
848 | struct aper_size_info_fixed *current_size; | ||
849 | u32 temp; | ||
850 | u16 gmch_ctrl; | ||
851 | int i; | ||
852 | |||
853 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
854 | |||
855 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | ||
856 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
857 | |||
858 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
859 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
860 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
861 | |||
862 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
863 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
864 | |||
865 | if (agp_bridge->driver->needs_scratch_page) { | ||
866 | for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { | ||
867 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
868 | } | ||
869 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */ | ||
870 | } | ||
871 | |||
872 | global_cache_flush(); | ||
873 | |||
874 | intel_i830_setup_flush(); | ||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | static void intel_i830_cleanup(void) | ||
879 | { | ||
880 | iounmap(intel_private.registers); | ||
881 | } | ||
882 | |||
883 | static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
884 | int type) | ||
885 | { | ||
886 | int i, j, num_entries; | ||
887 | void *temp; | ||
888 | int ret = -EINVAL; | ||
889 | int mask_type; | ||
890 | |||
891 | if (mem->page_count == 0) | ||
892 | goto out; | ||
893 | |||
894 | temp = agp_bridge->current_size; | ||
895 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
896 | |||
897 | if (pg_start < intel_private.gtt_entries) { | ||
898 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
899 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
900 | pg_start, intel_private.gtt_entries); | ||
901 | |||
902 | dev_info(&intel_private.pcidev->dev, | ||
903 | "trying to insert into local/stolen memory\n"); | ||
904 | goto out_err; | ||
905 | } | ||
906 | |||
907 | if ((pg_start + mem->page_count) > num_entries) | ||
908 | goto out_err; | ||
909 | |||
910 | /* The i830 can't check the GTT for entries since its read only, | ||
911 | * depend on the caller to make the correct offset decisions. | ||
912 | */ | ||
913 | |||
914 | if (type != mem->type) | ||
915 | goto out_err; | ||
916 | |||
917 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
918 | |||
919 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
920 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
921 | goto out_err; | ||
922 | |||
923 | if (!mem->is_flushed) | ||
924 | global_cache_flush(); | ||
925 | |||
926 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
927 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
928 | page_to_phys(mem->pages[i]), mask_type), | ||
929 | intel_private.registers+I810_PTE_BASE+(j*4)); | ||
930 | } | ||
931 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
932 | |||
933 | out: | ||
934 | ret = 0; | ||
935 | out_err: | ||
936 | mem->is_flushed = true; | ||
937 | return ret; | ||
938 | } | ||
939 | |||
940 | static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
941 | int type) | ||
942 | { | ||
943 | int i; | ||
944 | |||
945 | if (mem->page_count == 0) | ||
946 | return 0; | ||
947 | |||
948 | if (pg_start < intel_private.gtt_entries) { | ||
949 | dev_info(&intel_private.pcidev->dev, | ||
950 | "trying to disable local/stolen memory\n"); | ||
951 | return -EINVAL; | ||
952 | } | ||
953 | |||
954 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
955 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
956 | } | ||
957 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) | ||
963 | { | ||
964 | if (type == AGP_PHYS_MEMORY) | ||
965 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
966 | /* always return NULL for other allocation types for now */ | ||
967 | return NULL; | ||
968 | } | ||
969 | |||
970 | static int intel_alloc_chipset_flush_resource(void) | ||
971 | { | ||
972 | int ret; | ||
973 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
974 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
975 | pcibios_align_resource, agp_bridge->dev); | ||
976 | |||
977 | return ret; | ||
978 | } | ||
979 | |||
980 | static void intel_i915_setup_chipset_flush(void) | ||
981 | { | ||
982 | int ret; | ||
983 | u32 temp; | ||
984 | |||
985 | pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); | ||
986 | if (!(temp & 0x1)) { | ||
987 | intel_alloc_chipset_flush_resource(); | ||
988 | intel_private.resource_valid = 1; | ||
989 | pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
990 | } else { | ||
991 | temp &= ~1; | ||
992 | |||
993 | intel_private.resource_valid = 1; | ||
994 | intel_private.ifp_resource.start = temp; | ||
995 | intel_private.ifp_resource.end = temp + PAGE_SIZE; | ||
996 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
997 | /* some BIOSes reserve this area in a pnp some don't */ | ||
998 | if (ret) | ||
999 | intel_private.resource_valid = 0; | ||
1000 | } | ||
1001 | } | ||
1002 | |||
1003 | static void intel_i965_g33_setup_chipset_flush(void) | ||
1004 | { | ||
1005 | u32 temp_hi, temp_lo; | ||
1006 | int ret; | ||
1007 | |||
1008 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); | ||
1009 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); | ||
1010 | |||
1011 | if (!(temp_lo & 0x1)) { | ||
1012 | |||
1013 | intel_alloc_chipset_flush_resource(); | ||
1014 | |||
1015 | intel_private.resource_valid = 1; | ||
1016 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, | ||
1017 | upper_32_bits(intel_private.ifp_resource.start)); | ||
1018 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1019 | } else { | ||
1020 | u64 l64; | ||
1021 | |||
1022 | temp_lo &= ~0x1; | ||
1023 | l64 = ((u64)temp_hi << 32) | temp_lo; | ||
1024 | |||
1025 | intel_private.resource_valid = 1; | ||
1026 | intel_private.ifp_resource.start = l64; | ||
1027 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; | ||
1028 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1029 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1030 | if (ret) | ||
1031 | intel_private.resource_valid = 0; | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | static void intel_i9xx_setup_flush(void) | ||
1036 | { | ||
1037 | /* return if already configured */ | ||
1038 | if (intel_private.ifp_resource.start) | ||
1039 | return; | ||
1040 | |||
1041 | if (IS_SNB) | ||
1042 | return; | ||
1043 | |||
1044 | /* setup a resource for this object */ | ||
1045 | intel_private.ifp_resource.name = "Intel Flush Page"; | ||
1046 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | ||
1047 | |||
1048 | /* Setup chipset flush for 915 */ | ||
1049 | if (IS_I965 || IS_G33 || IS_G4X) { | ||
1050 | intel_i965_g33_setup_chipset_flush(); | ||
1051 | } else { | ||
1052 | intel_i915_setup_chipset_flush(); | ||
1053 | } | ||
1054 | |||
1055 | if (intel_private.ifp_resource.start) { | ||
1056 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | ||
1057 | if (!intel_private.i9xx_flush_page) | ||
1058 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1062 | static int intel_i915_configure(void) | ||
1063 | { | ||
1064 | struct aper_size_info_fixed *current_size; | ||
1065 | u32 temp; | ||
1066 | u16 gmch_ctrl; | ||
1067 | int i; | ||
1068 | |||
1069 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
1070 | |||
1071 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); | ||
1072 | |||
1073 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
1074 | |||
1075 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
1076 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
1077 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
1078 | |||
1079 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
1080 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
1081 | |||
1082 | if (agp_bridge->driver->needs_scratch_page) { | ||
1083 | for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { | ||
1084 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1085 | } | ||
1086 | readl(intel_private.gtt+i-1); /* PCI Posting. */ | ||
1087 | } | ||
1088 | |||
1089 | global_cache_flush(); | ||
1090 | |||
1091 | intel_i9xx_setup_flush(); | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
1096 | static void intel_i915_cleanup(void) | ||
1097 | { | ||
1098 | if (intel_private.i9xx_flush_page) | ||
1099 | iounmap(intel_private.i9xx_flush_page); | ||
1100 | if (intel_private.resource_valid) | ||
1101 | release_resource(&intel_private.ifp_resource); | ||
1102 | intel_private.ifp_resource.start = 0; | ||
1103 | intel_private.resource_valid = 0; | ||
1104 | iounmap(intel_private.gtt); | ||
1105 | iounmap(intel_private.registers); | ||
1106 | } | ||
1107 | |||
1108 | static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) | ||
1109 | { | ||
1110 | if (intel_private.i9xx_flush_page) | ||
1111 | writel(1, intel_private.i9xx_flush_page); | ||
1112 | } | ||
1113 | |||
1114 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
1115 | int type) | ||
1116 | { | ||
1117 | int num_entries; | ||
1118 | void *temp; | ||
1119 | int ret = -EINVAL; | ||
1120 | int mask_type; | ||
1121 | |||
1122 | if (mem->page_count == 0) | ||
1123 | goto out; | ||
1124 | |||
1125 | temp = agp_bridge->current_size; | ||
1126 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
1127 | |||
1128 | if (pg_start < intel_private.gtt_entries) { | ||
1129 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
1130 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
1131 | pg_start, intel_private.gtt_entries); | ||
1132 | |||
1133 | dev_info(&intel_private.pcidev->dev, | ||
1134 | "trying to insert into local/stolen memory\n"); | ||
1135 | goto out_err; | ||
1136 | } | ||
1137 | |||
1138 | if ((pg_start + mem->page_count) > num_entries) | ||
1139 | goto out_err; | ||
1140 | |||
1141 | /* The i915 can't check the GTT for entries since it's read only; | ||
1142 | * depend on the caller to make the correct offset decisions. | ||
1143 | */ | ||
1144 | |||
1145 | if (type != mem->type) | ||
1146 | goto out_err; | ||
1147 | |||
1148 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
1149 | |||
1150 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
1151 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
1152 | goto out_err; | ||
1153 | |||
1154 | if (!mem->is_flushed) | ||
1155 | global_cache_flush(); | ||
1156 | |||
1157 | intel_agp_insert_sg_entries(mem, pg_start, mask_type); | ||
1158 | |||
1159 | out: | ||
1160 | ret = 0; | ||
1161 | out_err: | ||
1162 | mem->is_flushed = true; | ||
1163 | return ret; | ||
1164 | } | ||
1165 | |||
1166 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1167 | int type) | ||
1168 | { | ||
1169 | int i; | ||
1170 | |||
1171 | if (mem->page_count == 0) | ||
1172 | return 0; | ||
1173 | |||
1174 | if (pg_start < intel_private.gtt_entries) { | ||
1175 | dev_info(&intel_private.pcidev->dev, | ||
1176 | "trying to disable local/stolen memory\n"); | ||
1177 | return -EINVAL; | ||
1178 | } | ||
1179 | |||
1180 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | ||
1181 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1182 | |||
1183 | readl(intel_private.gtt+i-1); | ||
1184 | |||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1188 | /* Return the aperture size by just checking the resource length. The effect | ||
1189 | * described in the spec of the MSAC registers is just changing of the | ||
1190 | * resource size. | ||
1191 | */ | ||
1192 | static int intel_i9xx_fetch_size(void) | ||
1193 | { | ||
1194 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); | ||
1195 | int aper_size; /* size in megabytes */ | ||
1196 | int i; | ||
1197 | |||
1198 | aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); | ||
1199 | |||
1200 | for (i = 0; i < num_sizes; i++) { | ||
1201 | if (aper_size == intel_i830_sizes[i].size) { | ||
1202 | agp_bridge->current_size = intel_i830_sizes + i; | ||
1203 | return aper_size; | ||
1204 | } | ||
1205 | } | ||
1206 | |||
1207 | return 0; | ||
1208 | } | ||
1209 | |||
1210 | /* The intel i915 automatically initializes the agp aperture during POST. | ||
1211 | * Use the memory already set aside for in the GTT. | ||
1212 | */ | ||
1213 | static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
1214 | { | ||
1215 | int page_order; | ||
1216 | struct aper_size_info_fixed *size; | ||
1217 | int num_entries; | ||
1218 | u32 temp, temp2; | ||
1219 | int gtt_map_size = 256 * 1024; | ||
1220 | |||
1221 | size = agp_bridge->current_size; | ||
1222 | page_order = size->page_order; | ||
1223 | num_entries = size->num_entries; | ||
1224 | agp_bridge->gatt_table_real = NULL; | ||
1225 | |||
1226 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1227 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); | ||
1228 | |||
1229 | if (IS_G33) | ||
1230 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | ||
1231 | intel_private.gtt = ioremap(temp2, gtt_map_size); | ||
1232 | if (!intel_private.gtt) | ||
1233 | return -ENOMEM; | ||
1234 | |||
1235 | intel_private.gtt_total_size = gtt_map_size / 4; | ||
1236 | |||
1237 | temp &= 0xfff80000; | ||
1238 | |||
1239 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1240 | if (!intel_private.registers) { | ||
1241 | iounmap(intel_private.gtt); | ||
1242 | return -ENOMEM; | ||
1243 | } | ||
1244 | |||
1245 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1246 | global_cache_flush(); /* FIXME: ? */ | ||
1247 | |||
1248 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1249 | intel_i830_init_gtt_entries(); | ||
1250 | |||
1251 | agp_bridge->gatt_table = NULL; | ||
1252 | |||
1253 | agp_bridge->gatt_bus_addr = temp; | ||
1254 | |||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | /* | ||
1259 | * The i965 supports 36-bit physical addresses, but to keep | ||
1260 | * the format of the GTT the same, the bits that don't fit | ||
1261 | * in a 32-bit word are shifted down to bits 4..7. | ||
1262 | * | ||
1263 | * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" | ||
1264 | * is always zero on 32-bit architectures, so no need to make | ||
1265 | * this conditional. | ||
1266 | */ | ||
1267 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | ||
1268 | dma_addr_t addr, int type) | ||
1269 | { | ||
1270 | /* Shift high bits down */ | ||
1271 | addr |= (addr >> 28) & 0xf0; | ||
1272 | |||
1273 | /* Type checking must be done elsewhere */ | ||
1274 | return addr | bridge->driver->masks[type].mask; | ||
1275 | } | ||
1276 | |||
1277 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | ||
1278 | { | ||
1279 | u16 snb_gmch_ctl; | ||
1280 | |||
1281 | switch (agp_bridge->dev->device) { | ||
1282 | case PCI_DEVICE_ID_INTEL_GM45_HB: | ||
1283 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: | ||
1284 | case PCI_DEVICE_ID_INTEL_Q45_HB: | ||
1285 | case PCI_DEVICE_ID_INTEL_G45_HB: | ||
1286 | case PCI_DEVICE_ID_INTEL_G41_HB: | ||
1287 | case PCI_DEVICE_ID_INTEL_B43_HB: | ||
1288 | case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: | ||
1289 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: | ||
1290 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: | ||
1291 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: | ||
1292 | *gtt_offset = *gtt_size = MB(2); | ||
1293 | break; | ||
1294 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | ||
1295 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | ||
1296 | *gtt_offset = MB(2); | ||
1297 | |||
1298 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
1299 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { | ||
1300 | default: | ||
1301 | case SNB_GTT_SIZE_0M: | ||
1302 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); | ||
1303 | *gtt_size = MB(0); | ||
1304 | break; | ||
1305 | case SNB_GTT_SIZE_1M: | ||
1306 | *gtt_size = MB(1); | ||
1307 | break; | ||
1308 | case SNB_GTT_SIZE_2M: | ||
1309 | *gtt_size = MB(2); | ||
1310 | break; | ||
1311 | } | ||
1312 | break; | ||
1313 | default: | ||
1314 | *gtt_offset = *gtt_size = KB(512); | ||
1315 | } | ||
1316 | } | ||
1317 | |||
1318 | /* The intel i965 automatically initializes the agp aperture during POST. | ||
1319 | * Use the memory already set aside for in the GTT. | ||
1320 | */ | ||
1321 | static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
1322 | { | ||
1323 | int page_order; | ||
1324 | struct aper_size_info_fixed *size; | ||
1325 | int num_entries; | ||
1326 | u32 temp; | ||
1327 | int gtt_offset, gtt_size; | ||
1328 | |||
1329 | size = agp_bridge->current_size; | ||
1330 | page_order = size->page_order; | ||
1331 | num_entries = size->num_entries; | ||
1332 | agp_bridge->gatt_table_real = NULL; | ||
1333 | |||
1334 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1335 | |||
1336 | temp &= 0xfff00000; | ||
1337 | |||
1338 | intel_i965_get_gtt_range(>t_offset, >t_size); | ||
1339 | |||
1340 | intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); | ||
1341 | |||
1342 | if (!intel_private.gtt) | ||
1343 | return -ENOMEM; | ||
1344 | |||
1345 | intel_private.gtt_total_size = gtt_size / 4; | ||
1346 | |||
1347 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1348 | if (!intel_private.registers) { | ||
1349 | iounmap(intel_private.gtt); | ||
1350 | return -ENOMEM; | ||
1351 | } | ||
1352 | |||
1353 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1354 | global_cache_flush(); /* FIXME: ? */ | ||
1355 | |||
1356 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1357 | intel_i830_init_gtt_entries(); | ||
1358 | |||
1359 | agp_bridge->gatt_table = NULL; | ||
1360 | |||
1361 | agp_bridge->gatt_bus_addr = temp; | ||
1362 | |||
1363 | return 0; | ||
1364 | } | ||
1365 | |||
1366 | static const struct agp_bridge_driver intel_810_driver = { | ||
1367 | .owner = THIS_MODULE, | ||
1368 | .aperture_sizes = intel_i810_sizes, | ||
1369 | .size_type = FIXED_APER_SIZE, | ||
1370 | .num_aperture_sizes = 2, | ||
1371 | .needs_scratch_page = true, | ||
1372 | .configure = intel_i810_configure, | ||
1373 | .fetch_size = intel_i810_fetch_size, | ||
1374 | .cleanup = intel_i810_cleanup, | ||
1375 | .mask_memory = intel_i810_mask_memory, | ||
1376 | .masks = intel_i810_masks, | ||
1377 | .agp_enable = intel_i810_agp_enable, | ||
1378 | .cache_flush = global_cache_flush, | ||
1379 | .create_gatt_table = agp_generic_create_gatt_table, | ||
1380 | .free_gatt_table = agp_generic_free_gatt_table, | ||
1381 | .insert_memory = intel_i810_insert_entries, | ||
1382 | .remove_memory = intel_i810_remove_entries, | ||
1383 | .alloc_by_type = intel_i810_alloc_by_type, | ||
1384 | .free_by_type = intel_i810_free_by_type, | ||
1385 | .agp_alloc_page = agp_generic_alloc_page, | ||
1386 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1387 | .agp_destroy_page = agp_generic_destroy_page, | ||
1388 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1389 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1390 | }; | ||
1391 | |||
1392 | static const struct agp_bridge_driver intel_830_driver = { | ||
1393 | .owner = THIS_MODULE, | ||
1394 | .aperture_sizes = intel_i830_sizes, | ||
1395 | .size_type = FIXED_APER_SIZE, | ||
1396 | .num_aperture_sizes = 4, | ||
1397 | .needs_scratch_page = true, | ||
1398 | .configure = intel_i830_configure, | ||
1399 | .fetch_size = intel_i830_fetch_size, | ||
1400 | .cleanup = intel_i830_cleanup, | ||
1401 | .mask_memory = intel_i810_mask_memory, | ||
1402 | .masks = intel_i810_masks, | ||
1403 | .agp_enable = intel_i810_agp_enable, | ||
1404 | .cache_flush = global_cache_flush, | ||
1405 | .create_gatt_table = intel_i830_create_gatt_table, | ||
1406 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1407 | .insert_memory = intel_i830_insert_entries, | ||
1408 | .remove_memory = intel_i830_remove_entries, | ||
1409 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1410 | .free_by_type = intel_i810_free_by_type, | ||
1411 | .agp_alloc_page = agp_generic_alloc_page, | ||
1412 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1413 | .agp_destroy_page = agp_generic_destroy_page, | ||
1414 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1415 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1416 | .chipset_flush = intel_i830_chipset_flush, | ||
1417 | }; | ||
1418 | |||
1419 | static const struct agp_bridge_driver intel_915_driver = { | ||
1420 | .owner = THIS_MODULE, | ||
1421 | .aperture_sizes = intel_i830_sizes, | ||
1422 | .size_type = FIXED_APER_SIZE, | ||
1423 | .num_aperture_sizes = 4, | ||
1424 | .needs_scratch_page = true, | ||
1425 | .configure = intel_i915_configure, | ||
1426 | .fetch_size = intel_i9xx_fetch_size, | ||
1427 | .cleanup = intel_i915_cleanup, | ||
1428 | .mask_memory = intel_i810_mask_memory, | ||
1429 | .masks = intel_i810_masks, | ||
1430 | .agp_enable = intel_i810_agp_enable, | ||
1431 | .cache_flush = global_cache_flush, | ||
1432 | .create_gatt_table = intel_i915_create_gatt_table, | ||
1433 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1434 | .insert_memory = intel_i915_insert_entries, | ||
1435 | .remove_memory = intel_i915_remove_entries, | ||
1436 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1437 | .free_by_type = intel_i810_free_by_type, | ||
1438 | .agp_alloc_page = agp_generic_alloc_page, | ||
1439 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1440 | .agp_destroy_page = agp_generic_destroy_page, | ||
1441 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1442 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1443 | .chipset_flush = intel_i915_chipset_flush, | ||
1444 | #ifdef USE_PCI_DMA_API | ||
1445 | .agp_map_page = intel_agp_map_page, | ||
1446 | .agp_unmap_page = intel_agp_unmap_page, | ||
1447 | .agp_map_memory = intel_agp_map_memory, | ||
1448 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
1449 | #endif | ||
1450 | }; | ||
1451 | |||
1452 | static const struct agp_bridge_driver intel_i965_driver = { | ||
1453 | .owner = THIS_MODULE, | ||
1454 | .aperture_sizes = intel_i830_sizes, | ||
1455 | .size_type = FIXED_APER_SIZE, | ||
1456 | .num_aperture_sizes = 4, | ||
1457 | .needs_scratch_page = true, | ||
1458 | .configure = intel_i915_configure, | ||
1459 | .fetch_size = intel_i9xx_fetch_size, | ||
1460 | .cleanup = intel_i915_cleanup, | ||
1461 | .mask_memory = intel_i965_mask_memory, | ||
1462 | .masks = intel_i810_masks, | ||
1463 | .agp_enable = intel_i810_agp_enable, | ||
1464 | .cache_flush = global_cache_flush, | ||
1465 | .create_gatt_table = intel_i965_create_gatt_table, | ||
1466 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1467 | .insert_memory = intel_i915_insert_entries, | ||
1468 | .remove_memory = intel_i915_remove_entries, | ||
1469 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1470 | .free_by_type = intel_i810_free_by_type, | ||
1471 | .agp_alloc_page = agp_generic_alloc_page, | ||
1472 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1473 | .agp_destroy_page = agp_generic_destroy_page, | ||
1474 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1475 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1476 | .chipset_flush = intel_i915_chipset_flush, | ||
1477 | #ifdef USE_PCI_DMA_API | ||
1478 | .agp_map_page = intel_agp_map_page, | ||
1479 | .agp_unmap_page = intel_agp_unmap_page, | ||
1480 | .agp_map_memory = intel_agp_map_memory, | ||
1481 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
1482 | #endif | ||
1483 | }; | ||
1484 | |||
1485 | static const struct agp_bridge_driver intel_g33_driver = { | ||
1486 | .owner = THIS_MODULE, | ||
1487 | .aperture_sizes = intel_i830_sizes, | ||
1488 | .size_type = FIXED_APER_SIZE, | ||
1489 | .num_aperture_sizes = 4, | ||
1490 | .needs_scratch_page = true, | ||
1491 | .configure = intel_i915_configure, | ||
1492 | .fetch_size = intel_i9xx_fetch_size, | ||
1493 | .cleanup = intel_i915_cleanup, | ||
1494 | .mask_memory = intel_i965_mask_memory, | ||
1495 | .masks = intel_i810_masks, | ||
1496 | .agp_enable = intel_i810_agp_enable, | ||
1497 | .cache_flush = global_cache_flush, | ||
1498 | .create_gatt_table = intel_i915_create_gatt_table, | ||
1499 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1500 | .insert_memory = intel_i915_insert_entries, | ||
1501 | .remove_memory = intel_i915_remove_entries, | ||
1502 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1503 | .free_by_type = intel_i810_free_by_type, | ||
1504 | .agp_alloc_page = agp_generic_alloc_page, | ||
1505 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1506 | .agp_destroy_page = agp_generic_destroy_page, | ||
1507 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1508 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1509 | .chipset_flush = intel_i915_chipset_flush, | ||
1510 | #ifdef USE_PCI_DMA_API | ||
1511 | .agp_map_page = intel_agp_map_page, | ||
1512 | .agp_unmap_page = intel_agp_unmap_page, | ||
1513 | .agp_map_memory = intel_agp_map_memory, | ||
1514 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
1515 | #endif | ||
1516 | }; | ||
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 10f24e349a26..b9734a978186 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c | |||
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = { | |||
310 | .aperture_sizes = nvidia_generic_sizes, | 310 | .aperture_sizes = nvidia_generic_sizes, |
311 | .size_type = U8_APER_SIZE, | 311 | .size_type = U8_APER_SIZE, |
312 | .num_aperture_sizes = 5, | 312 | .num_aperture_sizes = 5, |
313 | .needs_scratch_page = true, | ||
313 | .configure = nvidia_configure, | 314 | .configure = nvidia_configure, |
314 | .fetch_size = nvidia_fetch_size, | 315 | .fetch_size = nvidia_fetch_size, |
315 | .cleanup = nvidia_cleanup, | 316 | .cleanup = nvidia_cleanup, |
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 6c3837a0184d..29aacd81de78 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c | |||
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = { | |||
125 | .aperture_sizes = sis_generic_sizes, | 125 | .aperture_sizes = sis_generic_sizes, |
126 | .size_type = U8_APER_SIZE, | 126 | .size_type = U8_APER_SIZE, |
127 | .num_aperture_sizes = 7, | 127 | .num_aperture_sizes = 7, |
128 | .needs_scratch_page = true, | ||
128 | .configure = sis_configure, | 129 | .configure = sis_configure, |
129 | .fetch_size = sis_fetch_size, | 130 | .fetch_size = sis_fetch_size, |
130 | .cleanup = sis_cleanup, | 131 | .cleanup = sis_cleanup, |
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = { | |||
415 | .subvendor = PCI_ANY_ID, | 416 | .subvendor = PCI_ANY_ID, |
416 | .subdevice = PCI_ANY_ID, | 417 | .subdevice = PCI_ANY_ID, |
417 | }, | 418 | }, |
418 | { | ||
419 | .class = (PCI_CLASS_BRIDGE_HOST << 8), | ||
420 | .class_mask = ~0, | ||
421 | .vendor = PCI_VENDOR_ID_SI, | ||
422 | .device = PCI_DEVICE_ID_SI_760, | ||
423 | .subvendor = PCI_ANY_ID, | ||
424 | .subdevice = PCI_ANY_ID, | ||
425 | }, | ||
426 | { } | 419 | { } |
427 | }; | 420 | }; |
428 | 421 | ||
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 6f48931ac1ce..95db71360d24 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | static int uninorth_rev; | 29 | static int uninorth_rev; |
30 | static int is_u3; | 30 | static int is_u3; |
31 | static u32 scratch_value; | ||
31 | 32 | ||
32 | #define DEFAULT_APERTURE_SIZE 256 | 33 | #define DEFAULT_APERTURE_SIZE 256 |
33 | #define DEFAULT_APERTURE_STRING "256" | 34 | #define DEFAULT_APERTURE_STRING "256" |
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty | |||
172 | 173 | ||
173 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; | 174 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; |
174 | for (i = 0; i < mem->page_count; ++i) { | 175 | for (i = 0; i < mem->page_count; ++i) { |
175 | if (gp[i]) { | 176 | if (gp[i] != scratch_value) { |
176 | dev_info(&agp_bridge->dev->dev, | 177 | dev_info(&agp_bridge->dev->dev, |
177 | "uninorth_insert_memory: entry 0x%x occupied (%x)\n", | 178 | "uninorth_insert_memory: entry 0x%x occupied (%x)\n", |
178 | i, gp[i]); | 179 | i, gp[i]); |
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
214 | return 0; | 215 | return 0; |
215 | 216 | ||
216 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; | 217 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; |
217 | for (i = 0; i < mem->page_count; ++i) | 218 | for (i = 0; i < mem->page_count; ++i) { |
218 | gp[i] = 0; | 219 | gp[i] = scratch_value; |
220 | } | ||
219 | mb(); | 221 | mb(); |
220 | uninorth_tlbflush(mem); | 222 | uninorth_tlbflush(mem); |
221 | 223 | ||
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) | |||
421 | 423 | ||
422 | bridge->gatt_bus_addr = virt_to_phys(table); | 424 | bridge->gatt_bus_addr = virt_to_phys(table); |
423 | 425 | ||
426 | if (is_u3) | ||
427 | scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL; | ||
428 | else | ||
429 | scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) | | ||
430 | 0x1UL); | ||
424 | for (i = 0; i < num_entries; i++) | 431 | for (i = 0; i < num_entries; i++) |
425 | bridge->gatt_table[i] = 0; | 432 | bridge->gatt_table[i] = scratch_value; |
426 | 433 | ||
427 | return 0; | 434 | return 0; |
428 | 435 | ||
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = { | |||
519 | .agp_destroy_pages = agp_generic_destroy_pages, | 526 | .agp_destroy_pages = agp_generic_destroy_pages, |
520 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 527 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
521 | .cant_use_aperture = true, | 528 | .cant_use_aperture = true, |
529 | .needs_scratch_page = true, | ||
522 | }; | 530 | }; |
523 | 531 | ||
524 | const struct agp_bridge_driver u3_agp_driver = { | 532 | const struct agp_bridge_driver u3_agp_driver = { |
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index d3bd243867fc..df67e80019d2 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c | |||
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = { | |||
175 | .aperture_sizes = agp3_generic_sizes, | 175 | .aperture_sizes = agp3_generic_sizes, |
176 | .size_type = U8_APER_SIZE, | 176 | .size_type = U8_APER_SIZE, |
177 | .num_aperture_sizes = 10, | 177 | .num_aperture_sizes = 10, |
178 | .needs_scratch_page = true, | ||
178 | .configure = via_configure_agp3, | 179 | .configure = via_configure_agp3, |
179 | .fetch_size = via_fetch_size_agp3, | 180 | .fetch_size = via_fetch_size_agp3, |
180 | .cleanup = via_cleanup_agp3, | 181 | .cleanup = via_cleanup_agp3, |
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = { | |||
201 | .aperture_sizes = via_generic_sizes, | 202 | .aperture_sizes = via_generic_sizes, |
202 | .size_type = U8_APER_SIZE, | 203 | .size_type = U8_APER_SIZE, |
203 | .num_aperture_sizes = 9, | 204 | .num_aperture_sizes = 9, |
205 | .needs_scratch_page = true, | ||
204 | .configure = via_configure, | 206 | .configure = via_configure, |
205 | .fetch_size = via_fetch_size, | 207 | .fetch_size = via_fetch_size, |
206 | .cleanup = via_cleanup, | 208 | .cleanup = via_cleanup, |
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 56b27671adc4..4f8d60c25a98 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c | |||
@@ -84,6 +84,7 @@ static char *serial_version = "4.30"; | |||
84 | #include <linux/smp_lock.h> | 84 | #include <linux/smp_lock.h> |
85 | #include <linux/init.h> | 85 | #include <linux/init.h> |
86 | #include <linux/bitops.h> | 86 | #include <linux/bitops.h> |
87 | #include <linux/platform_device.h> | ||
87 | 88 | ||
88 | #include <asm/setup.h> | 89 | #include <asm/setup.h> |
89 | 90 | ||
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = { | |||
1954 | /* | 1955 | /* |
1955 | * The serial driver boot-time initialization code! | 1956 | * The serial driver boot-time initialization code! |
1956 | */ | 1957 | */ |
1957 | static int __init rs_init(void) | 1958 | static int __init amiga_serial_probe(struct platform_device *pdev) |
1958 | { | 1959 | { |
1959 | unsigned long flags; | 1960 | unsigned long flags; |
1960 | struct serial_state * state; | 1961 | struct serial_state * state; |
1961 | int error; | 1962 | int error; |
1962 | 1963 | ||
1963 | if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL)) | ||
1964 | return -ENODEV; | ||
1965 | |||
1966 | serial_driver = alloc_tty_driver(1); | 1964 | serial_driver = alloc_tty_driver(1); |
1967 | if (!serial_driver) | 1965 | if (!serial_driver) |
1968 | return -ENOMEM; | 1966 | return -ENOMEM; |
1969 | 1967 | ||
1970 | /* | ||
1971 | * We request SERDAT and SERPER only, because the serial registers are | ||
1972 | * too spreaded over the custom register space | ||
1973 | */ | ||
1974 | if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4, | ||
1975 | "amiserial [Paula]")) { | ||
1976 | error = -EBUSY; | ||
1977 | goto fail_put_tty_driver; | ||
1978 | } | ||
1979 | |||
1980 | IRQ_ports = NULL; | 1968 | IRQ_ports = NULL; |
1981 | 1969 | ||
1982 | show_serial_version(); | 1970 | show_serial_version(); |
@@ -1998,7 +1986,7 @@ static int __init rs_init(void) | |||
1998 | 1986 | ||
1999 | error = tty_register_driver(serial_driver); | 1987 | error = tty_register_driver(serial_driver); |
2000 | if (error) | 1988 | if (error) |
2001 | goto fail_release_mem_region; | 1989 | goto fail_put_tty_driver; |
2002 | 1990 | ||
2003 | state = rs_table; | 1991 | state = rs_table; |
2004 | state->magic = SSTATE_MAGIC; | 1992 | state->magic = SSTATE_MAGIC; |
@@ -2050,23 +2038,24 @@ static int __init rs_init(void) | |||
2050 | ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ | 2038 | ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ |
2051 | ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ | 2039 | ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ |
2052 | 2040 | ||
2041 | platform_set_drvdata(pdev, state); | ||
2042 | |||
2053 | return 0; | 2043 | return 0; |
2054 | 2044 | ||
2055 | fail_free_irq: | 2045 | fail_free_irq: |
2056 | free_irq(IRQ_AMIGA_TBE, state); | 2046 | free_irq(IRQ_AMIGA_TBE, state); |
2057 | fail_unregister: | 2047 | fail_unregister: |
2058 | tty_unregister_driver(serial_driver); | 2048 | tty_unregister_driver(serial_driver); |
2059 | fail_release_mem_region: | ||
2060 | release_mem_region(CUSTOM_PHYSADDR+0x30, 4); | ||
2061 | fail_put_tty_driver: | 2049 | fail_put_tty_driver: |
2062 | put_tty_driver(serial_driver); | 2050 | put_tty_driver(serial_driver); |
2063 | return error; | 2051 | return error; |
2064 | } | 2052 | } |
2065 | 2053 | ||
2066 | static __exit void rs_exit(void) | 2054 | static int __exit amiga_serial_remove(struct platform_device *pdev) |
2067 | { | 2055 | { |
2068 | int error; | 2056 | int error; |
2069 | struct async_struct *info = rs_table[0].info; | 2057 | struct serial_state *state = platform_get_drvdata(pdev); |
2058 | struct async_struct *info = state->info; | ||
2070 | 2059 | ||
2071 | /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ | 2060 | /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ |
2072 | tasklet_kill(&info->tlet); | 2061 | tasklet_kill(&info->tlet); |
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void) | |||
2075 | error); | 2064 | error); |
2076 | put_tty_driver(serial_driver); | 2065 | put_tty_driver(serial_driver); |
2077 | 2066 | ||
2078 | if (info) { | 2067 | rs_table[0].info = NULL; |
2079 | rs_table[0].info = NULL; | 2068 | kfree(info); |
2080 | kfree(info); | ||
2081 | } | ||
2082 | 2069 | ||
2083 | free_irq(IRQ_AMIGA_TBE, rs_table); | 2070 | free_irq(IRQ_AMIGA_TBE, rs_table); |
2084 | free_irq(IRQ_AMIGA_RBF, rs_table); | 2071 | free_irq(IRQ_AMIGA_RBF, rs_table); |
2085 | 2072 | ||
2086 | release_mem_region(CUSTOM_PHYSADDR+0x30, 4); | 2073 | platform_set_drvdata(pdev, NULL); |
2074 | |||
2075 | return error; | ||
2076 | } | ||
2077 | |||
2078 | static struct platform_driver amiga_serial_driver = { | ||
2079 | .remove = __exit_p(amiga_serial_remove), | ||
2080 | .driver = { | ||
2081 | .name = "amiga-serial", | ||
2082 | .owner = THIS_MODULE, | ||
2083 | }, | ||
2084 | }; | ||
2085 | |||
2086 | static int __init amiga_serial_init(void) | ||
2087 | { | ||
2088 | return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe); | ||
2089 | } | ||
2090 | |||
2091 | module_init(amiga_serial_init); | ||
2092 | |||
2093 | static void __exit amiga_serial_exit(void) | ||
2094 | { | ||
2095 | platform_driver_unregister(&amiga_serial_driver); | ||
2087 | } | 2096 | } |
2088 | 2097 | ||
2089 | module_init(rs_init) | 2098 | module_exit(amiga_serial_exit); |
2090 | module_exit(rs_exit) | ||
2091 | 2099 | ||
2092 | 2100 | ||
2093 | #if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) | 2101 | #if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) |
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init); | |||
2154 | #endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ | 2162 | #endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ |
2155 | 2163 | ||
2156 | MODULE_LICENSE("GPL"); | 2164 | MODULE_LICENSE("GPL"); |
2165 | MODULE_ALIAS("platform:amiga-serial"); | ||
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index 4f568cb9af3f..033e1505fca9 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c | |||
@@ -265,8 +265,8 @@ static unsigned int apm_poll(struct file *fp, poll_table * wait) | |||
265 | * Only when everyone who has opened /dev/apm_bios with write permission | 265 | * Only when everyone who has opened /dev/apm_bios with write permission |
266 | * has acknowledge does the actual suspend happen. | 266 | * has acknowledge does the actual suspend happen. |
267 | */ | 267 | */ |
268 | static int | 268 | static long |
269 | apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) | 269 | apm_ioctl(struct file *filp, u_int cmd, u_long arg) |
270 | { | 270 | { |
271 | struct apm_user *as = filp->private_data; | 271 | struct apm_user *as = filp->private_data; |
272 | int err = -EINVAL; | 272 | int err = -EINVAL; |
@@ -274,6 +274,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) | |||
274 | if (!as->suser || !as->writer) | 274 | if (!as->suser || !as->writer) |
275 | return -EPERM; | 275 | return -EPERM; |
276 | 276 | ||
277 | lock_kernel(); | ||
277 | switch (cmd) { | 278 | switch (cmd) { |
278 | case APM_IOC_SUSPEND: | 279 | case APM_IOC_SUSPEND: |
279 | mutex_lock(&state_lock); | 280 | mutex_lock(&state_lock); |
@@ -334,6 +335,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) | |||
334 | mutex_unlock(&state_lock); | 335 | mutex_unlock(&state_lock); |
335 | break; | 336 | break; |
336 | } | 337 | } |
338 | unlock_kernel(); | ||
337 | 339 | ||
338 | return err; | 340 | return err; |
339 | } | 341 | } |
@@ -397,7 +399,7 @@ static const struct file_operations apm_bios_fops = { | |||
397 | .owner = THIS_MODULE, | 399 | .owner = THIS_MODULE, |
398 | .read = apm_read, | 400 | .read = apm_read, |
399 | .poll = apm_poll, | 401 | .poll = apm_poll, |
400 | .ioctl = apm_ioctl, | 402 | .unlocked_ioctl = apm_ioctl, |
401 | .open = apm_open, | 403 | .open = apm_open, |
402 | .release = apm_release, | 404 | .release = apm_release, |
403 | }; | 405 | }; |
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index a7424bf7eacf..f4ae0e0fb631 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/smp_lock.h> | ||
29 | #include <linux/miscdevice.h> | 30 | #include <linux/miscdevice.h> |
30 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
31 | #include <linux/wait.h> | 32 | #include <linux/wait.h> |
@@ -106,8 +107,7 @@ static unsigned int DeviceErrorCount; /* number of device error */ | |||
106 | 107 | ||
107 | static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *); | 108 | static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *); |
108 | static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *); | 109 | static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *); |
109 | static int ac_ioctl(struct inode *, struct file *, unsigned int, | 110 | static long ac_ioctl(struct file *, unsigned int, unsigned long); |
110 | unsigned long); | ||
111 | static irqreturn_t ac_interrupt(int, void *); | 111 | static irqreturn_t ac_interrupt(int, void *); |
112 | 112 | ||
113 | static const struct file_operations ac_fops = { | 113 | static const struct file_operations ac_fops = { |
@@ -115,7 +115,7 @@ static const struct file_operations ac_fops = { | |||
115 | .llseek = no_llseek, | 115 | .llseek = no_llseek, |
116 | .read = ac_read, | 116 | .read = ac_read, |
117 | .write = ac_write, | 117 | .write = ac_write, |
118 | .ioctl = ac_ioctl, | 118 | .unlocked_ioctl = ac_ioctl, |
119 | }; | 119 | }; |
120 | 120 | ||
121 | static struct miscdevice ac_miscdev = { | 121 | static struct miscdevice ac_miscdev = { |
@@ -689,7 +689,7 @@ static irqreturn_t ac_interrupt(int vec, void *dev_instance) | |||
689 | 689 | ||
690 | 690 | ||
691 | 691 | ||
692 | static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 692 | static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
693 | 693 | ||
694 | { /* @ ADG ou ATO selon le cas */ | 694 | { /* @ ADG ou ATO selon le cas */ |
695 | int i; | 695 | int i; |
@@ -703,15 +703,11 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un | |||
703 | /* In general, the device is only openable by root anyway, so we're not | 703 | /* In general, the device is only openable by root anyway, so we're not |
704 | particularly concerned that bogus ioctls can flood the console. */ | 704 | particularly concerned that bogus ioctls can flood the console. */ |
705 | 705 | ||
706 | adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL); | 706 | adgl = memdup_user(argp, sizeof(struct st_ram_io)); |
707 | if (!adgl) | 707 | if (IS_ERR(adgl)) |
708 | return -ENOMEM; | 708 | return PTR_ERR(adgl); |
709 | 709 | ||
710 | if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) { | 710 | lock_kernel(); |
711 | kfree(adgl); | ||
712 | return -EFAULT; | ||
713 | } | ||
714 | |||
715 | IndexCard = adgl->num_card-1; | 711 | IndexCard = adgl->num_card-1; |
716 | 712 | ||
717 | if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { | 713 | if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { |
@@ -721,6 +717,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un | |||
721 | warncount--; | 717 | warncount--; |
722 | } | 718 | } |
723 | kfree(adgl); | 719 | kfree(adgl); |
720 | unlock_kernel(); | ||
724 | return -EINVAL; | 721 | return -EINVAL; |
725 | } | 722 | } |
726 | 723 | ||
@@ -838,6 +835,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un | |||
838 | } | 835 | } |
839 | Dummy = readb(apbs[IndexCard].RamIO + VERS); | 836 | Dummy = readb(apbs[IndexCard].RamIO + VERS); |
840 | kfree(adgl); | 837 | kfree(adgl); |
838 | unlock_kernel(); | ||
841 | return 0; | 839 | return 0; |
842 | } | 840 | } |
843 | 841 | ||
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c index 61f0146e215d..dbee8688f75c 100644 --- a/drivers/char/ds1620.c +++ b/drivers/char/ds1620.c | |||
@@ -232,7 +232,7 @@ ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) | |||
232 | } | 232 | } |
233 | 233 | ||
234 | static int | 234 | static int |
235 | ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 235 | ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
236 | { | 236 | { |
237 | struct therm therm; | 237 | struct therm therm; |
238 | union { | 238 | union { |
@@ -316,6 +316,18 @@ ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned | |||
316 | return 0; | 316 | return 0; |
317 | } | 317 | } |
318 | 318 | ||
319 | static long | ||
320 | ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
321 | { | ||
322 | int ret; | ||
323 | |||
324 | lock_kernel(); | ||
325 | ret = ds1620_ioctl(file, cmd, arg); | ||
326 | unlock_kernel(); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
330 | |||
319 | #ifdef THERM_USE_PROC | 331 | #ifdef THERM_USE_PROC |
320 | static int | 332 | static int |
321 | proc_therm_ds1620_read(char *buf, char **start, off_t offset, | 333 | proc_therm_ds1620_read(char *buf, char **start, off_t offset, |
@@ -344,7 +356,7 @@ static const struct file_operations ds1620_fops = { | |||
344 | .owner = THIS_MODULE, | 356 | .owner = THIS_MODULE, |
345 | .open = ds1620_open, | 357 | .open = ds1620_open, |
346 | .read = ds1620_read, | 358 | .read = ds1620_read, |
347 | .ioctl = ds1620_ioctl, | 359 | .unlocked_ioctl = ds1620_unlocked_ioctl, |
348 | }; | 360 | }; |
349 | 361 | ||
350 | static struct miscdevice ds1620_miscdev = { | 362 | static struct miscdevice ds1620_miscdev = { |
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 045c930e6320..e3859d4eaead 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c | |||
@@ -93,8 +93,8 @@ static ssize_t dtlk_write(struct file *, const char __user *, | |||
93 | static unsigned int dtlk_poll(struct file *, poll_table *); | 93 | static unsigned int dtlk_poll(struct file *, poll_table *); |
94 | static int dtlk_open(struct inode *, struct file *); | 94 | static int dtlk_open(struct inode *, struct file *); |
95 | static int dtlk_release(struct inode *, struct file *); | 95 | static int dtlk_release(struct inode *, struct file *); |
96 | static int dtlk_ioctl(struct inode *inode, struct file *file, | 96 | static long dtlk_ioctl(struct file *file, |
97 | unsigned int cmd, unsigned long arg); | 97 | unsigned int cmd, unsigned long arg); |
98 | 98 | ||
99 | static const struct file_operations dtlk_fops = | 99 | static const struct file_operations dtlk_fops = |
100 | { | 100 | { |
@@ -102,7 +102,7 @@ static const struct file_operations dtlk_fops = | |||
102 | .read = dtlk_read, | 102 | .read = dtlk_read, |
103 | .write = dtlk_write, | 103 | .write = dtlk_write, |
104 | .poll = dtlk_poll, | 104 | .poll = dtlk_poll, |
105 | .ioctl = dtlk_ioctl, | 105 | .unlocked_ioctl = dtlk_ioctl, |
106 | .open = dtlk_open, | 106 | .open = dtlk_open, |
107 | .release = dtlk_release, | 107 | .release = dtlk_release, |
108 | }; | 108 | }; |
@@ -263,10 +263,9 @@ static void dtlk_timer_tick(unsigned long data) | |||
263 | wake_up_interruptible(&dtlk_process_list); | 263 | wake_up_interruptible(&dtlk_process_list); |
264 | } | 264 | } |
265 | 265 | ||
266 | static int dtlk_ioctl(struct inode *inode, | 266 | static long dtlk_ioctl(struct file *file, |
267 | struct file *file, | 267 | unsigned int cmd, |
268 | unsigned int cmd, | 268 | unsigned long arg) |
269 | unsigned long arg) | ||
270 | { | 269 | { |
271 | char __user *argp = (char __user *)arg; | 270 | char __user *argp = (char __user *)arg; |
272 | struct dtlk_settings *sp; | 271 | struct dtlk_settings *sp; |
@@ -276,7 +275,9 @@ static int dtlk_ioctl(struct inode *inode, | |||
276 | switch (cmd) { | 275 | switch (cmd) { |
277 | 276 | ||
278 | case DTLK_INTERROGATE: | 277 | case DTLK_INTERROGATE: |
278 | lock_kernel(); | ||
279 | sp = dtlk_interrogate(); | 279 | sp = dtlk_interrogate(); |
280 | unlock_kernel(); | ||
280 | if (copy_to_user(argp, sp, sizeof(struct dtlk_settings))) | 281 | if (copy_to_user(argp, sp, sizeof(struct dtlk_settings))) |
281 | return -EINVAL; | 282 | return -EINVAL; |
282 | return 0; | 283 | return 0; |
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c index fda4181b5e67..82b5a88a82d7 100644 --- a/drivers/char/generic_nvram.c +++ b/drivers/char/generic_nvram.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/miscdevice.h> | 19 | #include <linux/miscdevice.h> |
20 | #include <linux/fcntl.h> | 20 | #include <linux/fcntl.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/smp_lock.h> | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | #include <asm/nvram.h> | 24 | #include <asm/nvram.h> |
24 | #ifdef CONFIG_PPC_PMAC | 25 | #ifdef CONFIG_PPC_PMAC |
@@ -84,8 +85,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf, | |||
84 | return p - buf; | 85 | return p - buf; |
85 | } | 86 | } |
86 | 87 | ||
87 | static int nvram_ioctl(struct inode *inode, struct file *file, | 88 | static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
88 | unsigned int cmd, unsigned long arg) | ||
89 | { | 89 | { |
90 | switch(cmd) { | 90 | switch(cmd) { |
91 | #ifdef CONFIG_PPC_PMAC | 91 | #ifdef CONFIG_PPC_PMAC |
@@ -116,12 +116,23 @@ static int nvram_ioctl(struct inode *inode, struct file *file, | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
120 | { | ||
121 | int ret; | ||
122 | |||
123 | lock_kernel(); | ||
124 | ret = nvram_ioctl(file, cmd, arg); | ||
125 | unlock_kernel(); | ||
126 | |||
127 | return ret; | ||
128 | } | ||
129 | |||
119 | const struct file_operations nvram_fops = { | 130 | const struct file_operations nvram_fops = { |
120 | .owner = THIS_MODULE, | 131 | .owner = THIS_MODULE, |
121 | .llseek = nvram_llseek, | 132 | .llseek = nvram_llseek, |
122 | .read = read_nvram, | 133 | .read = read_nvram, |
123 | .write = write_nvram, | 134 | .write = write_nvram, |
124 | .ioctl = nvram_ioctl, | 135 | .unlocked_ioctl = nvram_unlocked_ioctl, |
125 | }; | 136 | }; |
126 | 137 | ||
127 | static struct miscdevice nvram_dev = { | 138 | static struct miscdevice nvram_dev = { |
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c index 31e7c91c2d9d..b6c2cc167c11 100644 --- a/drivers/char/genrtc.c +++ b/drivers/char/genrtc.c | |||
@@ -262,7 +262,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit) | |||
262 | #endif | 262 | #endif |
263 | } | 263 | } |
264 | 264 | ||
265 | static int gen_rtc_ioctl(struct inode *inode, struct file *file, | 265 | static int gen_rtc_ioctl(struct file *file, |
266 | unsigned int cmd, unsigned long arg) | 266 | unsigned int cmd, unsigned long arg) |
267 | { | 267 | { |
268 | struct rtc_time wtime; | 268 | struct rtc_time wtime; |
@@ -332,6 +332,18 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file, | |||
332 | return -EINVAL; | 332 | return -EINVAL; |
333 | } | 333 | } |
334 | 334 | ||
335 | static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd, | ||
336 | unsigned long arg) | ||
337 | { | ||
338 | int ret; | ||
339 | |||
340 | lock_kernel(); | ||
341 | ret = gen_rtc_ioctl(file, cmd, arg); | ||
342 | unlock_kernel(); | ||
343 | |||
344 | return ret; | ||
345 | } | ||
346 | |||
335 | /* | 347 | /* |
336 | * We enforce only one user at a time here with the open/close. | 348 | * We enforce only one user at a time here with the open/close. |
337 | * Also clear the previous interrupt data on an open, and clean | 349 | * Also clear the previous interrupt data on an open, and clean |
@@ -482,7 +494,7 @@ static const struct file_operations gen_rtc_fops = { | |||
482 | .read = gen_rtc_read, | 494 | .read = gen_rtc_read, |
483 | .poll = gen_rtc_poll, | 495 | .poll = gen_rtc_poll, |
484 | #endif | 496 | #endif |
485 | .ioctl = gen_rtc_ioctl, | 497 | .unlocked_ioctl = gen_rtc_unlocked_ioctl, |
486 | .open = gen_rtc_open, | 498 | .open = gen_rtc_open, |
487 | .release = gen_rtc_release, | 499 | .release = gen_rtc_release, |
488 | }; | 500 | }; |
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 712d9f271aa6..e0249722d25f 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c | |||
@@ -49,8 +49,9 @@ | |||
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | #include <linux/sysrq.h> | 50 | #include <linux/sysrq.h> |
51 | #include <linux/timer.h> | 51 | #include <linux/timer.h> |
52 | #include <linux/time.h> | ||
52 | 53 | ||
53 | #define VERSION_STR "0.9.0" | 54 | #define VERSION_STR "0.9.1" |
54 | 55 | ||
55 | #define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */ | 56 | #define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */ |
56 | #define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */ | 57 | #define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */ |
@@ -119,10 +120,8 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); | |||
119 | #if defined(CONFIG_S390) | 120 | #if defined(CONFIG_S390) |
120 | # define HAVE_MONOTONIC | 121 | # define HAVE_MONOTONIC |
121 | # define TIMER_FREQ 1000000000ULL | 122 | # define TIMER_FREQ 1000000000ULL |
122 | #elif defined(CONFIG_IA64) | ||
123 | # define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq) | ||
124 | #else | 123 | #else |
125 | # define TIMER_FREQ (HZ*loops_per_jiffy) | 124 | # define TIMER_FREQ 1000000000ULL |
126 | #endif | 125 | #endif |
127 | 126 | ||
128 | #ifdef HAVE_MONOTONIC | 127 | #ifdef HAVE_MONOTONIC |
@@ -130,7 +129,9 @@ extern unsigned long long monotonic_clock(void); | |||
130 | #else | 129 | #else |
131 | static inline unsigned long long monotonic_clock(void) | 130 | static inline unsigned long long monotonic_clock(void) |
132 | { | 131 | { |
133 | return get_cycles(); | 132 | struct timespec ts; |
133 | getrawmonotonic(&ts); | ||
134 | return timespec_to_ns(&ts); | ||
134 | } | 135 | } |
135 | #endif /* HAVE_MONOTONIC */ | 136 | #endif /* HAVE_MONOTONIC */ |
136 | 137 | ||
@@ -168,6 +169,13 @@ static void hangcheck_fire(unsigned long data) | |||
168 | printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); | 169 | printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); |
169 | } | 170 | } |
170 | } | 171 | } |
172 | #if 0 | ||
173 | /* | ||
174 | * Enable to investigate delays in detail | ||
175 | */ | ||
176 | printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n", | ||
177 | tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ); | ||
178 | #endif | ||
171 | mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); | 179 | mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); |
172 | hangcheck_tsc = monotonic_clock(); | 180 | hangcheck_tsc = monotonic_clock(); |
173 | } | 181 | } |
@@ -180,7 +188,7 @@ static int __init hangcheck_init(void) | |||
180 | #if defined (HAVE_MONOTONIC) | 188 | #if defined (HAVE_MONOTONIC) |
181 | printk("Hangcheck: Using monotonic_clock().\n"); | 189 | printk("Hangcheck: Using monotonic_clock().\n"); |
182 | #else | 190 | #else |
183 | printk("Hangcheck: Using get_cycles().\n"); | 191 | printk("Hangcheck: Using getrawmonotonic().\n"); |
184 | #endif /* HAVE_MONOTONIC */ | 192 | #endif /* HAVE_MONOTONIC */ |
185 | hangcheck_tsc_margin = | 193 | hangcheck_tsc_margin = |
186 | (unsigned long long)(hangcheck_margin + hangcheck_tick); | 194 | (unsigned long long)(hangcheck_margin + hangcheck_tick); |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 9ded667625ac..a0a1829d3198 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -431,14 +431,18 @@ static int hpet_release(struct inode *inode, struct file *file) | |||
431 | 431 | ||
432 | static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); | 432 | static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); |
433 | 433 | ||
434 | static int | 434 | static long hpet_ioctl(struct file *file, unsigned int cmd, |
435 | hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | 435 | unsigned long arg) |
436 | unsigned long arg) | ||
437 | { | 436 | { |
438 | struct hpet_dev *devp; | 437 | struct hpet_dev *devp; |
438 | int ret; | ||
439 | 439 | ||
440 | devp = file->private_data; | 440 | devp = file->private_data; |
441 | return hpet_ioctl_common(devp, cmd, arg, 0); | 441 | lock_kernel(); |
442 | ret = hpet_ioctl_common(devp, cmd, arg, 0); | ||
443 | unlock_kernel(); | ||
444 | |||
445 | return ret; | ||
442 | } | 446 | } |
443 | 447 | ||
444 | static int hpet_ioctl_ieon(struct hpet_dev *devp) | 448 | static int hpet_ioctl_ieon(struct hpet_dev *devp) |
@@ -654,7 +658,7 @@ static const struct file_operations hpet_fops = { | |||
654 | .llseek = no_llseek, | 658 | .llseek = no_llseek, |
655 | .read = hpet_read, | 659 | .read = hpet_read, |
656 | .poll = hpet_poll, | 660 | .poll = hpet_poll, |
657 | .ioctl = hpet_ioctl, | 661 | .unlocked_ioctl = hpet_ioctl, |
658 | .open = hpet_open, | 662 | .open = hpet_open, |
659 | .release = hpet_release, | 663 | .release = hpet_release, |
660 | .fasync = hpet_fasync, | 664 | .fasync = hpet_fasync, |
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 793b236c9266..d4b14ff1c4c1 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
@@ -194,10 +194,8 @@ static inline void print_state(struct hvsi_struct *hp) | |||
194 | "HVSI_WAIT_FOR_MCTRL_RESPONSE", | 194 | "HVSI_WAIT_FOR_MCTRL_RESPONSE", |
195 | "HVSI_FSP_DIED", | 195 | "HVSI_FSP_DIED", |
196 | }; | 196 | }; |
197 | const char *name = state_names[hp->state]; | 197 | const char *name = (hp->state < ARRAY_SIZE(state_names)) |
198 | 198 | ? state_names[hp->state] : "UNKNOWN"; | |
199 | if (hp->state > ARRAY_SIZE(state_names)) | ||
200 | name = "UNKNOWN"; | ||
201 | 199 | ||
202 | pr_debug("hvsi%i: state = %s\n", hp->index, name); | 200 | pr_debug("hvsi%i: state = %s\n", hp->index, name); |
203 | #endif /* DEBUG */ | 201 | #endif /* DEBUG */ |
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 10f868eefaa6..0f9cbf1aaf15 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c | |||
@@ -660,7 +660,7 @@ static int __devinit n2rng_probe(struct of_device *op, | |||
660 | np->hvapi_major); | 660 | np->hvapi_major); |
661 | goto out_hvapi_unregister; | 661 | goto out_hvapi_unregister; |
662 | } | 662 | } |
663 | np->num_units = of_getintprop_default(op->node, | 663 | np->num_units = of_getintprop_default(op->dev.of_node, |
664 | "rng-#units", 0); | 664 | "rng-#units", 0); |
665 | if (!np->num_units) { | 665 | if (!np->num_units) { |
666 | dev_err(&op->dev, "VF RNG lacks rng-#units property\n"); | 666 | dev_err(&op->dev, "VF RNG lacks rng-#units property\n"); |
@@ -751,8 +751,11 @@ static const struct of_device_id n2rng_match[] = { | |||
751 | MODULE_DEVICE_TABLE(of, n2rng_match); | 751 | MODULE_DEVICE_TABLE(of, n2rng_match); |
752 | 752 | ||
753 | static struct of_platform_driver n2rng_driver = { | 753 | static struct of_platform_driver n2rng_driver = { |
754 | .name = "n2rng", | 754 | .driver = { |
755 | .match_table = n2rng_match, | 755 | .name = "n2rng", |
756 | .owner = THIS_MODULE, | ||
757 | .of_match_table = n2rng_match, | ||
758 | }, | ||
756 | .probe = n2rng_probe, | 759 | .probe = n2rng_probe, |
757 | .remove = __devexit_p(n2rng_remove), | 760 | .remove = __devexit_p(n2rng_remove), |
758 | }; | 761 | }; |
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index a8b4c4010144..a348c7e9aa0b 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c | |||
@@ -15,6 +15,10 @@ | |||
15 | #include <linux/amba/bus.h> | 15 | #include <linux/amba/bus.h> |
16 | #include <linux/hw_random.h> | 16 | #include <linux/hw_random.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/clk.h> | ||
19 | #include <linux/err.h> | ||
20 | |||
21 | static struct clk *rng_clk; | ||
18 | 22 | ||
19 | static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) | 23 | static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) |
20 | { | 24 | { |
@@ -40,6 +44,15 @@ static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id) | |||
40 | void __iomem *base; | 44 | void __iomem *base; |
41 | int ret; | 45 | int ret; |
42 | 46 | ||
47 | rng_clk = clk_get(&dev->dev, NULL); | ||
48 | if (IS_ERR(rng_clk)) { | ||
49 | dev_err(&dev->dev, "could not get rng clock\n"); | ||
50 | ret = PTR_ERR(rng_clk); | ||
51 | return ret; | ||
52 | } | ||
53 | |||
54 | clk_enable(rng_clk); | ||
55 | |||
43 | ret = amba_request_regions(dev, dev->dev.init_name); | 56 | ret = amba_request_regions(dev, dev->dev.init_name); |
44 | if (ret) | 57 | if (ret) |
45 | return ret; | 58 | return ret; |
@@ -57,6 +70,8 @@ out_unmap: | |||
57 | iounmap(base); | 70 | iounmap(base); |
58 | out_release: | 71 | out_release: |
59 | amba_release_regions(dev); | 72 | amba_release_regions(dev); |
73 | clk_disable(rng_clk); | ||
74 | clk_put(rng_clk); | ||
60 | return ret; | 75 | return ret; |
61 | } | 76 | } |
62 | 77 | ||
@@ -66,6 +81,8 @@ static int nmk_rng_remove(struct amba_device *dev) | |||
66 | hwrng_unregister(&nmk_rng); | 81 | hwrng_unregister(&nmk_rng); |
67 | iounmap(base); | 82 | iounmap(base); |
68 | amba_release_regions(dev); | 83 | amba_release_regions(dev); |
84 | clk_disable(rng_clk); | ||
85 | clk_put(rng_clk); | ||
69 | return 0; | 86 | return 0; |
70 | } | 87 | } |
71 | 88 | ||
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 7fa61dd1d9d9..261ba8f22b8b 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c | |||
@@ -98,7 +98,7 @@ static int __devinit rng_probe(struct of_device *ofdev, | |||
98 | const struct of_device_id *match) | 98 | const struct of_device_id *match) |
99 | { | 99 | { |
100 | void __iomem *rng_regs; | 100 | void __iomem *rng_regs; |
101 | struct device_node *rng_np = ofdev->node; | 101 | struct device_node *rng_np = ofdev->dev.of_node; |
102 | struct resource res; | 102 | struct resource res; |
103 | int err = 0; | 103 | int err = 0; |
104 | 104 | ||
@@ -140,8 +140,11 @@ static struct of_device_id rng_match[] = { | |||
140 | }; | 140 | }; |
141 | 141 | ||
142 | static struct of_platform_driver rng_driver = { | 142 | static struct of_platform_driver rng_driver = { |
143 | .name = "pasemi-rng", | 143 | .driver = { |
144 | .match_table = rng_match, | 144 | .name = "pasemi-rng", |
145 | .owner = THIS_MODULE, | ||
146 | .of_match_table = rng_match, | ||
147 | }, | ||
145 | .probe = rng_probe, | 148 | .probe = rng_probe, |
146 | .remove = rng_remove, | 149 | .remove = rng_remove, |
147 | }; | 150 | }; |
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 64fe0a793efd..75f1cbd61c17 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c | |||
@@ -32,7 +32,7 @@ static bool busy; | |||
32 | static void random_recv_done(struct virtqueue *vq) | 32 | static void random_recv_done(struct virtqueue *vq) |
33 | { | 33 | { |
34 | /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ | 34 | /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ |
35 | if (!vq->vq_ops->get_buf(vq, &data_avail)) | 35 | if (!virtqueue_get_buf(vq, &data_avail)) |
36 | return; | 36 | return; |
37 | 37 | ||
38 | complete(&have_data); | 38 | complete(&have_data); |
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size) | |||
46 | sg_init_one(&sg, buf, size); | 46 | sg_init_one(&sg, buf, size); |
47 | 47 | ||
48 | /* There should always be room for one buffer. */ | 48 | /* There should always be room for one buffer. */ |
49 | if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0) | 49 | if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0) |
50 | BUG(); | 50 | BUG(); |
51 | 51 | ||
52 | vq->vq_ops->kick(vq); | 52 | virtqueue_kick(vq); |
53 | } | 53 | } |
54 | 54 | ||
55 | static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) | 55 | static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) |
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 65545de3dbf4..d8ec92a38980 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c | |||
@@ -228,8 +228,7 @@ static int handle_send_req(ipmi_user_t user, | |||
228 | return rv; | 228 | return rv; |
229 | } | 229 | } |
230 | 230 | ||
231 | static int ipmi_ioctl(struct inode *inode, | 231 | static int ipmi_ioctl(struct file *file, |
232 | struct file *file, | ||
233 | unsigned int cmd, | 232 | unsigned int cmd, |
234 | unsigned long data) | 233 | unsigned long data) |
235 | { | 234 | { |
@@ -630,6 +629,23 @@ static int ipmi_ioctl(struct inode *inode, | |||
630 | return rv; | 629 | return rv; |
631 | } | 630 | } |
632 | 631 | ||
632 | /* | ||
633 | * Note: it doesn't make sense to take the BKL here but | ||
634 | * not in compat_ipmi_ioctl. -arnd | ||
635 | */ | ||
636 | static long ipmi_unlocked_ioctl(struct file *file, | ||
637 | unsigned int cmd, | ||
638 | unsigned long data) | ||
639 | { | ||
640 | int ret; | ||
641 | |||
642 | lock_kernel(); | ||
643 | ret = ipmi_ioctl(file, cmd, data); | ||
644 | unlock_kernel(); | ||
645 | |||
646 | return ret; | ||
647 | } | ||
648 | |||
633 | #ifdef CONFIG_COMPAT | 649 | #ifdef CONFIG_COMPAT |
634 | 650 | ||
635 | /* | 651 | /* |
@@ -802,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | |||
802 | if (copy_to_user(precv64, &recv64, sizeof(recv64))) | 818 | if (copy_to_user(precv64, &recv64, sizeof(recv64))) |
803 | return -EFAULT; | 819 | return -EFAULT; |
804 | 820 | ||
805 | rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep, | 821 | rc = ipmi_ioctl(filep, |
806 | ((cmd == COMPAT_IPMICTL_RECEIVE_MSG) | 822 | ((cmd == COMPAT_IPMICTL_RECEIVE_MSG) |
807 | ? IPMICTL_RECEIVE_MSG | 823 | ? IPMICTL_RECEIVE_MSG |
808 | : IPMICTL_RECEIVE_MSG_TRUNC), | 824 | : IPMICTL_RECEIVE_MSG_TRUNC), |
@@ -819,14 +835,14 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | |||
819 | return rc; | 835 | return rc; |
820 | } | 836 | } |
821 | default: | 837 | default: |
822 | return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg); | 838 | return ipmi_ioctl(filep, cmd, arg); |
823 | } | 839 | } |
824 | } | 840 | } |
825 | #endif | 841 | #endif |
826 | 842 | ||
827 | static const struct file_operations ipmi_fops = { | 843 | static const struct file_operations ipmi_fops = { |
828 | .owner = THIS_MODULE, | 844 | .owner = THIS_MODULE, |
829 | .ioctl = ipmi_ioctl, | 845 | .unlocked_ioctl = ipmi_unlocked_ioctl, |
830 | #ifdef CONFIG_COMPAT | 846 | #ifdef CONFIG_COMPAT |
831 | .compat_ioctl = compat_ipmi_ioctl, | 847 | .compat_ioctl = compat_ipmi_ioctl, |
832 | #endif | 848 | #endif |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c6ad4234378d..4f3f8c9ec262 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, | |||
2505 | return rv; | 2505 | return rv; |
2506 | } | 2506 | } |
2507 | 2507 | ||
2508 | printk(KERN_INFO | 2508 | dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " |
2509 | "ipmi: Found new BMC (man_id: 0x%6.6x, " | 2509 | "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", |
2510 | " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", | 2510 | bmc->id.manufacturer_id, |
2511 | bmc->id.manufacturer_id, | 2511 | bmc->id.product_id, |
2512 | bmc->id.product_id, | 2512 | bmc->id.device_id); |
2513 | bmc->id.device_id); | ||
2514 | } | 2513 | } |
2515 | 2514 | ||
2516 | /* | 2515 | /* |
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void) | |||
4037 | 4036 | ||
4038 | static struct timer_list ipmi_timer; | 4037 | static struct timer_list ipmi_timer; |
4039 | 4038 | ||
4040 | /* Call every ~100 ms. */ | 4039 | /* Call every ~1000 ms. */ |
4041 | #define IPMI_TIMEOUT_TIME 100 | 4040 | #define IPMI_TIMEOUT_TIME 1000 |
4042 | 4041 | ||
4043 | /* How many jiffies does it take to get to the timeout time. */ | 4042 | /* How many jiffies does it take to get to the timeout time. */ |
4044 | #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) | 4043 | #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 4462b113ba3f..35603dd4e6c5 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -107,6 +107,14 @@ enum si_type { | |||
107 | }; | 107 | }; |
108 | static char *si_to_str[] = { "kcs", "smic", "bt" }; | 108 | static char *si_to_str[] = { "kcs", "smic", "bt" }; |
109 | 109 | ||
110 | enum ipmi_addr_src { | ||
111 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, | ||
112 | SI_PCI, SI_DEVICETREE, SI_DEFAULT | ||
113 | }; | ||
114 | static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", | ||
115 | "ACPI", "SMBIOS", "PCI", | ||
116 | "device-tree", "default" }; | ||
117 | |||
110 | #define DEVICE_NAME "ipmi_si" | 118 | #define DEVICE_NAME "ipmi_si" |
111 | 119 | ||
112 | static struct platform_driver ipmi_driver = { | 120 | static struct platform_driver ipmi_driver = { |
@@ -188,7 +196,7 @@ struct smi_info { | |||
188 | int (*irq_setup)(struct smi_info *info); | 196 | int (*irq_setup)(struct smi_info *info); |
189 | void (*irq_cleanup)(struct smi_info *info); | 197 | void (*irq_cleanup)(struct smi_info *info); |
190 | unsigned int io_size; | 198 | unsigned int io_size; |
191 | char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ | 199 | enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ |
192 | void (*addr_source_cleanup)(struct smi_info *info); | 200 | void (*addr_source_cleanup)(struct smi_info *info); |
193 | void *addr_source_data; | 201 | void *addr_source_data; |
194 | 202 | ||
@@ -300,6 +308,7 @@ static int num_max_busy_us; | |||
300 | 308 | ||
301 | static int unload_when_empty = 1; | 309 | static int unload_when_empty = 1; |
302 | 310 | ||
311 | static int add_smi(struct smi_info *smi); | ||
303 | static int try_smi_init(struct smi_info *smi); | 312 | static int try_smi_init(struct smi_info *smi); |
304 | static void cleanup_one_si(struct smi_info *to_clean); | 313 | static void cleanup_one_si(struct smi_info *to_clean); |
305 | 314 | ||
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info, | |||
314 | { | 323 | { |
315 | /* Deliver the message to the upper layer with the lock | 324 | /* Deliver the message to the upper layer with the lock |
316 | released. */ | 325 | released. */ |
317 | spin_unlock(&(smi_info->si_lock)); | 326 | |
318 | ipmi_smi_msg_received(smi_info->intf, msg); | 327 | if (smi_info->run_to_completion) { |
319 | spin_lock(&(smi_info->si_lock)); | 328 | ipmi_smi_msg_received(smi_info->intf, msg); |
329 | } else { | ||
330 | spin_unlock(&(smi_info->si_lock)); | ||
331 | ipmi_smi_msg_received(smi_info->intf, msg); | ||
332 | spin_lock(&(smi_info->si_lock)); | ||
333 | } | ||
320 | } | 334 | } |
321 | 335 | ||
322 | static void return_hosed_msg(struct smi_info *smi_info, int cCode) | 336 | static void return_hosed_msg(struct smi_info *smi_info, int cCode) |
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info) | |||
445 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 459 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
446 | start_disable_irq(smi_info); | 460 | start_disable_irq(smi_info); |
447 | smi_info->interrupt_disabled = 1; | 461 | smi_info->interrupt_disabled = 1; |
462 | if (!atomic_read(&smi_info->stop_operation)) | ||
463 | mod_timer(&smi_info->si_timer, | ||
464 | jiffies + SI_TIMEOUT_JIFFIES); | ||
448 | } | 465 | } |
449 | } | 466 | } |
450 | 467 | ||
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
576 | smi_info->handlers->get_result(smi_info->si_sm, msg, 3); | 593 | smi_info->handlers->get_result(smi_info->si_sm, msg, 3); |
577 | if (msg[2] != 0) { | 594 | if (msg[2] != 0) { |
578 | /* Error clearing flags */ | 595 | /* Error clearing flags */ |
579 | printk(KERN_WARNING | 596 | dev_warn(smi_info->dev, |
580 | "ipmi_si: Error clearing flags: %2.2x\n", | 597 | "Error clearing flags: %2.2x\n", msg[2]); |
581 | msg[2]); | ||
582 | } | 598 | } |
583 | if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) | 599 | if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) |
584 | start_enable_irq(smi_info); | 600 | start_enable_irq(smi_info); |
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
670 | /* We got the flags from the SMI, now handle them. */ | 686 | /* We got the flags from the SMI, now handle them. */ |
671 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 687 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
672 | if (msg[2] != 0) { | 688 | if (msg[2] != 0) { |
673 | printk(KERN_WARNING | 689 | dev_warn(smi_info->dev, "Could not enable interrupts" |
674 | "ipmi_si: Could not enable interrupts" | 690 | ", failed get, using polled mode.\n"); |
675 | ", failed get, using polled mode.\n"); | ||
676 | smi_info->si_state = SI_NORMAL; | 691 | smi_info->si_state = SI_NORMAL; |
677 | } else { | 692 | } else { |
678 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 693 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
693 | 708 | ||
694 | /* We got the flags from the SMI, now handle them. */ | 709 | /* We got the flags from the SMI, now handle them. */ |
695 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 710 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
696 | if (msg[2] != 0) { | 711 | if (msg[2] != 0) |
697 | printk(KERN_WARNING | 712 | dev_warn(smi_info->dev, "Could not enable interrupts" |
698 | "ipmi_si: Could not enable interrupts" | 713 | ", failed set, using polled mode.\n"); |
699 | ", failed set, using polled mode.\n"); | 714 | else |
700 | } | 715 | smi_info->interrupt_disabled = 0; |
701 | smi_info->si_state = SI_NORMAL; | 716 | smi_info->si_state = SI_NORMAL; |
702 | break; | 717 | break; |
703 | } | 718 | } |
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
709 | /* We got the flags from the SMI, now handle them. */ | 724 | /* We got the flags from the SMI, now handle them. */ |
710 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 725 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
711 | if (msg[2] != 0) { | 726 | if (msg[2] != 0) { |
712 | printk(KERN_WARNING | 727 | dev_warn(smi_info->dev, "Could not disable interrupts" |
713 | "ipmi_si: Could not disable interrupts" | 728 | ", failed get.\n"); |
714 | ", failed get.\n"); | ||
715 | smi_info->si_state = SI_NORMAL; | 729 | smi_info->si_state = SI_NORMAL; |
716 | } else { | 730 | } else { |
717 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 731 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
733 | /* We got the flags from the SMI, now handle them. */ | 747 | /* We got the flags from the SMI, now handle them. */ |
734 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 748 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
735 | if (msg[2] != 0) { | 749 | if (msg[2] != 0) { |
736 | printk(KERN_WARNING | 750 | dev_warn(smi_info->dev, "Could not disable interrupts" |
737 | "ipmi_si: Could not disable interrupts" | 751 | ", failed set.\n"); |
738 | ", failed set.\n"); | ||
739 | } | 752 | } |
740 | smi_info->si_state = SI_NORMAL; | 753 | smi_info->si_state = SI_NORMAL; |
741 | break; | 754 | break; |
@@ -877,6 +890,11 @@ static void sender(void *send_info, | |||
877 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 890 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
878 | #endif | 891 | #endif |
879 | 892 | ||
893 | mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); | ||
894 | |||
895 | if (smi_info->thread) | ||
896 | wake_up_process(smi_info->thread); | ||
897 | |||
880 | if (smi_info->run_to_completion) { | 898 | if (smi_info->run_to_completion) { |
881 | /* | 899 | /* |
882 | * If we are running to completion, then throw it in | 900 | * If we are running to completion, then throw it in |
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data) | |||
997 | ; /* do nothing */ | 1015 | ; /* do nothing */ |
998 | else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) | 1016 | else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) |
999 | schedule(); | 1017 | schedule(); |
1018 | else if (smi_result == SI_SM_IDLE) | ||
1019 | schedule_timeout_interruptible(100); | ||
1000 | else | 1020 | else |
1001 | schedule_timeout_interruptible(0); | 1021 | schedule_timeout_interruptible(0); |
1002 | } | 1022 | } |
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data) | |||
1039 | unsigned long flags; | 1059 | unsigned long flags; |
1040 | unsigned long jiffies_now; | 1060 | unsigned long jiffies_now; |
1041 | long time_diff; | 1061 | long time_diff; |
1062 | long timeout; | ||
1042 | #ifdef DEBUG_TIMING | 1063 | #ifdef DEBUG_TIMING |
1043 | struct timeval t; | 1064 | struct timeval t; |
1044 | #endif | 1065 | #endif |
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data) | |||
1059 | 1080 | ||
1060 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 1081 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
1061 | /* Running with interrupts, only do long timeouts. */ | 1082 | /* Running with interrupts, only do long timeouts. */ |
1062 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 1083 | timeout = jiffies + SI_TIMEOUT_JIFFIES; |
1063 | smi_inc_stat(smi_info, long_timeouts); | 1084 | smi_inc_stat(smi_info, long_timeouts); |
1064 | goto do_add_timer; | 1085 | goto do_mod_timer; |
1065 | } | 1086 | } |
1066 | 1087 | ||
1067 | /* | 1088 | /* |
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data) | |||
1070 | */ | 1091 | */ |
1071 | if (smi_result == SI_SM_CALL_WITH_DELAY) { | 1092 | if (smi_result == SI_SM_CALL_WITH_DELAY) { |
1072 | smi_inc_stat(smi_info, short_timeouts); | 1093 | smi_inc_stat(smi_info, short_timeouts); |
1073 | smi_info->si_timer.expires = jiffies + 1; | 1094 | timeout = jiffies + 1; |
1074 | } else { | 1095 | } else { |
1075 | smi_inc_stat(smi_info, long_timeouts); | 1096 | smi_inc_stat(smi_info, long_timeouts); |
1076 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 1097 | timeout = jiffies + SI_TIMEOUT_JIFFIES; |
1077 | } | 1098 | } |
1078 | 1099 | ||
1079 | do_add_timer: | 1100 | do_mod_timer: |
1080 | add_timer(&(smi_info->si_timer)); | 1101 | if (smi_result != SI_SM_IDLE) |
1102 | mod_timer(&(smi_info->si_timer), timeout); | ||
1081 | } | 1103 | } |
1082 | 1104 | ||
1083 | static irqreturn_t si_irq_handler(int irq, void *data) | 1105 | static irqreturn_t si_irq_handler(int irq, void *data) |
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info, | |||
1144 | new_smi->thread = kthread_run(ipmi_thread, new_smi, | 1166 | new_smi->thread = kthread_run(ipmi_thread, new_smi, |
1145 | "kipmi%d", new_smi->intf_num); | 1167 | "kipmi%d", new_smi->intf_num); |
1146 | if (IS_ERR(new_smi->thread)) { | 1168 | if (IS_ERR(new_smi->thread)) { |
1147 | printk(KERN_NOTICE "ipmi_si_intf: Could not start" | 1169 | dev_notice(new_smi->dev, "Could not start" |
1148 | " kernel thread due to error %ld, only using" | 1170 | " kernel thread due to error %ld, only using" |
1149 | " timers to drive the interface\n", | 1171 | " timers to drive the interface\n", |
1150 | PTR_ERR(new_smi->thread)); | 1172 | PTR_ERR(new_smi->thread)); |
1151 | new_smi->thread = NULL; | 1173 | new_smi->thread = NULL; |
1152 | } | 1174 | } |
1153 | } | 1175 | } |
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info) | |||
1308 | DEVICE_NAME, | 1330 | DEVICE_NAME, |
1309 | info); | 1331 | info); |
1310 | if (rv) { | 1332 | if (rv) { |
1311 | printk(KERN_WARNING | 1333 | dev_warn(info->dev, "%s unable to claim interrupt %d," |
1312 | "ipmi_si: %s unable to claim interrupt %d," | 1334 | " running polled\n", |
1313 | " running polled\n", | 1335 | DEVICE_NAME, info->irq); |
1314 | DEVICE_NAME, info->irq); | ||
1315 | info->irq = 0; | 1336 | info->irq = 0; |
1316 | } else { | 1337 | } else { |
1317 | info->irq_cleanup = std_irq_cleanup; | 1338 | info->irq_cleanup = std_irq_cleanup; |
1318 | printk(" Using irq %d\n", info->irq); | 1339 | dev_info(info->dev, "Using irq %d\n", info->irq); |
1319 | } | 1340 | } |
1320 | 1341 | ||
1321 | return rv; | 1342 | return rv; |
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info) | |||
1406 | info->io.outputb = port_outl; | 1427 | info->io.outputb = port_outl; |
1407 | break; | 1428 | break; |
1408 | default: | 1429 | default: |
1409 | printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", | 1430 | dev_warn(info->dev, "Invalid register size: %d\n", |
1410 | info->io.regsize); | 1431 | info->io.regsize); |
1411 | return -EINVAL; | 1432 | return -EINVAL; |
1412 | } | 1433 | } |
1413 | 1434 | ||
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info) | |||
1529 | break; | 1550 | break; |
1530 | #endif | 1551 | #endif |
1531 | default: | 1552 | default: |
1532 | printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", | 1553 | dev_warn(info->dev, "Invalid register size: %d\n", |
1533 | info->io.regsize); | 1554 | info->io.regsize); |
1534 | return -EINVAL; | 1555 | return -EINVAL; |
1535 | } | 1556 | } |
1536 | 1557 | ||
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) | |||
1755 | goto out; | 1776 | goto out; |
1756 | } | 1777 | } |
1757 | 1778 | ||
1758 | info->addr_source = "hotmod"; | 1779 | info->addr_source = SI_HOTMOD; |
1759 | info->si_type = si_type; | 1780 | info->si_type = si_type; |
1760 | info->io.addr_data = addr; | 1781 | info->io.addr_data = addr; |
1761 | info->io.addr_type = addr_space; | 1782 | info->io.addr_type = addr_space; |
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) | |||
1777 | info->irq_setup = std_irq_setup; | 1798 | info->irq_setup = std_irq_setup; |
1778 | info->slave_addr = ipmb; | 1799 | info->slave_addr = ipmb; |
1779 | 1800 | ||
1780 | try_smi_init(info); | 1801 | if (!add_smi(info)) |
1802 | if (try_smi_init(info)) | ||
1803 | cleanup_one_si(info); | ||
1781 | } else { | 1804 | } else { |
1782 | /* remove */ | 1805 | /* remove */ |
1783 | struct smi_info *e, *tmp_e; | 1806 | struct smi_info *e, *tmp_e; |
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void) | |||
1813 | if (!info) | 1836 | if (!info) |
1814 | return; | 1837 | return; |
1815 | 1838 | ||
1816 | info->addr_source = "hardcoded"; | 1839 | info->addr_source = SI_HARDCODED; |
1840 | printk(KERN_INFO PFX "probing via hardcoded address\n"); | ||
1817 | 1841 | ||
1818 | if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { | 1842 | if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { |
1819 | info->si_type = SI_KCS; | 1843 | info->si_type = SI_KCS; |
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void) | |||
1822 | } else if (strcmp(si_type[i], "bt") == 0) { | 1846 | } else if (strcmp(si_type[i], "bt") == 0) { |
1823 | info->si_type = SI_BT; | 1847 | info->si_type = SI_BT; |
1824 | } else { | 1848 | } else { |
1825 | printk(KERN_WARNING | 1849 | printk(KERN_WARNING PFX "Interface type specified " |
1826 | "ipmi_si: Interface type specified " | ||
1827 | "for interface %d, was invalid: %s\n", | 1850 | "for interface %d, was invalid: %s\n", |
1828 | i, si_type[i]); | 1851 | i, si_type[i]); |
1829 | kfree(info); | 1852 | kfree(info); |
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void) | |||
1841 | info->io.addr_data = addrs[i]; | 1864 | info->io.addr_data = addrs[i]; |
1842 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 1865 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
1843 | } else { | 1866 | } else { |
1844 | printk(KERN_WARNING | 1867 | printk(KERN_WARNING PFX "Interface type specified " |
1845 | "ipmi_si: Interface type specified " | 1868 | "for interface %d, but port and address were " |
1846 | "for interface %d, " | 1869 | "not set or set to zero.\n", i); |
1847 | "but port and address were not set or " | ||
1848 | "set to zero.\n", i); | ||
1849 | kfree(info); | 1870 | kfree(info); |
1850 | continue; | 1871 | continue; |
1851 | } | 1872 | } |
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void) | |||
1863 | info->irq_setup = std_irq_setup; | 1884 | info->irq_setup = std_irq_setup; |
1864 | info->slave_addr = slave_addrs[i]; | 1885 | info->slave_addr = slave_addrs[i]; |
1865 | 1886 | ||
1866 | try_smi_init(info); | 1887 | if (!add_smi(info)) |
1888 | if (try_smi_init(info)) | ||
1889 | cleanup_one_si(info); | ||
1867 | } | 1890 | } |
1868 | } | 1891 | } |
1869 | 1892 | ||
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info) | |||
1923 | &ipmi_acpi_gpe, | 1946 | &ipmi_acpi_gpe, |
1924 | info); | 1947 | info); |
1925 | if (status != AE_OK) { | 1948 | if (status != AE_OK) { |
1926 | printk(KERN_WARNING | 1949 | dev_warn(info->dev, "%s unable to claim ACPI GPE %d," |
1927 | "ipmi_si: %s unable to claim ACPI GPE %d," | 1950 | " running polled\n", DEVICE_NAME, info->irq); |
1928 | " running polled\n", | ||
1929 | DEVICE_NAME, info->irq); | ||
1930 | info->irq = 0; | 1951 | info->irq = 0; |
1931 | return -EINVAL; | 1952 | return -EINVAL; |
1932 | } else { | 1953 | } else { |
1933 | info->irq_cleanup = acpi_gpe_irq_cleanup; | 1954 | info->irq_cleanup = acpi_gpe_irq_cleanup; |
1934 | printk(" Using ACPI GPE %d\n", info->irq); | 1955 | dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); |
1935 | return 0; | 1956 | return 0; |
1936 | } | 1957 | } |
1937 | } | 1958 | } |
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) | |||
1989 | u8 addr_space; | 2010 | u8 addr_space; |
1990 | 2011 | ||
1991 | if (spmi->IPMIlegacy != 1) { | 2012 | if (spmi->IPMIlegacy != 1) { |
1992 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); | 2013 | printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); |
1993 | return -ENODEV; | 2014 | return -ENODEV; |
1994 | } | 2015 | } |
1995 | 2016 | ||
1996 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) | 2017 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) | |||
2000 | 2021 | ||
2001 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 2022 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
2002 | if (!info) { | 2023 | if (!info) { |
2003 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); | 2024 | printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); |
2004 | return -ENOMEM; | 2025 | return -ENOMEM; |
2005 | } | 2026 | } |
2006 | 2027 | ||
2007 | info->addr_source = "SPMI"; | 2028 | info->addr_source = SI_SPMI; |
2029 | printk(KERN_INFO PFX "probing via SPMI\n"); | ||
2008 | 2030 | ||
2009 | /* Figure out the interface type. */ | 2031 | /* Figure out the interface type. */ |
2010 | switch (spmi->InterfaceType) { | 2032 | switch (spmi->InterfaceType) { |
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) | |||
2018 | info->si_type = SI_BT; | 2040 | info->si_type = SI_BT; |
2019 | break; | 2041 | break; |
2020 | default: | 2042 | default: |
2021 | printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", | 2043 | printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", |
2022 | spmi->InterfaceType); | 2044 | spmi->InterfaceType); |
2023 | kfree(info); | 2045 | kfree(info); |
2024 | return -EIO; | 2046 | return -EIO; |
2025 | } | 2047 | } |
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) | |||
2055 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2077 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2056 | } else { | 2078 | } else { |
2057 | kfree(info); | 2079 | kfree(info); |
2058 | printk(KERN_WARNING | 2080 | printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); |
2059 | "ipmi_si: Unknown ACPI I/O Address type\n"); | ||
2060 | return -EIO; | 2081 | return -EIO; |
2061 | } | 2082 | } |
2062 | info->io.addr_data = spmi->addr.address; | 2083 | info->io.addr_data = spmi->addr.address; |
2063 | 2084 | ||
2064 | try_smi_init(info); | 2085 | add_smi(info); |
2065 | 2086 | ||
2066 | return 0; | 2087 | return 0; |
2067 | } | 2088 | } |
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
2093 | { | 2114 | { |
2094 | struct acpi_device *acpi_dev; | 2115 | struct acpi_device *acpi_dev; |
2095 | struct smi_info *info; | 2116 | struct smi_info *info; |
2117 | struct resource *res; | ||
2096 | acpi_handle handle; | 2118 | acpi_handle handle; |
2097 | acpi_status status; | 2119 | acpi_status status; |
2098 | unsigned long long tmp; | 2120 | unsigned long long tmp; |
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
2105 | if (!info) | 2127 | if (!info) |
2106 | return -ENOMEM; | 2128 | return -ENOMEM; |
2107 | 2129 | ||
2108 | info->addr_source = "ACPI"; | 2130 | info->addr_source = SI_ACPI; |
2131 | printk(KERN_INFO PFX "probing via ACPI\n"); | ||
2109 | 2132 | ||
2110 | handle = acpi_dev->handle; | 2133 | handle = acpi_dev->handle; |
2111 | 2134 | ||
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
2125 | info->si_type = SI_BT; | 2148 | info->si_type = SI_BT; |
2126 | break; | 2149 | break; |
2127 | default: | 2150 | default: |
2128 | dev_info(&dev->dev, "unknown interface type %lld\n", tmp); | 2151 | dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); |
2129 | goto err_free; | 2152 | goto err_free; |
2130 | } | 2153 | } |
2131 | 2154 | ||
2132 | if (pnp_port_valid(dev, 0)) { | 2155 | res = pnp_get_resource(dev, IORESOURCE_IO, 0); |
2156 | if (res) { | ||
2133 | info->io_setup = port_setup; | 2157 | info->io_setup = port_setup; |
2134 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2158 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2135 | info->io.addr_data = pnp_port_start(dev, 0); | ||
2136 | } else if (pnp_mem_valid(dev, 0)) { | ||
2137 | info->io_setup = mem_setup; | ||
2138 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | ||
2139 | info->io.addr_data = pnp_mem_start(dev, 0); | ||
2140 | } else { | 2159 | } else { |
2160 | res = pnp_get_resource(dev, IORESOURCE_MEM, 0); | ||
2161 | if (res) { | ||
2162 | info->io_setup = mem_setup; | ||
2163 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | ||
2164 | } | ||
2165 | } | ||
2166 | if (!res) { | ||
2141 | dev_err(&dev->dev, "no I/O or memory address\n"); | 2167 | dev_err(&dev->dev, "no I/O or memory address\n"); |
2142 | goto err_free; | 2168 | goto err_free; |
2143 | } | 2169 | } |
2170 | info->io.addr_data = res->start; | ||
2144 | 2171 | ||
2145 | info->io.regspacing = DEFAULT_REGSPACING; | 2172 | info->io.regspacing = DEFAULT_REGSPACING; |
2146 | info->io.regsize = DEFAULT_REGSPACING; | 2173 | info->io.regsize = DEFAULT_REGSPACING; |
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
2156 | info->irq_setup = std_irq_setup; | 2183 | info->irq_setup = std_irq_setup; |
2157 | } | 2184 | } |
2158 | 2185 | ||
2159 | info->dev = &acpi_dev->dev; | 2186 | info->dev = &dev->dev; |
2160 | pnp_set_drvdata(dev, info); | 2187 | pnp_set_drvdata(dev, info); |
2161 | 2188 | ||
2162 | return try_smi_init(info); | 2189 | dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", |
2190 | res, info->io.regsize, info->io.regspacing, | ||
2191 | info->irq); | ||
2192 | |||
2193 | return add_smi(info); | ||
2163 | 2194 | ||
2164 | err_free: | 2195 | err_free: |
2165 | kfree(info); | 2196 | kfree(info); |
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) | |||
2264 | 2295 | ||
2265 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 2296 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
2266 | if (!info) { | 2297 | if (!info) { |
2267 | printk(KERN_ERR | 2298 | printk(KERN_ERR PFX "Could not allocate SI data\n"); |
2268 | "ipmi_si: Could not allocate SI data\n"); | ||
2269 | return; | 2299 | return; |
2270 | } | 2300 | } |
2271 | 2301 | ||
2272 | info->addr_source = "SMBIOS"; | 2302 | info->addr_source = SI_SMBIOS; |
2303 | printk(KERN_INFO PFX "probing via SMBIOS\n"); | ||
2273 | 2304 | ||
2274 | switch (ipmi_data->type) { | 2305 | switch (ipmi_data->type) { |
2275 | case 0x01: /* KCS */ | 2306 | case 0x01: /* KCS */ |
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) | |||
2299 | 2330 | ||
2300 | default: | 2331 | default: |
2301 | kfree(info); | 2332 | kfree(info); |
2302 | printk(KERN_WARNING | 2333 | printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", |
2303 | "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n", | ||
2304 | ipmi_data->addr_space); | 2334 | ipmi_data->addr_space); |
2305 | return; | 2335 | return; |
2306 | } | 2336 | } |
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) | |||
2318 | if (info->irq) | 2348 | if (info->irq) |
2319 | info->irq_setup = std_irq_setup; | 2349 | info->irq_setup = std_irq_setup; |
2320 | 2350 | ||
2321 | try_smi_init(info); | 2351 | add_smi(info); |
2322 | } | 2352 | } |
2323 | 2353 | ||
2324 | static void __devinit dmi_find_bmc(void) | 2354 | static void __devinit dmi_find_bmc(void) |
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, | |||
2368 | if (!info) | 2398 | if (!info) |
2369 | return -ENOMEM; | 2399 | return -ENOMEM; |
2370 | 2400 | ||
2371 | info->addr_source = "PCI"; | 2401 | info->addr_source = SI_PCI; |
2402 | dev_info(&pdev->dev, "probing via PCI"); | ||
2372 | 2403 | ||
2373 | switch (class_type) { | 2404 | switch (class_type) { |
2374 | case PCI_ERMC_CLASSCODE_TYPE_SMIC: | 2405 | case PCI_ERMC_CLASSCODE_TYPE_SMIC: |
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, | |||
2385 | 2416 | ||
2386 | default: | 2417 | default: |
2387 | kfree(info); | 2418 | kfree(info); |
2388 | printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", | 2419 | dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); |
2389 | pci_name(pdev), class_type); | ||
2390 | return -ENOMEM; | 2420 | return -ENOMEM; |
2391 | } | 2421 | } |
2392 | 2422 | ||
2393 | rv = pci_enable_device(pdev); | 2423 | rv = pci_enable_device(pdev); |
2394 | if (rv) { | 2424 | if (rv) { |
2395 | printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", | 2425 | dev_err(&pdev->dev, "couldn't enable PCI device\n"); |
2396 | pci_name(pdev)); | ||
2397 | kfree(info); | 2426 | kfree(info); |
2398 | return rv; | 2427 | return rv; |
2399 | } | 2428 | } |
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, | |||
2421 | info->dev = &pdev->dev; | 2450 | info->dev = &pdev->dev; |
2422 | pci_set_drvdata(pdev, info); | 2451 | pci_set_drvdata(pdev, info); |
2423 | 2452 | ||
2424 | return try_smi_init(info); | 2453 | dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", |
2454 | &pdev->resource[0], info->io.regsize, info->io.regspacing, | ||
2455 | info->irq); | ||
2456 | |||
2457 | return add_smi(info); | ||
2425 | } | 2458 | } |
2426 | 2459 | ||
2427 | static void __devexit ipmi_pci_remove(struct pci_dev *pdev) | 2460 | static void __devexit ipmi_pci_remove(struct pci_dev *pdev) |
@@ -2469,11 +2502,11 @@ static int __devinit ipmi_of_probe(struct of_device *dev, | |||
2469 | struct smi_info *info; | 2502 | struct smi_info *info; |
2470 | struct resource resource; | 2503 | struct resource resource; |
2471 | const int *regsize, *regspacing, *regshift; | 2504 | const int *regsize, *regspacing, *regshift; |
2472 | struct device_node *np = dev->node; | 2505 | struct device_node *np = dev->dev.of_node; |
2473 | int ret; | 2506 | int ret; |
2474 | int proplen; | 2507 | int proplen; |
2475 | 2508 | ||
2476 | dev_info(&dev->dev, PFX "probing via device tree\n"); | 2509 | dev_info(&dev->dev, "probing via device tree\n"); |
2477 | 2510 | ||
2478 | ret = of_address_to_resource(np, 0, &resource); | 2511 | ret = of_address_to_resource(np, 0, &resource); |
2479 | if (ret) { | 2512 | if (ret) { |
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev, | |||
2503 | 2536 | ||
2504 | if (!info) { | 2537 | if (!info) { |
2505 | dev_err(&dev->dev, | 2538 | dev_err(&dev->dev, |
2506 | PFX "could not allocate memory for OF probe\n"); | 2539 | "could not allocate memory for OF probe\n"); |
2507 | return -ENOMEM; | 2540 | return -ENOMEM; |
2508 | } | 2541 | } |
2509 | 2542 | ||
2510 | info->si_type = (enum si_type) match->data; | 2543 | info->si_type = (enum si_type) match->data; |
2511 | info->addr_source = "device-tree"; | 2544 | info->addr_source = SI_DEVICETREE; |
2512 | info->irq_setup = std_irq_setup; | 2545 | info->irq_setup = std_irq_setup; |
2513 | 2546 | ||
2514 | if (resource.flags & IORESOURCE_IO) { | 2547 | if (resource.flags & IORESOURCE_IO) { |
@@ -2525,16 +2558,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev, | |||
2525 | info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING; | 2558 | info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING; |
2526 | info->io.regshift = regshift ? *regshift : 0; | 2559 | info->io.regshift = regshift ? *regshift : 0; |
2527 | 2560 | ||
2528 | info->irq = irq_of_parse_and_map(dev->node, 0); | 2561 | info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); |
2529 | info->dev = &dev->dev; | 2562 | info->dev = &dev->dev; |
2530 | 2563 | ||
2531 | dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", | 2564 | dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", |
2532 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2565 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2533 | info->irq); | 2566 | info->irq); |
2534 | 2567 | ||
2535 | dev_set_drvdata(&dev->dev, info); | 2568 | dev_set_drvdata(&dev->dev, info); |
2536 | 2569 | ||
2537 | return try_smi_init(info); | 2570 | return add_smi(info); |
2538 | } | 2571 | } |
2539 | 2572 | ||
2540 | static int __devexit ipmi_of_remove(struct of_device *dev) | 2573 | static int __devexit ipmi_of_remove(struct of_device *dev) |
@@ -2555,8 +2588,11 @@ static struct of_device_id ipmi_match[] = | |||
2555 | }; | 2588 | }; |
2556 | 2589 | ||
2557 | static struct of_platform_driver ipmi_of_platform_driver = { | 2590 | static struct of_platform_driver ipmi_of_platform_driver = { |
2558 | .name = "ipmi", | 2591 | .driver = { |
2559 | .match_table = ipmi_match, | 2592 | .name = "ipmi", |
2593 | .owner = THIS_MODULE, | ||
2594 | .of_match_table = ipmi_match, | ||
2595 | }, | ||
2560 | .probe = ipmi_of_probe, | 2596 | .probe = ipmi_of_probe, |
2561 | .remove = __devexit_p(ipmi_of_remove), | 2597 | .remove = __devexit_p(ipmi_of_remove), |
2562 | }; | 2598 | }; |
@@ -2640,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) | |||
2640 | 2676 | ||
2641 | rv = wait_for_msg_done(smi_info); | 2677 | rv = wait_for_msg_done(smi_info); |
2642 | if (rv) { | 2678 | if (rv) { |
2643 | printk(KERN_WARNING | 2679 | printk(KERN_WARNING PFX "Error getting response from get" |
2644 | "ipmi_si: Error getting response from get global," | 2680 | " global enables command, the event buffer is not" |
2645 | " enables command, the event buffer is not" | ||
2646 | " enabled.\n"); | 2681 | " enabled.\n"); |
2647 | goto out; | 2682 | goto out; |
2648 | } | 2683 | } |
@@ -2654,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) | |||
2654 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || | 2689 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || |
2655 | resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || | 2690 | resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || |
2656 | resp[2] != 0) { | 2691 | resp[2] != 0) { |
2657 | printk(KERN_WARNING | 2692 | printk(KERN_WARNING PFX "Invalid return from get global" |
2658 | "ipmi_si: Invalid return from get global" | 2693 | " enables command, cannot enable the event buffer.\n"); |
2659 | " enables command, cannot enable the event" | ||
2660 | " buffer.\n"); | ||
2661 | rv = -EINVAL; | 2694 | rv = -EINVAL; |
2662 | goto out; | 2695 | goto out; |
2663 | } | 2696 | } |
@@ -2673,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) | |||
2673 | 2706 | ||
2674 | rv = wait_for_msg_done(smi_info); | 2707 | rv = wait_for_msg_done(smi_info); |
2675 | if (rv) { | 2708 | if (rv) { |
2676 | printk(KERN_WARNING | 2709 | printk(KERN_WARNING PFX "Error getting response from set" |
2677 | "ipmi_si: Error getting response from set global," | 2710 | " global, enables command, the event buffer is not" |
2678 | " enables command, the event buffer is not" | ||
2679 | " enabled.\n"); | 2711 | " enabled.\n"); |
2680 | goto out; | 2712 | goto out; |
2681 | } | 2713 | } |
@@ -2686,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) | |||
2686 | if (resp_len < 3 || | 2718 | if (resp_len < 3 || |
2687 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || | 2719 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || |
2688 | resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { | 2720 | resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { |
2689 | printk(KERN_WARNING | 2721 | printk(KERN_WARNING PFX "Invalid return from get global," |
2690 | "ipmi_si: Invalid return from get global," | 2722 | "enables command, not enable the event buffer.\n"); |
2691 | "enables command, not enable the event" | ||
2692 | " buffer.\n"); | ||
2693 | rv = -EINVAL; | 2723 | rv = -EINVAL; |
2694 | goto out; | 2724 | goto out; |
2695 | } | 2725 | } |
@@ -2948,7 +2978,7 @@ static __devinit void default_find_bmc(void) | |||
2948 | if (!info) | 2978 | if (!info) |
2949 | return; | 2979 | return; |
2950 | 2980 | ||
2951 | info->addr_source = NULL; | 2981 | info->addr_source = SI_DEFAULT; |
2952 | 2982 | ||
2953 | info->si_type = ipmi_defaults[i].type; | 2983 | info->si_type = ipmi_defaults[i].type; |
2954 | info->io_setup = port_setup; | 2984 | info->io_setup = port_setup; |
@@ -2960,14 +2990,16 @@ static __devinit void default_find_bmc(void) | |||
2960 | info->io.regsize = DEFAULT_REGSPACING; | 2990 | info->io.regsize = DEFAULT_REGSPACING; |
2961 | info->io.regshift = 0; | 2991 | info->io.regshift = 0; |
2962 | 2992 | ||
2963 | if (try_smi_init(info) == 0) { | 2993 | if (add_smi(info) == 0) { |
2964 | /* Found one... */ | 2994 | if ((try_smi_init(info)) == 0) { |
2965 | printk(KERN_INFO "ipmi_si: Found default %s state" | 2995 | /* Found one... */ |
2966 | " machine at %s address 0x%lx\n", | 2996 | printk(KERN_INFO PFX "Found default %s" |
2967 | si_to_str[info->si_type], | 2997 | " state machine at %s address 0x%lx\n", |
2968 | addr_space_to_str[info->io.addr_type], | 2998 | si_to_str[info->si_type], |
2969 | info->io.addr_data); | 2999 | addr_space_to_str[info->io.addr_type], |
2970 | return; | 3000 | info->io.addr_data); |
3001 | } else | ||
3002 | cleanup_one_si(info); | ||
2971 | } | 3003 | } |
2972 | } | 3004 | } |
2973 | } | 3005 | } |
@@ -2986,34 +3018,48 @@ static int is_new_interface(struct smi_info *info) | |||
2986 | return 1; | 3018 | return 1; |
2987 | } | 3019 | } |
2988 | 3020 | ||
2989 | static int try_smi_init(struct smi_info *new_smi) | 3021 | static int add_smi(struct smi_info *new_smi) |
2990 | { | 3022 | { |
2991 | int rv; | 3023 | int rv = 0; |
2992 | int i; | ||
2993 | |||
2994 | if (new_smi->addr_source) { | ||
2995 | printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" | ||
2996 | " machine at %s address 0x%lx, slave address 0x%x," | ||
2997 | " irq %d\n", | ||
2998 | new_smi->addr_source, | ||
2999 | si_to_str[new_smi->si_type], | ||
3000 | addr_space_to_str[new_smi->io.addr_type], | ||
3001 | new_smi->io.addr_data, | ||
3002 | new_smi->slave_addr, new_smi->irq); | ||
3003 | } | ||
3004 | 3024 | ||
3025 | printk(KERN_INFO PFX "Adding %s-specified %s state machine", | ||
3026 | ipmi_addr_src_to_str[new_smi->addr_source], | ||
3027 | si_to_str[new_smi->si_type]); | ||
3005 | mutex_lock(&smi_infos_lock); | 3028 | mutex_lock(&smi_infos_lock); |
3006 | if (!is_new_interface(new_smi)) { | 3029 | if (!is_new_interface(new_smi)) { |
3007 | printk(KERN_WARNING "ipmi_si: duplicate interface\n"); | 3030 | printk(KERN_CONT PFX "duplicate interface\n"); |
3008 | rv = -EBUSY; | 3031 | rv = -EBUSY; |
3009 | goto out_err; | 3032 | goto out_err; |
3010 | } | 3033 | } |
3011 | 3034 | ||
3035 | printk(KERN_CONT "\n"); | ||
3036 | |||
3012 | /* So we know not to free it unless we have allocated one. */ | 3037 | /* So we know not to free it unless we have allocated one. */ |
3013 | new_smi->intf = NULL; | 3038 | new_smi->intf = NULL; |
3014 | new_smi->si_sm = NULL; | 3039 | new_smi->si_sm = NULL; |
3015 | new_smi->handlers = NULL; | 3040 | new_smi->handlers = NULL; |
3016 | 3041 | ||
3042 | list_add_tail(&new_smi->link, &smi_infos); | ||
3043 | |||
3044 | out_err: | ||
3045 | mutex_unlock(&smi_infos_lock); | ||
3046 | return rv; | ||
3047 | } | ||
3048 | |||
3049 | static int try_smi_init(struct smi_info *new_smi) | ||
3050 | { | ||
3051 | int rv = 0; | ||
3052 | int i; | ||
3053 | |||
3054 | printk(KERN_INFO PFX "Trying %s-specified %s state" | ||
3055 | " machine at %s address 0x%lx, slave address 0x%x," | ||
3056 | " irq %d\n", | ||
3057 | ipmi_addr_src_to_str[new_smi->addr_source], | ||
3058 | si_to_str[new_smi->si_type], | ||
3059 | addr_space_to_str[new_smi->io.addr_type], | ||
3060 | new_smi->io.addr_data, | ||
3061 | new_smi->slave_addr, new_smi->irq); | ||
3062 | |||
3017 | switch (new_smi->si_type) { | 3063 | switch (new_smi->si_type) { |
3018 | case SI_KCS: | 3064 | case SI_KCS: |
3019 | new_smi->handlers = &kcs_smi_handlers; | 3065 | new_smi->handlers = &kcs_smi_handlers; |
@@ -3036,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3036 | /* Allocate the state machine's data and initialize it. */ | 3082 | /* Allocate the state machine's data and initialize it. */ |
3037 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); | 3083 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); |
3038 | if (!new_smi->si_sm) { | 3084 | if (!new_smi->si_sm) { |
3039 | printk(KERN_ERR "Could not allocate state machine memory\n"); | 3085 | printk(KERN_ERR PFX |
3086 | "Could not allocate state machine memory\n"); | ||
3040 | rv = -ENOMEM; | 3087 | rv = -ENOMEM; |
3041 | goto out_err; | 3088 | goto out_err; |
3042 | } | 3089 | } |
@@ -3046,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3046 | /* Now that we know the I/O size, we can set up the I/O. */ | 3093 | /* Now that we know the I/O size, we can set up the I/O. */ |
3047 | rv = new_smi->io_setup(new_smi); | 3094 | rv = new_smi->io_setup(new_smi); |
3048 | if (rv) { | 3095 | if (rv) { |
3049 | printk(KERN_ERR "Could not set up I/O space\n"); | 3096 | printk(KERN_ERR PFX "Could not set up I/O space\n"); |
3050 | goto out_err; | 3097 | goto out_err; |
3051 | } | 3098 | } |
3052 | 3099 | ||
@@ -3056,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3056 | /* Do low-level detection first. */ | 3103 | /* Do low-level detection first. */ |
3057 | if (new_smi->handlers->detect(new_smi->si_sm)) { | 3104 | if (new_smi->handlers->detect(new_smi->si_sm)) { |
3058 | if (new_smi->addr_source) | 3105 | if (new_smi->addr_source) |
3059 | printk(KERN_INFO "ipmi_si: Interface detection" | 3106 | printk(KERN_INFO PFX "Interface detection failed\n"); |
3060 | " failed\n"); | ||
3061 | rv = -ENODEV; | 3107 | rv = -ENODEV; |
3062 | goto out_err; | 3108 | goto out_err; |
3063 | } | 3109 | } |
@@ -3069,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3069 | rv = try_get_dev_id(new_smi); | 3115 | rv = try_get_dev_id(new_smi); |
3070 | if (rv) { | 3116 | if (rv) { |
3071 | if (new_smi->addr_source) | 3117 | if (new_smi->addr_source) |
3072 | printk(KERN_INFO "ipmi_si: There appears to be no BMC" | 3118 | printk(KERN_INFO PFX "There appears to be no BMC" |
3073 | " at this location\n"); | 3119 | " at this location\n"); |
3074 | goto out_err; | 3120 | goto out_err; |
3075 | } | 3121 | } |
@@ -3085,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3085 | for (i = 0; i < SI_NUM_STATS; i++) | 3131 | for (i = 0; i < SI_NUM_STATS; i++) |
3086 | atomic_set(&new_smi->stats[i], 0); | 3132 | atomic_set(&new_smi->stats[i], 0); |
3087 | 3133 | ||
3088 | new_smi->interrupt_disabled = 0; | 3134 | new_smi->interrupt_disabled = 1; |
3089 | atomic_set(&new_smi->stop_operation, 0); | 3135 | atomic_set(&new_smi->stop_operation, 0); |
3090 | new_smi->intf_num = smi_num; | 3136 | new_smi->intf_num = smi_num; |
3091 | smi_num++; | 3137 | smi_num++; |
@@ -3111,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3111 | new_smi->pdev = platform_device_alloc("ipmi_si", | 3157 | new_smi->pdev = platform_device_alloc("ipmi_si", |
3112 | new_smi->intf_num); | 3158 | new_smi->intf_num); |
3113 | if (!new_smi->pdev) { | 3159 | if (!new_smi->pdev) { |
3114 | printk(KERN_ERR | 3160 | printk(KERN_ERR PFX |
3115 | "ipmi_si_intf:" | 3161 | "Unable to allocate platform device\n"); |
3116 | " Unable to allocate platform device\n"); | ||
3117 | goto out_err; | 3162 | goto out_err; |
3118 | } | 3163 | } |
3119 | new_smi->dev = &new_smi->pdev->dev; | 3164 | new_smi->dev = &new_smi->pdev->dev; |
@@ -3121,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3121 | 3166 | ||
3122 | rv = platform_device_add(new_smi->pdev); | 3167 | rv = platform_device_add(new_smi->pdev); |
3123 | if (rv) { | 3168 | if (rv) { |
3124 | printk(KERN_ERR | 3169 | printk(KERN_ERR PFX |
3125 | "ipmi_si_intf:" | 3170 | "Unable to register system interface device:" |
3126 | " Unable to register system interface device:" | ||
3127 | " %d\n", | 3171 | " %d\n", |
3128 | rv); | 3172 | rv); |
3129 | goto out_err; | 3173 | goto out_err; |
@@ -3138,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3138 | "bmc", | 3182 | "bmc", |
3139 | new_smi->slave_addr); | 3183 | new_smi->slave_addr); |
3140 | if (rv) { | 3184 | if (rv) { |
3141 | printk(KERN_ERR | 3185 | dev_err(new_smi->dev, "Unable to register device: error %d\n", |
3142 | "ipmi_si: Unable to register device: error %d\n", | 3186 | rv); |
3143 | rv); | ||
3144 | goto out_err_stop_timer; | 3187 | goto out_err_stop_timer; |
3145 | } | 3188 | } |
3146 | 3189 | ||
@@ -3148,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3148 | type_file_read_proc, | 3191 | type_file_read_proc, |
3149 | new_smi); | 3192 | new_smi); |
3150 | if (rv) { | 3193 | if (rv) { |
3151 | printk(KERN_ERR | 3194 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
3152 | "ipmi_si: Unable to create proc entry: %d\n", | ||
3153 | rv); | ||
3154 | goto out_err_stop_timer; | 3195 | goto out_err_stop_timer; |
3155 | } | 3196 | } |
3156 | 3197 | ||
@@ -3158,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3158 | stat_file_read_proc, | 3199 | stat_file_read_proc, |
3159 | new_smi); | 3200 | new_smi); |
3160 | if (rv) { | 3201 | if (rv) { |
3161 | printk(KERN_ERR | 3202 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
3162 | "ipmi_si: Unable to create proc entry: %d\n", | ||
3163 | rv); | ||
3164 | goto out_err_stop_timer; | 3203 | goto out_err_stop_timer; |
3165 | } | 3204 | } |
3166 | 3205 | ||
@@ -3168,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3168 | param_read_proc, | 3207 | param_read_proc, |
3169 | new_smi); | 3208 | new_smi); |
3170 | if (rv) { | 3209 | if (rv) { |
3171 | printk(KERN_ERR | 3210 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
3172 | "ipmi_si: Unable to create proc entry: %d\n", | ||
3173 | rv); | ||
3174 | goto out_err_stop_timer; | 3211 | goto out_err_stop_timer; |
3175 | } | 3212 | } |
3176 | 3213 | ||
3177 | list_add_tail(&new_smi->link, &smi_infos); | 3214 | dev_info(new_smi->dev, "IPMI %s interface initialized\n", |
3178 | 3215 | si_to_str[new_smi->si_type]); | |
3179 | mutex_unlock(&smi_infos_lock); | ||
3180 | |||
3181 | printk(KERN_INFO "IPMI %s interface initialized\n", | ||
3182 | si_to_str[new_smi->si_type]); | ||
3183 | 3216 | ||
3184 | return 0; | 3217 | return 0; |
3185 | 3218 | ||
@@ -3188,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3188 | wait_for_timer_and_thread(new_smi); | 3221 | wait_for_timer_and_thread(new_smi); |
3189 | 3222 | ||
3190 | out_err: | 3223 | out_err: |
3191 | if (new_smi->intf) | 3224 | new_smi->interrupt_disabled = 1; |
3225 | |||
3226 | if (new_smi->intf) { | ||
3192 | ipmi_unregister_smi(new_smi->intf); | 3227 | ipmi_unregister_smi(new_smi->intf); |
3228 | new_smi->intf = NULL; | ||
3229 | } | ||
3193 | 3230 | ||
3194 | if (new_smi->irq_cleanup) | 3231 | if (new_smi->irq_cleanup) { |
3195 | new_smi->irq_cleanup(new_smi); | 3232 | new_smi->irq_cleanup(new_smi); |
3233 | new_smi->irq_cleanup = NULL; | ||
3234 | } | ||
3196 | 3235 | ||
3197 | /* | 3236 | /* |
3198 | * Wait until we know that we are out of any interrupt | 3237 | * Wait until we know that we are out of any interrupt |
@@ -3205,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3205 | if (new_smi->handlers) | 3244 | if (new_smi->handlers) |
3206 | new_smi->handlers->cleanup(new_smi->si_sm); | 3245 | new_smi->handlers->cleanup(new_smi->si_sm); |
3207 | kfree(new_smi->si_sm); | 3246 | kfree(new_smi->si_sm); |
3247 | new_smi->si_sm = NULL; | ||
3208 | } | 3248 | } |
3209 | if (new_smi->addr_source_cleanup) | 3249 | if (new_smi->addr_source_cleanup) { |
3210 | new_smi->addr_source_cleanup(new_smi); | 3250 | new_smi->addr_source_cleanup(new_smi); |
3211 | if (new_smi->io_cleanup) | 3251 | new_smi->addr_source_cleanup = NULL; |
3252 | } | ||
3253 | if (new_smi->io_cleanup) { | ||
3212 | new_smi->io_cleanup(new_smi); | 3254 | new_smi->io_cleanup(new_smi); |
3255 | new_smi->io_cleanup = NULL; | ||
3256 | } | ||
3213 | 3257 | ||
3214 | if (new_smi->dev_registered) | 3258 | if (new_smi->dev_registered) { |
3215 | platform_device_unregister(new_smi->pdev); | 3259 | platform_device_unregister(new_smi->pdev); |
3216 | 3260 | new_smi->dev_registered = 0; | |
3217 | kfree(new_smi); | 3261 | } |
3218 | |||
3219 | mutex_unlock(&smi_infos_lock); | ||
3220 | 3262 | ||
3221 | return rv; | 3263 | return rv; |
3222 | } | 3264 | } |
@@ -3226,6 +3268,8 @@ static __devinit int init_ipmi_si(void) | |||
3226 | int i; | 3268 | int i; |
3227 | char *str; | 3269 | char *str; |
3228 | int rv; | 3270 | int rv; |
3271 | struct smi_info *e; | ||
3272 | enum ipmi_addr_src type = SI_INVALID; | ||
3229 | 3273 | ||
3230 | if (initialized) | 3274 | if (initialized) |
3231 | return 0; | 3275 | return 0; |
@@ -3234,9 +3278,7 @@ static __devinit int init_ipmi_si(void) | |||
3234 | /* Register the device drivers. */ | 3278 | /* Register the device drivers. */ |
3235 | rv = driver_register(&ipmi_driver.driver); | 3279 | rv = driver_register(&ipmi_driver.driver); |
3236 | if (rv) { | 3280 | if (rv) { |
3237 | printk(KERN_ERR | 3281 | printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); |
3238 | "init_ipmi_si: Unable to register driver: %d\n", | ||
3239 | rv); | ||
3240 | return rv; | 3282 | return rv; |
3241 | } | 3283 | } |
3242 | 3284 | ||
@@ -3260,38 +3302,81 @@ static __devinit int init_ipmi_si(void) | |||
3260 | 3302 | ||
3261 | hardcode_find_bmc(); | 3303 | hardcode_find_bmc(); |
3262 | 3304 | ||
3263 | #ifdef CONFIG_DMI | 3305 | /* If the user gave us a device, they presumably want us to use it */ |
3264 | dmi_find_bmc(); | 3306 | mutex_lock(&smi_infos_lock); |
3265 | #endif | 3307 | if (!list_empty(&smi_infos)) { |
3308 | mutex_unlock(&smi_infos_lock); | ||
3309 | return 0; | ||
3310 | } | ||
3311 | mutex_unlock(&smi_infos_lock); | ||
3266 | 3312 | ||
3267 | #ifdef CONFIG_ACPI | 3313 | #ifdef CONFIG_PCI |
3268 | spmi_find_bmc(); | 3314 | rv = pci_register_driver(&ipmi_pci_driver); |
3315 | if (rv) | ||
3316 | printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); | ||
3269 | #endif | 3317 | #endif |
3318 | |||
3270 | #ifdef CONFIG_ACPI | 3319 | #ifdef CONFIG_ACPI |
3271 | pnp_register_driver(&ipmi_pnp_driver); | 3320 | pnp_register_driver(&ipmi_pnp_driver); |
3272 | #endif | 3321 | #endif |
3273 | 3322 | ||
3274 | #ifdef CONFIG_PCI | 3323 | #ifdef CONFIG_DMI |
3275 | rv = pci_register_driver(&ipmi_pci_driver); | 3324 | dmi_find_bmc(); |
3276 | if (rv) | 3325 | #endif |
3277 | printk(KERN_ERR | 3326 | |
3278 | "init_ipmi_si: Unable to register PCI driver: %d\n", | 3327 | #ifdef CONFIG_ACPI |
3279 | rv); | 3328 | spmi_find_bmc(); |
3280 | #endif | 3329 | #endif |
3281 | 3330 | ||
3282 | #ifdef CONFIG_PPC_OF | 3331 | #ifdef CONFIG_PPC_OF |
3283 | of_register_platform_driver(&ipmi_of_platform_driver); | 3332 | of_register_platform_driver(&ipmi_of_platform_driver); |
3284 | #endif | 3333 | #endif |
3285 | 3334 | ||
3335 | /* We prefer devices with interrupts, but in the case of a machine | ||
3336 | with multiple BMCs we assume that there will be several instances | ||
3337 | of a given type so if we succeed in registering a type then also | ||
3338 | try to register everything else of the same type */ | ||
3339 | |||
3340 | mutex_lock(&smi_infos_lock); | ||
3341 | list_for_each_entry(e, &smi_infos, link) { | ||
3342 | /* Try to register a device if it has an IRQ and we either | ||
3343 | haven't successfully registered a device yet or this | ||
3344 | device has the same type as one we successfully registered */ | ||
3345 | if (e->irq && (!type || e->addr_source == type)) { | ||
3346 | if (!try_smi_init(e)) { | ||
3347 | type = e->addr_source; | ||
3348 | } | ||
3349 | } | ||
3350 | } | ||
3351 | |||
3352 | /* type will only have been set if we successfully registered an si */ | ||
3353 | if (type) { | ||
3354 | mutex_unlock(&smi_infos_lock); | ||
3355 | return 0; | ||
3356 | } | ||
3357 | |||
3358 | /* Fall back to the preferred device */ | ||
3359 | |||
3360 | list_for_each_entry(e, &smi_infos, link) { | ||
3361 | if (!e->irq && (!type || e->addr_source == type)) { | ||
3362 | if (!try_smi_init(e)) { | ||
3363 | type = e->addr_source; | ||
3364 | } | ||
3365 | } | ||
3366 | } | ||
3367 | mutex_unlock(&smi_infos_lock); | ||
3368 | |||
3369 | if (type) | ||
3370 | return 0; | ||
3371 | |||
3286 | if (si_trydefaults) { | 3372 | if (si_trydefaults) { |
3287 | mutex_lock(&smi_infos_lock); | 3373 | mutex_lock(&smi_infos_lock); |
3288 | if (list_empty(&smi_infos)) { | 3374 | if (list_empty(&smi_infos)) { |
3289 | /* No BMC was found, try defaults. */ | 3375 | /* No BMC was found, try defaults. */ |
3290 | mutex_unlock(&smi_infos_lock); | 3376 | mutex_unlock(&smi_infos_lock); |
3291 | default_find_bmc(); | 3377 | default_find_bmc(); |
3292 | } else { | 3378 | } else |
3293 | mutex_unlock(&smi_infos_lock); | 3379 | mutex_unlock(&smi_infos_lock); |
3294 | } | ||
3295 | } | 3380 | } |
3296 | 3381 | ||
3297 | mutex_lock(&smi_infos_lock); | 3382 | mutex_lock(&smi_infos_lock); |
@@ -3305,8 +3390,8 @@ static __devinit int init_ipmi_si(void) | |||
3305 | of_unregister_platform_driver(&ipmi_of_platform_driver); | 3390 | of_unregister_platform_driver(&ipmi_of_platform_driver); |
3306 | #endif | 3391 | #endif |
3307 | driver_unregister(&ipmi_driver.driver); | 3392 | driver_unregister(&ipmi_driver.driver); |
3308 | printk(KERN_WARNING | 3393 | printk(KERN_WARNING PFX |
3309 | "ipmi_si: Unable to find any System Interface(s)\n"); | 3394 | "Unable to find any System Interface(s)\n"); |
3310 | return -ENODEV; | 3395 | return -ENODEV; |
3311 | } else { | 3396 | } else { |
3312 | mutex_unlock(&smi_infos_lock); | 3397 | mutex_unlock(&smi_infos_lock); |
@@ -3317,7 +3402,7 @@ module_init(init_ipmi_si); | |||
3317 | 3402 | ||
3318 | static void cleanup_one_si(struct smi_info *to_clean) | 3403 | static void cleanup_one_si(struct smi_info *to_clean) |
3319 | { | 3404 | { |
3320 | int rv; | 3405 | int rv = 0; |
3321 | unsigned long flags; | 3406 | unsigned long flags; |
3322 | 3407 | ||
3323 | if (!to_clean) | 3408 | if (!to_clean) |
@@ -3361,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
3361 | schedule_timeout_uninterruptible(1); | 3446 | schedule_timeout_uninterruptible(1); |
3362 | } | 3447 | } |
3363 | 3448 | ||
3364 | rv = ipmi_unregister_smi(to_clean->intf); | 3449 | if (to_clean->intf) |
3450 | rv = ipmi_unregister_smi(to_clean->intf); | ||
3451 | |||
3365 | if (rv) { | 3452 | if (rv) { |
3366 | printk(KERN_ERR | 3453 | printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", |
3367 | "ipmi_si: Unable to unregister device: errno=%d\n", | ||
3368 | rv); | 3454 | rv); |
3369 | } | 3455 | } |
3370 | 3456 | ||
3371 | to_clean->handlers->cleanup(to_clean->si_sm); | 3457 | if (to_clean->handlers) |
3458 | to_clean->handlers->cleanup(to_clean->si_sm); | ||
3372 | 3459 | ||
3373 | kfree(to_clean->si_sm); | 3460 | kfree(to_clean->si_sm); |
3374 | 3461 | ||
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index a4d57e31f713..82bcdb262a3a 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -659,7 +659,7 @@ static struct watchdog_info ident = { | |||
659 | .identity = "IPMI" | 659 | .identity = "IPMI" |
660 | }; | 660 | }; |
661 | 661 | ||
662 | static int ipmi_ioctl(struct inode *inode, struct file *file, | 662 | static int ipmi_ioctl(struct file *file, |
663 | unsigned int cmd, unsigned long arg) | 663 | unsigned int cmd, unsigned long arg) |
664 | { | 664 | { |
665 | void __user *argp = (void __user *)arg; | 665 | void __user *argp = (void __user *)arg; |
@@ -730,6 +730,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file, | |||
730 | } | 730 | } |
731 | } | 731 | } |
732 | 732 | ||
733 | static long ipmi_unlocked_ioctl(struct file *file, | ||
734 | unsigned int cmd, | ||
735 | unsigned long arg) | ||
736 | { | ||
737 | int ret; | ||
738 | |||
739 | lock_kernel(); | ||
740 | ret = ipmi_ioctl(file, cmd, arg); | ||
741 | unlock_kernel(); | ||
742 | |||
743 | return ret; | ||
744 | } | ||
745 | |||
733 | static ssize_t ipmi_write(struct file *file, | 746 | static ssize_t ipmi_write(struct file *file, |
734 | const char __user *buf, | 747 | const char __user *buf, |
735 | size_t len, | 748 | size_t len, |
@@ -880,7 +893,7 @@ static const struct file_operations ipmi_wdog_fops = { | |||
880 | .read = ipmi_read, | 893 | .read = ipmi_read, |
881 | .poll = ipmi_poll, | 894 | .poll = ipmi_poll, |
882 | .write = ipmi_write, | 895 | .write = ipmi_write, |
883 | .ioctl = ipmi_ioctl, | 896 | .unlocked_ioctl = ipmi_unlocked_ioctl, |
884 | .open = ipmi_open, | 897 | .open = ipmi_open, |
885 | .release = ipmi_close, | 898 | .release = ipmi_close, |
886 | .fasync = ipmi_fasync, | 899 | .fasync = ipmi_fasync, |
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index c1ab303455cf..98310e1aae30 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -1573,11 +1573,16 @@ static int __devinit isicom_probe(struct pci_dev *pdev, | |||
1573 | dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); | 1573 | dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); |
1574 | 1574 | ||
1575 | /* allot the first empty slot in the array */ | 1575 | /* allot the first empty slot in the array */ |
1576 | for (index = 0; index < BOARD_COUNT; index++) | 1576 | for (index = 0; index < BOARD_COUNT; index++) { |
1577 | if (isi_card[index].base == 0) { | 1577 | if (isi_card[index].base == 0) { |
1578 | board = &isi_card[index]; | 1578 | board = &isi_card[index]; |
1579 | break; | 1579 | break; |
1580 | } | 1580 | } |
1581 | } | ||
1582 | if (index == BOARD_COUNT) { | ||
1583 | retval = -ENODEV; | ||
1584 | goto err_disable; | ||
1585 | } | ||
1581 | 1586 | ||
1582 | board->index = index; | 1587 | board->index = index; |
1583 | board->base = pci_resource_start(pdev, 3); | 1588 | board->base = pci_resource_start(pdev, 3); |
@@ -1624,6 +1629,7 @@ errunrr: | |||
1624 | errdec: | 1629 | errdec: |
1625 | board->base = 0; | 1630 | board->base = 0; |
1626 | card_count--; | 1631 | card_count--; |
1632 | err_disable: | ||
1627 | pci_disable_device(pdev); | 1633 | pci_disable_device(pdev); |
1628 | err: | 1634 | err: |
1629 | return retval; | 1635 | return retval; |
diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 92ab03d28294..cd650ca8c679 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c | |||
@@ -144,6 +144,7 @@ static int misc_open(struct inode * inode, struct file * file) | |||
144 | old_fops = file->f_op; | 144 | old_fops = file->f_op; |
145 | file->f_op = new_fops; | 145 | file->f_op = new_fops; |
146 | if (file->f_op->open) { | 146 | if (file->f_op->open) { |
147 | file->private_data = c; | ||
147 | err=file->f_op->open(inode,file); | 148 | err=file->f_op->open(inode,file); |
148 | if (err) { | 149 | if (err) { |
149 | fops_put(file->f_op); | 150 | fops_put(file->f_op); |
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c new file mode 100644 index 000000000000..c4161d5e053d --- /dev/null +++ b/drivers/char/n_gsm.c | |||
@@ -0,0 +1,2763 @@ | |||
1 | /* | ||
2 | * n_gsm.c GSM 0710 tty multiplexor | ||
3 | * Copyright (c) 2009/10 Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | * | ||
18 | * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE * | ||
19 | * | ||
20 | * TO DO: | ||
21 | * Mostly done: ioctls for setting modes/timing | ||
22 | * Partly done: hooks so you can pull off frames to non tty devs | ||
23 | * Restart DLCI 0 when it closes ? | ||
24 | * Test basic encoding | ||
25 | * Improve the tx engine | ||
26 | * Resolve tx side locking by adding a queue_head and routing | ||
27 | * all control traffic via it | ||
28 | * General tidy/document | ||
29 | * Review the locking/move to refcounts more (mux now moved to an | ||
30 | * alloc/free model ready) | ||
31 | * Use newest tty open/close port helpers and install hooks | ||
32 | * What to do about power functions ? | ||
33 | * Termios setting and negotiation | ||
34 | * Do we need a 'which mux are you' ioctl to correlate mux and tty sets | ||
35 | * | ||
36 | */ | ||
37 | |||
38 | #include <linux/types.h> | ||
39 | #include <linux/major.h> | ||
40 | #include <linux/errno.h> | ||
41 | #include <linux/signal.h> | ||
42 | #include <linux/fcntl.h> | ||
43 | #include <linux/sched.h> | ||
44 | #include <linux/interrupt.h> | ||
45 | #include <linux/tty.h> | ||
46 | #include <linux/timer.h> | ||
47 | #include <linux/ctype.h> | ||
48 | #include <linux/mm.h> | ||
49 | #include <linux/string.h> | ||
50 | #include <linux/slab.h> | ||
51 | #include <linux/poll.h> | ||
52 | #include <linux/bitops.h> | ||
53 | #include <linux/file.h> | ||
54 | #include <linux/uaccess.h> | ||
55 | #include <linux/module.h> | ||
56 | #include <linux/timer.h> | ||
57 | #include <linux/tty_flip.h> | ||
58 | #include <linux/tty_driver.h> | ||
59 | #include <linux/serial.h> | ||
60 | #include <linux/kfifo.h> | ||
61 | #include <linux/skbuff.h> | ||
62 | #include <linux/gsmmux.h> | ||
63 | |||
64 | static int debug; | ||
65 | module_param(debug, int, 0600); | ||
66 | |||
67 | #define T1 (HZ/10) | ||
68 | #define T2 (HZ/3) | ||
69 | #define N2 3 | ||
70 | |||
71 | /* Use long timers for testing at low speed with debug on */ | ||
72 | #ifdef DEBUG_TIMING | ||
73 | #define T1 HZ | ||
74 | #define T2 (2 * HZ) | ||
75 | #endif | ||
76 | |||
77 | /* Semi-arbitary buffer size limits. 0710 is normally run with 32-64 byte | ||
78 | limits so this is plenty */ | ||
79 | #define MAX_MRU 512 | ||
80 | #define MAX_MTU 512 | ||
81 | |||
82 | /* | ||
83 | * Each block of data we have queued to go out is in the form of | ||
84 | * a gsm_msg which holds everything we need in a link layer independant | ||
85 | * format | ||
86 | */ | ||
87 | |||
88 | struct gsm_msg { | ||
89 | struct gsm_msg *next; | ||
90 | u8 addr; /* DLCI address + flags */ | ||
91 | u8 ctrl; /* Control byte + flags */ | ||
92 | unsigned int len; /* Length of data block (can be zero) */ | ||
93 | unsigned char *data; /* Points into buffer but not at the start */ | ||
94 | unsigned char buffer[0]; | ||
95 | }; | ||
96 | |||
97 | /* | ||
98 | * Each active data link has a gsm_dlci structure associated which ties | ||
99 | * the link layer to an optional tty (if the tty side is open). To avoid | ||
100 | * complexity right now these are only ever freed up when the mux is | ||
101 | * shut down. | ||
102 | * | ||
103 | * At the moment we don't free DLCI objects until the mux is torn down | ||
104 | * this avoid object life time issues but might be worth review later. | ||
105 | */ | ||
106 | |||
107 | struct gsm_dlci { | ||
108 | struct gsm_mux *gsm; | ||
109 | int addr; | ||
110 | int state; | ||
111 | #define DLCI_CLOSED 0 | ||
112 | #define DLCI_OPENING 1 /* Sending SABM not seen UA */ | ||
113 | #define DLCI_OPEN 2 /* SABM/UA complete */ | ||
114 | #define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */ | ||
115 | |||
116 | /* Link layer */ | ||
117 | spinlock_t lock; /* Protects the internal state */ | ||
118 | struct timer_list t1; /* Retransmit timer for SABM and UA */ | ||
119 | int retries; | ||
120 | /* Uplink tty if active */ | ||
121 | struct tty_port port; /* The tty bound to this DLCI if there is one */ | ||
122 | struct kfifo *fifo; /* Queue fifo for the DLCI */ | ||
123 | struct kfifo _fifo; /* For new fifo API porting only */ | ||
124 | int adaption; /* Adaption layer in use */ | ||
125 | u32 modem_rx; /* Our incoming virtual modem lines */ | ||
126 | u32 modem_tx; /* Our outgoing modem lines */ | ||
127 | int dead; /* Refuse re-open */ | ||
128 | /* Flow control */ | ||
129 | int throttled; /* Private copy of throttle state */ | ||
130 | int constipated; /* Throttle status for outgoing */ | ||
131 | /* Packetised I/O */ | ||
132 | struct sk_buff *skb; /* Frame being sent */ | ||
133 | struct sk_buff_head skb_list; /* Queued frames */ | ||
134 | /* Data handling callback */ | ||
135 | void (*data)(struct gsm_dlci *dlci, u8 *data, int len); | ||
136 | }; | ||
137 | |||
138 | /* DLCI 0, 62/63 are special or reseved see gsmtty_open */ | ||
139 | |||
140 | #define NUM_DLCI 64 | ||
141 | |||
142 | /* | ||
143 | * DLCI 0 is used to pass control blocks out of band of the data | ||
144 | * flow (and with a higher link priority). One command can be outstanding | ||
145 | * at a time and we use this structure to manage them. They are created | ||
146 | * and destroyed by the user context, and updated by the receive paths | ||
147 | * and timers | ||
148 | */ | ||
149 | |||
150 | struct gsm_control { | ||
151 | u8 cmd; /* Command we are issuing */ | ||
152 | u8 *data; /* Data for the command in case we retransmit */ | ||
153 | int len; /* Length of block for retransmission */ | ||
154 | int done; /* Done flag */ | ||
155 | int error; /* Error if any */ | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * Each GSM mux we have is represented by this structure. If we are | ||
160 | * operating as an ldisc then we use this structure as our ldisc | ||
161 | * state. We need to sort out lifetimes and locking with respect | ||
162 | * to the gsm mux array. For now we don't free DLCI objects that | ||
163 | * have been instantiated until the mux itself is terminated. | ||
164 | * | ||
165 | * To consider further: tty open versus mux shutdown. | ||
166 | */ | ||
167 | |||
168 | struct gsm_mux { | ||
169 | struct tty_struct *tty; /* The tty our ldisc is bound to */ | ||
170 | spinlock_t lock; | ||
171 | |||
172 | /* Events on the GSM channel */ | ||
173 | wait_queue_head_t event; | ||
174 | |||
175 | /* Bits for GSM mode decoding */ | ||
176 | |||
177 | /* Framing Layer */ | ||
178 | unsigned char *buf; | ||
179 | int state; | ||
180 | #define GSM_SEARCH 0 | ||
181 | #define GSM_START 1 | ||
182 | #define GSM_ADDRESS 2 | ||
183 | #define GSM_CONTROL 3 | ||
184 | #define GSM_LEN 4 | ||
185 | #define GSM_DATA 5 | ||
186 | #define GSM_FCS 6 | ||
187 | #define GSM_OVERRUN 7 | ||
188 | unsigned int len; | ||
189 | unsigned int address; | ||
190 | unsigned int count; | ||
191 | int escape; | ||
192 | int encoding; | ||
193 | u8 control; | ||
194 | u8 fcs; | ||
195 | u8 *txframe; /* TX framing buffer */ | ||
196 | |||
197 | /* Methods for the receiver side */ | ||
198 | void (*receive)(struct gsm_mux *gsm, u8 ch); | ||
199 | void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag); | ||
200 | /* And transmit side */ | ||
201 | int (*output)(struct gsm_mux *mux, u8 *data, int len); | ||
202 | |||
203 | /* Link Layer */ | ||
204 | unsigned int mru; | ||
205 | unsigned int mtu; | ||
206 | int initiator; /* Did we initiate connection */ | ||
207 | int dead; /* Has the mux been shut down */ | ||
208 | struct gsm_dlci *dlci[NUM_DLCI]; | ||
209 | int constipated; /* Asked by remote to shut up */ | ||
210 | |||
211 | spinlock_t tx_lock; | ||
212 | unsigned int tx_bytes; /* TX data outstanding */ | ||
213 | #define TX_THRESH_HI 8192 | ||
214 | #define TX_THRESH_LO 2048 | ||
215 | struct gsm_msg *tx_head; /* Pending data packets */ | ||
216 | struct gsm_msg *tx_tail; | ||
217 | |||
218 | /* Control messages */ | ||
219 | struct timer_list t2_timer; /* Retransmit timer for commands */ | ||
220 | int cretries; /* Command retry counter */ | ||
221 | struct gsm_control *pending_cmd;/* Our current pending command */ | ||
222 | spinlock_t control_lock; /* Protects the pending command */ | ||
223 | |||
224 | /* Configuration */ | ||
225 | int adaption; /* 1 or 2 supported */ | ||
226 | u8 ftype; /* UI or UIH */ | ||
227 | int t1, t2; /* Timers in 1/100th of a sec */ | ||
228 | int n2; /* Retry count */ | ||
229 | |||
230 | /* Statistics (not currently exposed) */ | ||
231 | unsigned long bad_fcs; | ||
232 | unsigned long malformed; | ||
233 | unsigned long io_error; | ||
234 | unsigned long bad_size; | ||
235 | unsigned long unsupported; | ||
236 | }; | ||
237 | |||
238 | |||
239 | /* | ||
240 | * Mux objects - needed so that we can translate a tty index into the | ||
241 | * relevant mux and DLCI. | ||
242 | */ | ||
243 | |||
244 | #define MAX_MUX 4 /* 256 minors */ | ||
245 | static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */ | ||
246 | static spinlock_t gsm_mux_lock; | ||
247 | |||
248 | /* | ||
249 | * This section of the driver logic implements the GSM encodings | ||
250 | * both the basic and the 'advanced'. Reliable transport is not | ||
251 | * supported. | ||
252 | */ | ||
253 | |||
254 | #define CR 0x02 | ||
255 | #define EA 0x01 | ||
256 | #define PF 0x10 | ||
257 | |||
258 | /* I is special: the rest are ..*/ | ||
259 | #define RR 0x01 | ||
260 | #define UI 0x03 | ||
261 | #define RNR 0x05 | ||
262 | #define REJ 0x09 | ||
263 | #define DM 0x0F | ||
264 | #define SABM 0x2F | ||
265 | #define DISC 0x43 | ||
266 | #define UA 0x63 | ||
267 | #define UIH 0xEF | ||
268 | |||
269 | /* Channel commands */ | ||
270 | #define CMD_NSC 0x09 | ||
271 | #define CMD_TEST 0x11 | ||
272 | #define CMD_PSC 0x21 | ||
273 | #define CMD_RLS 0x29 | ||
274 | #define CMD_FCOFF 0x31 | ||
275 | #define CMD_PN 0x41 | ||
276 | #define CMD_RPN 0x49 | ||
277 | #define CMD_FCON 0x51 | ||
278 | #define CMD_CLD 0x61 | ||
279 | #define CMD_SNC 0x69 | ||
280 | #define CMD_MSC 0x71 | ||
281 | |||
282 | /* Virtual modem bits */ | ||
283 | #define MDM_FC 0x01 | ||
284 | #define MDM_RTC 0x02 | ||
285 | #define MDM_RTR 0x04 | ||
286 | #define MDM_IC 0x20 | ||
287 | #define MDM_DV 0x40 | ||
288 | |||
289 | #define GSM0_SOF 0xF9 | ||
290 | #define GSM1_SOF 0x7E | ||
291 | #define GSM1_ESCAPE 0x7D | ||
292 | #define GSM1_ESCAPE_BITS 0x20 | ||
293 | #define XON 0x11 | ||
294 | #define XOFF 0x13 | ||
295 | |||
296 | static const struct tty_port_operations gsm_port_ops; | ||
297 | |||
298 | /* | ||
299 | * CRC table for GSM 0710 | ||
300 | */ | ||
301 | |||
302 | static const u8 gsm_fcs8[256] = { | ||
303 | 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75, | ||
304 | 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B, | ||
305 | 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69, | ||
306 | 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67, | ||
307 | 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D, | ||
308 | 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43, | ||
309 | 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51, | ||
310 | 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F, | ||
311 | 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05, | ||
312 | 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B, | ||
313 | 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19, | ||
314 | 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17, | ||
315 | 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D, | ||
316 | 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33, | ||
317 | 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21, | ||
318 | 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F, | ||
319 | 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95, | ||
320 | 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B, | ||
321 | 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89, | ||
322 | 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87, | ||
323 | 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD, | ||
324 | 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3, | ||
325 | 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1, | ||
326 | 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF, | ||
327 | 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5, | ||
328 | 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB, | ||
329 | 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9, | ||
330 | 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7, | ||
331 | 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD, | ||
332 | 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3, | ||
333 | 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1, | ||
334 | 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF | ||
335 | }; | ||
336 | |||
337 | #define INIT_FCS 0xFF | ||
338 | #define GOOD_FCS 0xCF | ||
339 | |||
340 | /** | ||
341 | * gsm_fcs_add - update FCS | ||
342 | * @fcs: Current FCS | ||
343 | * @c: Next data | ||
344 | * | ||
345 | * Update the FCS to include c. Uses the algorithm in the specification | ||
346 | * notes. | ||
347 | */ | ||
348 | |||
349 | static inline u8 gsm_fcs_add(u8 fcs, u8 c) | ||
350 | { | ||
351 | return gsm_fcs8[fcs ^ c]; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * gsm_fcs_add_block - update FCS for a block | ||
356 | * @fcs: Current FCS | ||
357 | * @c: buffer of data | ||
358 | * @len: length of buffer | ||
359 | * | ||
360 | * Update the FCS to include c. Uses the algorithm in the specification | ||
361 | * notes. | ||
362 | */ | ||
363 | |||
364 | static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len) | ||
365 | { | ||
366 | while (len--) | ||
367 | fcs = gsm_fcs8[fcs ^ *c++]; | ||
368 | return fcs; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * gsm_read_ea - read a byte into an EA | ||
373 | * @val: variable holding value | ||
374 | * c: byte going into the EA | ||
375 | * | ||
376 | * Processes one byte of an EA. Updates the passed variable | ||
377 | * and returns 1 if the EA is now completely read | ||
378 | */ | ||
379 | |||
380 | static int gsm_read_ea(unsigned int *val, u8 c) | ||
381 | { | ||
382 | /* Add the next 7 bits into the value */ | ||
383 | *val <<= 7; | ||
384 | *val |= c >> 1; | ||
385 | /* Was this the last byte of the EA 1 = yes*/ | ||
386 | return c & EA; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * gsm_encode_modem - encode modem data bits | ||
391 | * @dlci: DLCI to encode from | ||
392 | * | ||
393 | * Returns the correct GSM encoded modem status bits (6 bit field) for | ||
394 | * the current status of the DLCI and attached tty object | ||
395 | */ | ||
396 | |||
397 | static u8 gsm_encode_modem(const struct gsm_dlci *dlci) | ||
398 | { | ||
399 | u8 modembits = 0; | ||
400 | /* FC is true flow control not modem bits */ | ||
401 | if (dlci->throttled) | ||
402 | modembits |= MDM_FC; | ||
403 | if (dlci->modem_tx & TIOCM_DTR) | ||
404 | modembits |= MDM_RTC; | ||
405 | if (dlci->modem_tx & TIOCM_RTS) | ||
406 | modembits |= MDM_RTR; | ||
407 | if (dlci->modem_tx & TIOCM_RI) | ||
408 | modembits |= MDM_IC; | ||
409 | if (dlci->modem_tx & TIOCM_CD) | ||
410 | modembits |= MDM_DV; | ||
411 | return modembits; | ||
412 | } | ||
413 | |||
414 | /** | ||
415 | * gsm_print_packet - display a frame for debug | ||
416 | * @hdr: header to print before decode | ||
417 | * @addr: address EA from the frame | ||
418 | * @cr: C/R bit from the frame | ||
419 | * @control: control including PF bit | ||
420 | * @data: following data bytes | ||
421 | * @dlen: length of data | ||
422 | * | ||
423 | * Displays a packet in human readable format for debugging purposes. The | ||
424 | * style is based on amateur radio LAP-B dump display. | ||
425 | */ | ||
426 | |||
427 | static void gsm_print_packet(const char *hdr, int addr, int cr, | ||
428 | u8 control, const u8 *data, int dlen) | ||
429 | { | ||
430 | if (!(debug & 1)) | ||
431 | return; | ||
432 | |||
433 | printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]); | ||
434 | |||
435 | switch (control & ~PF) { | ||
436 | case SABM: | ||
437 | printk(KERN_CONT "SABM"); | ||
438 | break; | ||
439 | case UA: | ||
440 | printk(KERN_CONT "UA"); | ||
441 | break; | ||
442 | case DISC: | ||
443 | printk(KERN_CONT "DISC"); | ||
444 | break; | ||
445 | case DM: | ||
446 | printk(KERN_CONT "DM"); | ||
447 | break; | ||
448 | case UI: | ||
449 | printk(KERN_CONT "UI"); | ||
450 | break; | ||
451 | case UIH: | ||
452 | printk(KERN_CONT "UIH"); | ||
453 | break; | ||
454 | default: | ||
455 | if (!(control & 0x01)) { | ||
456 | printk(KERN_CONT "I N(S)%d N(R)%d", | ||
457 | (control & 0x0E) >> 1, (control & 0xE)>> 5); | ||
458 | } else switch (control & 0x0F) { | ||
459 | case RR: | ||
460 | printk("RR(%d)", (control & 0xE0) >> 5); | ||
461 | break; | ||
462 | case RNR: | ||
463 | printk("RNR(%d)", (control & 0xE0) >> 5); | ||
464 | break; | ||
465 | case REJ: | ||
466 | printk("REJ(%d)", (control & 0xE0) >> 5); | ||
467 | break; | ||
468 | default: | ||
469 | printk(KERN_CONT "[%02X]", control); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | if (control & PF) | ||
474 | printk(KERN_CONT "(P)"); | ||
475 | else | ||
476 | printk(KERN_CONT "(F)"); | ||
477 | |||
478 | if (dlen) { | ||
479 | int ct = 0; | ||
480 | while (dlen--) { | ||
481 | if (ct % 8 == 0) | ||
482 | printk(KERN_CONT "\n "); | ||
483 | printk(KERN_CONT "%02X ", *data++); | ||
484 | ct++; | ||
485 | } | ||
486 | } | ||
487 | printk(KERN_CONT "\n"); | ||
488 | } | ||
489 | |||
490 | |||
491 | /* | ||
492 | * Link level transmission side | ||
493 | */ | ||
494 | |||
495 | /** | ||
496 | * gsm_stuff_packet - bytestuff a packet | ||
497 | * @ibuf: input | ||
498 | * @obuf: output | ||
499 | * @len: length of input | ||
500 | * | ||
501 | * Expand a buffer by bytestuffing it. The worst case size change | ||
502 | * is doubling and the caller is responsible for handing out | ||
503 | * suitable sized buffers. | ||
504 | */ | ||
505 | |||
506 | static int gsm_stuff_frame(const u8 *input, u8 *output, int len) | ||
507 | { | ||
508 | int olen = 0; | ||
509 | while (len--) { | ||
510 | if (*input == GSM1_SOF || *input == GSM1_ESCAPE | ||
511 | || *input == XON || *input == XOFF) { | ||
512 | *output++ = GSM1_ESCAPE; | ||
513 | *output++ = *input++ ^ GSM1_ESCAPE_BITS; | ||
514 | olen++; | ||
515 | } else | ||
516 | *output++ = *input++; | ||
517 | olen++; | ||
518 | } | ||
519 | return olen; | ||
520 | } | ||
521 | |||
522 | static void hex_packet(const unsigned char *p, int len) | ||
523 | { | ||
524 | int i; | ||
525 | for (i = 0; i < len; i++) { | ||
526 | if (i && (i % 16) == 0) | ||
527 | printk("\n"); | ||
528 | printk("%02X ", *p++); | ||
529 | } | ||
530 | printk("\n"); | ||
531 | } | ||
532 | |||
533 | /** | ||
534 | * gsm_send - send a control frame | ||
535 | * @gsm: our GSM mux | ||
536 | * @addr: address for control frame | ||
537 | * @cr: command/response bit | ||
538 | * @control: control byte including PF bit | ||
539 | * | ||
540 | * Format up and transmit a control frame. These do not go via the | ||
541 | * queueing logic as they should be transmitted ahead of data when | ||
542 | * they are needed. | ||
543 | * | ||
544 | * FIXME: Lock versus data TX path | ||
545 | */ | ||
546 | |||
547 | static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) | ||
548 | { | ||
549 | int len; | ||
550 | u8 cbuf[10]; | ||
551 | u8 ibuf[3]; | ||
552 | |||
553 | switch (gsm->encoding) { | ||
554 | case 0: | ||
555 | cbuf[0] = GSM0_SOF; | ||
556 | cbuf[1] = (addr << 2) | (cr << 1) | EA; | ||
557 | cbuf[2] = control; | ||
558 | cbuf[3] = EA; /* Length of data = 0 */ | ||
559 | cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3); | ||
560 | cbuf[5] = GSM0_SOF; | ||
561 | len = 6; | ||
562 | break; | ||
563 | case 1: | ||
564 | case 2: | ||
565 | /* Control frame + packing (but not frame stuffing) in mode 1 */ | ||
566 | ibuf[0] = (addr << 2) | (cr << 1) | EA; | ||
567 | ibuf[1] = control; | ||
568 | ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2); | ||
569 | /* Stuffing may double the size worst case */ | ||
570 | len = gsm_stuff_frame(ibuf, cbuf + 1, 3); | ||
571 | /* Now add the SOF markers */ | ||
572 | cbuf[0] = GSM1_SOF; | ||
573 | cbuf[len + 1] = GSM1_SOF; | ||
574 | /* FIXME: we can omit the lead one in many cases */ | ||
575 | len += 2; | ||
576 | break; | ||
577 | default: | ||
578 | WARN_ON(1); | ||
579 | return; | ||
580 | } | ||
581 | gsm->output(gsm, cbuf, len); | ||
582 | gsm_print_packet("-->", addr, cr, control, NULL, 0); | ||
583 | } | ||
584 | |||
585 | /** | ||
586 | * gsm_response - send a control response | ||
587 | * @gsm: our GSM mux | ||
588 | * @addr: address for control frame | ||
589 | * @control: control byte including PF bit | ||
590 | * | ||
591 | * Format up and transmit a link level response frame. | ||
592 | */ | ||
593 | |||
594 | static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) | ||
595 | { | ||
596 | gsm_send(gsm, addr, 0, control); | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * gsm_command - send a control command | ||
601 | * @gsm: our GSM mux | ||
602 | * @addr: address for control frame | ||
603 | * @control: control byte including PF bit | ||
604 | * | ||
605 | * Format up and transmit a link level command frame. | ||
606 | */ | ||
607 | |||
608 | static inline void gsm_command(struct gsm_mux *gsm, int addr, int control) | ||
609 | { | ||
610 | gsm_send(gsm, addr, 1, control); | ||
611 | } | ||
612 | |||
613 | /* Data transmission */ | ||
614 | |||
615 | #define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */ | ||
616 | |||
617 | /** | ||
618 | * gsm_data_alloc - allocate data frame | ||
619 | * @gsm: GSM mux | ||
620 | * @addr: DLCI address | ||
621 | * @len: length excluding header and FCS | ||
622 | * @ctrl: control byte | ||
623 | * | ||
624 | * Allocate a new data buffer for sending frames with data. Space is left | ||
625 | * at the front for header bytes but that is treated as an implementation | ||
626 | * detail and not for the high level code to use | ||
627 | */ | ||
628 | |||
629 | static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, | ||
630 | u8 ctrl) | ||
631 | { | ||
632 | struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN, | ||
633 | GFP_ATOMIC); | ||
634 | if (m == NULL) | ||
635 | return NULL; | ||
636 | m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */ | ||
637 | m->len = len; | ||
638 | m->addr = addr; | ||
639 | m->ctrl = ctrl; | ||
640 | m->next = NULL; | ||
641 | return m; | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * gsm_data_kick - poke the queue | ||
646 | * @gsm: GSM Mux | ||
647 | * | ||
648 | * The tty device has called us to indicate that room has appeared in | ||
649 | * the transmit queue. Ram more data into the pipe if we have any | ||
650 | * | ||
651 | * FIXME: lock against link layer control transmissions | ||
652 | */ | ||
653 | |||
654 | static void gsm_data_kick(struct gsm_mux *gsm) | ||
655 | { | ||
656 | struct gsm_msg *msg = gsm->tx_head; | ||
657 | int len; | ||
658 | int skip_sof = 0; | ||
659 | |||
660 | /* FIXME: We need to apply this solely to data messages */ | ||
661 | if (gsm->constipated) | ||
662 | return; | ||
663 | |||
664 | while (gsm->tx_head != NULL) { | ||
665 | msg = gsm->tx_head; | ||
666 | if (gsm->encoding != 0) { | ||
667 | gsm->txframe[0] = GSM1_SOF; | ||
668 | len = gsm_stuff_frame(msg->data, | ||
669 | gsm->txframe + 1, msg->len); | ||
670 | gsm->txframe[len + 1] = GSM1_SOF; | ||
671 | len += 2; | ||
672 | } else { | ||
673 | gsm->txframe[0] = GSM0_SOF; | ||
674 | memcpy(gsm->txframe + 1 , msg->data, msg->len); | ||
675 | gsm->txframe[msg->len + 1] = GSM0_SOF; | ||
676 | len = msg->len + 2; | ||
677 | } | ||
678 | |||
679 | if (debug & 4) { | ||
680 | printk("gsm_data_kick: \n"); | ||
681 | hex_packet(gsm->txframe, len); | ||
682 | } | ||
683 | |||
684 | if (gsm->output(gsm, gsm->txframe + skip_sof, | ||
685 | len - skip_sof) < 0) | ||
686 | break; | ||
687 | /* FIXME: Can eliminate one SOF in many more cases */ | ||
688 | gsm->tx_head = msg->next; | ||
689 | if (gsm->tx_head == NULL) | ||
690 | gsm->tx_tail = NULL; | ||
691 | gsm->tx_bytes -= msg->len; | ||
692 | kfree(msg); | ||
693 | /* For a burst of frames skip the extra SOF within the | ||
694 | burst */ | ||
695 | skip_sof = 1; | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /** | ||
700 | * __gsm_data_queue - queue a UI or UIH frame | ||
701 | * @dlci: DLCI sending the data | ||
702 | * @msg: message queued | ||
703 | * | ||
704 | * Add data to the transmit queue and try and get stuff moving | ||
705 | * out of the mux tty if not already doing so. The Caller must hold | ||
706 | * the gsm tx lock. | ||
707 | */ | ||
708 | |||
709 | static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) | ||
710 | { | ||
711 | struct gsm_mux *gsm = dlci->gsm; | ||
712 | u8 *dp = msg->data; | ||
713 | u8 *fcs = dp + msg->len; | ||
714 | |||
715 | /* Fill in the header */ | ||
716 | if (gsm->encoding == 0) { | ||
717 | if (msg->len < 128) | ||
718 | *--dp = (msg->len << 1) | EA; | ||
719 | else { | ||
720 | *--dp = (msg->len >> 6) | EA; | ||
721 | *--dp = (msg->len & 127) << 1; | ||
722 | } | ||
723 | } | ||
724 | |||
725 | *--dp = msg->ctrl; | ||
726 | if (gsm->initiator) | ||
727 | *--dp = (msg->addr << 2) | 2 | EA; | ||
728 | else | ||
729 | *--dp = (msg->addr << 2) | EA; | ||
730 | *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp); | ||
731 | /* Ugly protocol layering violation */ | ||
732 | if (msg->ctrl == UI || msg->ctrl == (UI|PF)) | ||
733 | *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len); | ||
734 | *fcs = 0xFF - *fcs; | ||
735 | |||
736 | gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl, | ||
737 | msg->data, msg->len); | ||
738 | |||
739 | /* Move the header back and adjust the length, also allow for the FCS | ||
740 | now tacked on the end */ | ||
741 | msg->len += (msg->data - dp) + 1; | ||
742 | msg->data = dp; | ||
743 | |||
744 | /* Add to the actual output queue */ | ||
745 | if (gsm->tx_tail) | ||
746 | gsm->tx_tail->next = msg; | ||
747 | else | ||
748 | gsm->tx_head = msg; | ||
749 | gsm->tx_tail = msg; | ||
750 | gsm->tx_bytes += msg->len; | ||
751 | gsm_data_kick(gsm); | ||
752 | } | ||
753 | |||
754 | /** | ||
755 | * gsm_data_queue - queue a UI or UIH frame | ||
756 | * @dlci: DLCI sending the data | ||
757 | * @msg: message queued | ||
758 | * | ||
759 | * Add data to the transmit queue and try and get stuff moving | ||
760 | * out of the mux tty if not already doing so. Take the | ||
761 | * the gsm tx lock and dlci lock. | ||
762 | */ | ||
763 | |||
764 | static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) | ||
765 | { | ||
766 | unsigned long flags; | ||
767 | spin_lock_irqsave(&dlci->gsm->tx_lock, flags); | ||
768 | __gsm_data_queue(dlci, msg); | ||
769 | spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); | ||
770 | } | ||
771 | |||
772 | /** | ||
773 | * gsm_dlci_data_output - try and push data out of a DLCI | ||
774 | * @gsm: mux | ||
775 | * @dlci: the DLCI to pull data from | ||
776 | * | ||
777 | * Pull data from a DLCI and send it into the transmit queue if there | ||
778 | * is data. Keep to the MRU of the mux. This path handles the usual tty | ||
779 | * interface which is a byte stream with optional modem data. | ||
780 | * | ||
781 | * Caller must hold the tx_lock of the mux. | ||
782 | */ | ||
783 | |||
784 | static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) | ||
785 | { | ||
786 | struct gsm_msg *msg; | ||
787 | u8 *dp; | ||
788 | int len, size; | ||
789 | int h = dlci->adaption - 1; | ||
790 | |||
791 | len = kfifo_len(dlci->fifo); | ||
792 | if (len == 0) | ||
793 | return 0; | ||
794 | |||
795 | /* MTU/MRU count only the data bits */ | ||
796 | if (len > gsm->mtu) | ||
797 | len = gsm->mtu; | ||
798 | |||
799 | size = len + h; | ||
800 | |||
801 | msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); | ||
802 | /* FIXME: need a timer or something to kick this so it can't | ||
803 | get stuck with no work outstanding and no buffer free */ | ||
804 | if (msg == NULL) | ||
805 | return -ENOMEM; | ||
806 | dp = msg->data; | ||
807 | switch (dlci->adaption) { | ||
808 | case 1: /* Unstructured */ | ||
809 | break; | ||
810 | case 2: /* Unstructed with modem bits. Always one byte as we never | ||
811 | send inline break data */ | ||
812 | *dp += gsm_encode_modem(dlci); | ||
813 | len--; | ||
814 | break; | ||
815 | } | ||
816 | WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len); | ||
817 | __gsm_data_queue(dlci, msg); | ||
818 | /* Bytes of data we used up */ | ||
819 | return size; | ||
820 | } | ||
821 | |||
822 | /** | ||
823 | * gsm_dlci_data_output_framed - try and push data out of a DLCI | ||
824 | * @gsm: mux | ||
825 | * @dlci: the DLCI to pull data from | ||
826 | * | ||
827 | * Pull data from a DLCI and send it into the transmit queue if there | ||
828 | * is data. Keep to the MRU of the mux. This path handles framed data | ||
829 | * queued as skbuffs to the DLCI. | ||
830 | * | ||
831 | * Caller must hold the tx_lock of the mux. | ||
832 | */ | ||
833 | |||
834 | static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, | ||
835 | struct gsm_dlci *dlci) | ||
836 | { | ||
837 | struct gsm_msg *msg; | ||
838 | u8 *dp; | ||
839 | int len, size; | ||
840 | int last = 0, first = 0; | ||
841 | int overhead = 0; | ||
842 | |||
843 | /* One byte per frame is used for B/F flags */ | ||
844 | if (dlci->adaption == 4) | ||
845 | overhead = 1; | ||
846 | |||
847 | /* dlci->skb is locked by tx_lock */ | ||
848 | if (dlci->skb == NULL) { | ||
849 | dlci->skb = skb_dequeue(&dlci->skb_list); | ||
850 | if (dlci->skb == NULL) | ||
851 | return 0; | ||
852 | first = 1; | ||
853 | } | ||
854 | len = dlci->skb->len + overhead; | ||
855 | |||
856 | /* MTU/MRU count only the data bits */ | ||
857 | if (len > gsm->mtu) { | ||
858 | if (dlci->adaption == 3) { | ||
859 | /* Over long frame, bin it */ | ||
860 | kfree_skb(dlci->skb); | ||
861 | dlci->skb = NULL; | ||
862 | return 0; | ||
863 | } | ||
864 | len = gsm->mtu; | ||
865 | } else | ||
866 | last = 1; | ||
867 | |||
868 | size = len + overhead; | ||
869 | msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); | ||
870 | |||
871 | /* FIXME: need a timer or something to kick this so it can't | ||
872 | get stuck with no work outstanding and no buffer free */ | ||
873 | if (msg == NULL) | ||
874 | return -ENOMEM; | ||
875 | dp = msg->data; | ||
876 | |||
877 | if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ | ||
878 | /* Flag byte to carry the start/end info */ | ||
879 | *dp++ = last << 7 | first << 6 | 1; /* EA */ | ||
880 | len--; | ||
881 | } | ||
882 | memcpy(dp, skb_pull(dlci->skb, len), len); | ||
883 | __gsm_data_queue(dlci, msg); | ||
884 | if (last) | ||
885 | dlci->skb = NULL; | ||
886 | return size; | ||
887 | } | ||
888 | |||
889 | /** | ||
890 | * gsm_dlci_data_sweep - look for data to send | ||
891 | * @gsm: the GSM mux | ||
892 | * | ||
893 | * Sweep the GSM mux channels in priority order looking for ones with | ||
894 | * data to send. We could do with optimising this scan a bit. We aim | ||
895 | * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit | ||
896 | * TX_THRESH_LO we get called again | ||
897 | * | ||
898 | * FIXME: We should round robin between groups and in theory you can | ||
899 | * renegotiate DLCI priorities with optional stuff. Needs optimising. | ||
900 | */ | ||
901 | |||
902 | static void gsm_dlci_data_sweep(struct gsm_mux *gsm) | ||
903 | { | ||
904 | int len; | ||
905 | /* Priority ordering: We should do priority with RR of the groups */ | ||
906 | int i = 1; | ||
907 | unsigned long flags; | ||
908 | |||
909 | spin_lock_irqsave(&gsm->tx_lock, flags); | ||
910 | while (i < NUM_DLCI) { | ||
911 | struct gsm_dlci *dlci; | ||
912 | |||
913 | if (gsm->tx_bytes > TX_THRESH_HI) | ||
914 | break; | ||
915 | dlci = gsm->dlci[i]; | ||
916 | if (dlci == NULL || dlci->constipated) { | ||
917 | i++; | ||
918 | continue; | ||
919 | } | ||
920 | if (dlci->adaption < 3) | ||
921 | len = gsm_dlci_data_output(gsm, dlci); | ||
922 | else | ||
923 | len = gsm_dlci_data_output_framed(gsm, dlci); | ||
924 | if (len < 0) | ||
925 | return; | ||
926 | /* DLCI empty - try the next */ | ||
927 | if (len == 0) | ||
928 | i++; | ||
929 | } | ||
930 | spin_unlock_irqrestore(&gsm->tx_lock, flags); | ||
931 | } | ||
932 | |||
933 | /** | ||
934 | * gsm_dlci_data_kick - transmit if possible | ||
935 | * @dlci: DLCI to kick | ||
936 | * | ||
937 | * Transmit data from this DLCI if the queue is empty. We can't rely on | ||
938 | * a tty wakeup except when we filled the pipe so we need to fire off | ||
939 | * new data ourselves in other cases. | ||
940 | */ | ||
941 | |||
942 | static void gsm_dlci_data_kick(struct gsm_dlci *dlci) | ||
943 | { | ||
944 | unsigned long flags; | ||
945 | |||
946 | spin_lock_irqsave(&dlci->gsm->tx_lock, flags); | ||
947 | /* If we have nothing running then we need to fire up */ | ||
948 | if (dlci->gsm->tx_bytes == 0) | ||
949 | gsm_dlci_data_output(dlci->gsm, dlci); | ||
950 | else if (dlci->gsm->tx_bytes < TX_THRESH_LO) | ||
951 | gsm_dlci_data_sweep(dlci->gsm); | ||
952 | spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Control message processing | ||
957 | */ | ||
958 | |||
959 | |||
960 | /** | ||
961 | * gsm_control_reply - send a response frame to a control | ||
962 | * @gsm: gsm channel | ||
963 | * @cmd: the command to use | ||
964 | * @data: data to follow encoded info | ||
965 | * @dlen: length of data | ||
966 | * | ||
967 | * Encode up and queue a UI/UIH frame containing our response. | ||
968 | */ | ||
969 | |||
970 | static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data, | ||
971 | int dlen) | ||
972 | { | ||
973 | struct gsm_msg *msg; | ||
974 | msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); | ||
975 | msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ | ||
976 | msg->data[1] = (dlen << 1) | EA; | ||
977 | memcpy(msg->data + 2, data, dlen); | ||
978 | gsm_data_queue(gsm->dlci[0], msg); | ||
979 | } | ||
980 | |||
/**
 *	gsm_process_modem	-	process received modem status
 *	@tty: virtual tty bound to the DLCI (may be NULL if nobody has it open)
 *	@dlci: DLCI to affect
 *	@modem: modem bits (full EA)
 *
 *	Used when a modem control message or line state inline in adaption
 *	layer 2 is processed. Sort out the local modem state and throttles
 */

static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
							u32 modem)
{
	int mlines = 0;
	/* Bits above the six modem-signal bits carry the break indication */
	u8 brk = modem >> 6;

	/* Flow control/ready to communicate */
	if (modem & MDM_FC) {
		/* Need to throttle our output on this device */
		dlci->constipated = 1;
	}
	if (modem & MDM_RTC) {
		mlines |= TIOCM_DSR | TIOCM_DTR;
		/* Peer can receive again: unthrottle and restart output */
		dlci->constipated = 0;
		gsm_dlci_data_kick(dlci);
	}
	/* Map modem bits */
	if (modem & MDM_RTR)
		mlines |= TIOCM_RTS | TIOCM_CTS;
	if (modem & MDM_IC)
		mlines |= TIOCM_RI;
	if (modem & MDM_DV)
		mlines |= TIOCM_CD;

	/* Carrier drop -> hangup */
	if (tty) {
		/* Hang up only on a CD high->low transition, and only if the
		   tty is not in local (CLOCAL) mode */
		if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
			if (!(tty->termios->c_cflag & CLOCAL))
				tty_hangup(tty);
		if (brk & 0x01)
			tty_insert_flip_char(tty, 0, TTY_BREAK);
	}
	/* Remember the state for the next transition check */
	dlci->modem_rx = mlines;
}
1025 | |||
/**
 *	gsm_control_modem	-	modem status received
 *	@gsm: GSM channel
 *	@data: data following command
 *	@clen: command length
 *
 *	We have received a modem status control message. This is used by
 *	the GSM mux protocol to pass virtual modem line status and optionally
 *	to indicate break signals. Unpack it, convert to Linux representation
 *	and if need be stuff a break message down the tty.
 */

static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
	unsigned int addr = 0;
	unsigned int modem = 0;
	struct gsm_dlci *dlci;
	int len = clen;
	u8 *dp = data;
	struct tty_struct *tty;

	/* Accumulate the (possibly multi-byte) DLCI address EA */
	while (gsm_read_ea(&addr, *dp++) == 0) {
		len--;
		if (len == 0)
			return;
	}
	/* Must be at least one byte following the EA */
	len--;
	if (len <= 0)
		return;

	addr >>= 1;	/* Strip the EA bit to get the DLCI number */
	/* Closed port, or invalid ? */
	if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
		return;
	dlci = gsm->dlci[addr];

	/* Accumulate the modem status EA octets */
	while (gsm_read_ea(&modem, *dp++) == 0) {
		len--;
		if (len == 0)
			return;
	}
	/* Apply the new line state; tty may be NULL if port not open */
	tty = tty_port_tty_get(&dlci->port);
	gsm_process_modem(tty, dlci, modem);
	if (tty) {
		tty_wakeup(tty);
		tty_kref_put(tty);
	}
	/* Acknowledge by echoing the MSC back to the peer */
	gsm_control_reply(gsm, CMD_MSC, data, clen);
}
1076 | |||
/**
 *	gsm_control_rls	-	remote line status
 *	@gsm: GSM channel
 *	@data: data bytes
 *	@clen: data length
 *
 *	The modem sends us a two byte message on the control channel whenever
 *	it wishes to send us an error state from the virtual link. Stuff
 *	this into the uplink tty if present
 */

static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
{
	struct tty_struct *tty;
	unsigned int addr = 0 ;
	u8 bits;
	int len = clen;
	u8 *dp = data;

	/* Accumulate the (possibly multi-byte) DLCI address EA */
	while (gsm_read_ea(&addr, *dp++) == 0) {
		len--;
		if (len == 0)
			return;
	}
	/* Must be at least one byte following ea */
	len--;
	if (len <= 0)
		return;
	addr >>= 1;	/* Strip the EA bit to get the DLCI number */
	/* Closed port, or invalid ? */
	if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
		return;
	/* No error ? */
	bits = *dp;
	if ((bits & 1) == 0)
		return;
	/* See if we have an uplink tty */
	tty = tty_port_tty_get(&gsm->dlci[addr]->port);

	if (tty) {
		/* Translate the RLS error bits into tty flip-buffer flags */
		if (bits & 2)
			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (bits & 4)
			tty_insert_flip_char(tty, 0, TTY_PARITY);
		if (bits & 8)
			tty_insert_flip_char(tty, 0, TTY_FRAME);
		tty_flip_buffer_push(tty);
		tty_kref_put(tty);
	}
	/* Acknowledge by echoing the RLS back to the peer */
	gsm_control_reply(gsm, CMD_RLS, data, clen);
}
1128 | |||
1129 | static void gsm_dlci_begin_close(struct gsm_dlci *dlci); | ||
1130 | |||
/**
 *	gsm_control_message	-	DLCI 0 control processing
 *	@gsm: our GSM mux
 *	@command:  the command EA
 *	@data: data beyond the command/length EAs
 *	@clen: length
 *
 *	Input processor for control messages from the other end of the link.
 *	Processes the incoming request and queues a response frame or an
 *	NSC response if not supported
 */

static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
							u8 *data, int clen)
{
	u8 buf[1];	/* Scratch byte for the NSC reply payload */
	switch (command) {
	case CMD_CLD: {
		struct gsm_dlci *dlci = gsm->dlci[0];
		/* Modem wishes to close down */
		if (dlci) {
			/* Mark both the control channel and the whole mux
			   dead, then start the DLCI 0 close handshake */
			dlci->dead = 1;
			gsm->dead = 1;
			gsm_dlci_begin_close(dlci);
		}
		}
		break;
	case CMD_TEST:
		/* Modem wishes to test, reply with the data */
		gsm_control_reply(gsm, CMD_TEST, data, clen);
		break;
	case CMD_FCON:
		/* Modem wants us to STFU */
		gsm->constipated = 1;
		gsm_control_reply(gsm, CMD_FCON, NULL, 0);
		break;
	case CMD_FCOFF:
		/* Modem can accept data again */
		gsm->constipated = 0;
		gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
		/* Kick the link in case it is idling */
		gsm_data_kick(gsm);
		break;
	case CMD_MSC:
		/* Out of band modem line change indicator for a DLCI */
		gsm_control_modem(gsm, data, clen);
		break;
	case CMD_RLS:
		/* Out of band error reception for a DLCI */
		gsm_control_rls(gsm, data, clen);
		break;
	case CMD_PSC:
		/* Modem wishes to enter power saving state */
		gsm_control_reply(gsm, CMD_PSC, NULL, 0);
		break;
		/* Optional unsupported commands */
	case CMD_PN:	/* Parameter negotiation */
	case CMD_RPN:	/* Remote port negotation */
	case CMD_SNC:	/* Service negotation command */
	default:
		/* Reply to bad commands with an NSC */
		buf[0] = command;
		gsm_control_reply(gsm, CMD_NSC, buf, 1);
		break;
	}
}
1197 | |||
1198 | /** | ||
1199 | * gsm_control_response - process a response to our control | ||
1200 | * @gsm: our GSM mux | ||
1201 | * @command: the command (response) EA | ||
1202 | * @data: data beyond the command/length EA | ||
1203 | * @clen: length | ||
1204 | * | ||
1205 | * Process a response to an outstanding command. We only allow a single | ||
1206 | * control message in flight so this is fairly easy. All the clean up | ||
1207 | * is done by the caller, we just update the fields, flag it as done | ||
1208 | * and return | ||
1209 | */ | ||
1210 | |||
1211 | static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, | ||
1212 | u8 *data, int clen) | ||
1213 | { | ||
1214 | struct gsm_control *ctrl; | ||
1215 | unsigned long flags; | ||
1216 | |||
1217 | spin_lock_irqsave(&gsm->control_lock, flags); | ||
1218 | |||
1219 | ctrl = gsm->pending_cmd; | ||
1220 | /* Does the reply match our command */ | ||
1221 | command |= 1; | ||
1222 | if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { | ||
1223 | /* Our command was replied to, kill the retry timer */ | ||
1224 | del_timer(&gsm->t2_timer); | ||
1225 | gsm->pending_cmd = NULL; | ||
1226 | /* Rejected by the other end */ | ||
1227 | if (command == CMD_NSC) | ||
1228 | ctrl->error = -EOPNOTSUPP; | ||
1229 | ctrl->done = 1; | ||
1230 | wake_up(&gsm->event); | ||
1231 | } | ||
1232 | spin_unlock_irqrestore(&gsm->control_lock, flags); | ||
1233 | } | ||
1234 | |||
1235 | /** | ||
1236 | * gsm_control_transmit - send control packet | ||
1237 | * @gsm: gsm mux | ||
1238 | * @ctrl: frame to send | ||
1239 | * | ||
1240 | * Send out a pending control command (called under control lock) | ||
1241 | */ | ||
1242 | |||
1243 | static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) | ||
1244 | { | ||
1245 | struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, | ||
1246 | gsm->ftype|PF); | ||
1247 | if (msg == NULL) | ||
1248 | return; | ||
1249 | msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */ | ||
1250 | memcpy(msg->data + 1, ctrl->data, ctrl->len); | ||
1251 | gsm_data_queue(gsm->dlci[0], msg); | ||
1252 | } | ||
1253 | |||
/**
 *	gsm_control_retransmit	-	retransmit a control frame
 *	@data: pointer to our gsm object
 *
 *	Called off the T2 timer expiry in order to retransmit control frames
 *	that have been lost in the system somewhere. The control_lock protects
 *	us from colliding with another sender or a receive completion event.
 *	In that situation the timer may still occur in a small window but
 *	gsm->pending_cmd will be NULL and we just let the timer expire.
 */

static void gsm_control_retransmit(unsigned long data)
{
	struct gsm_mux *gsm = (struct gsm_mux *)data;
	struct gsm_control *ctrl;
	unsigned long flags;
	spin_lock_irqsave(&gsm->control_lock, flags);
	ctrl = gsm->pending_cmd;
	if (ctrl) {
		gsm->cretries--;
		if (gsm->cretries == 0) {
			/* Out of retries: fail the command with a timeout.
			   Unlock before wake_up; the waiter frees ctrl, so
			   we must not touch it after the wake */
			gsm->pending_cmd = NULL;
			ctrl->error = -ETIMEDOUT;
			ctrl->done = 1;
			spin_unlock_irqrestore(&gsm->control_lock, flags);
			wake_up(&gsm->event);
			return;
		}
		/* Try again and rearm T2 (t2 is in hundredths of a second) */
		gsm_control_transmit(gsm, ctrl);
		mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
	}
	spin_unlock_irqrestore(&gsm->control_lock, flags);
}
1287 | |||
/**
 *	gsm_control_send	-	send a control frame on DLCI 0
 *	@gsm: the GSM channel
 *	@command: command  to send including CR bit
 *	@data: bytes of data (must be kmalloced)
 *	@clen: length of the block to send
 *
 *	Queue and dispatch a control command. Only one command can be
 *	active at a time. In theory more can be outstanding but the matching
 *	gets really complicated so for now stick to one outstanding.
 *
 *	Returns the control descriptor (to pass to gsm_control_wait) or
 *	NULL if no memory. May sleep, so must not be called in atomic
 *	context (GFP_KERNEL allocation, wait_event).
 */

static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
		unsigned int command, u8 *data, int clen)
{
	struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
						GFP_KERNEL);
	unsigned long flags;
	if (ctrl == NULL)
		return NULL;
retry:
	/* Wait outside the lock, then re-check under it: another waiter
	   may have claimed the slot between wakeup and lock acquisition */
	wait_event(gsm->event, gsm->pending_cmd == NULL);
	spin_lock_irqsave(&gsm->control_lock, flags);
	if (gsm->pending_cmd != NULL) {
		spin_unlock_irqrestore(&gsm->control_lock, flags);
		goto retry;
	}
	ctrl->cmd = command;
	ctrl->data = data;
	ctrl->len = clen;
	gsm->pending_cmd = ctrl;
	/* Arm the T2 retry machinery before the first transmit */
	gsm->cretries = gsm->n2;
	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
	gsm_control_transmit(gsm, ctrl);
	spin_unlock_irqrestore(&gsm->control_lock, flags);
	return ctrl;
}
1325 | |||
1326 | /** | ||
1327 | * gsm_control_wait - wait for a control to finish | ||
1328 | * @gsm: GSM mux | ||
1329 | * @control: control we are waiting on | ||
1330 | * | ||
1331 | * Waits for the control to complete or time out. Frees any used | ||
1332 | * resources and returns 0 for success, or an error if the remote | ||
1333 | * rejected or ignored the request. | ||
1334 | */ | ||
1335 | |||
1336 | static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) | ||
1337 | { | ||
1338 | int err; | ||
1339 | wait_event(gsm->event, control->done == 1); | ||
1340 | err = control->error; | ||
1341 | kfree(control); | ||
1342 | return err; | ||
1343 | } | ||
1344 | |||
1345 | |||
1346 | /* | ||
1347 | * DLCI level handling: Needs krefs | ||
1348 | */ | ||
1349 | |||
1350 | /* | ||
1351 | * State transitions and timers | ||
1352 | */ | ||
1353 | |||
1354 | /** | ||
1355 | * gsm_dlci_close - a DLCI has closed | ||
1356 | * @dlci: DLCI that closed | ||
1357 | * | ||
1358 | * Perform processing when moving a DLCI into closed state. If there | ||
1359 | * is an attached tty this is hung up | ||
1360 | */ | ||
1361 | |||
1362 | static void gsm_dlci_close(struct gsm_dlci *dlci) | ||
1363 | { | ||
1364 | del_timer(&dlci->t1); | ||
1365 | if (debug & 8) | ||
1366 | printk("DLCI %d goes closed.\n", dlci->addr); | ||
1367 | dlci->state = DLCI_CLOSED; | ||
1368 | if (dlci->addr != 0) { | ||
1369 | struct tty_struct *tty = tty_port_tty_get(&dlci->port); | ||
1370 | if (tty) { | ||
1371 | tty_hangup(tty); | ||
1372 | tty_kref_put(tty); | ||
1373 | } | ||
1374 | kfifo_reset(dlci->fifo); | ||
1375 | } else | ||
1376 | dlci->gsm->dead = 1; | ||
1377 | wake_up(&dlci->gsm->event); | ||
1378 | /* A DLCI 0 close is a MUX termination so we need to kick that | ||
1379 | back to userspace somehow */ | ||
1380 | } | ||
1381 | |||
1382 | /** | ||
1383 | * gsm_dlci_open - a DLCI has opened | ||
1384 | * @dlci: DLCI that opened | ||
1385 | * | ||
1386 | * Perform processing when moving a DLCI into open state. | ||
1387 | */ | ||
1388 | |||
1389 | static void gsm_dlci_open(struct gsm_dlci *dlci) | ||
1390 | { | ||
1391 | /* Note that SABM UA .. SABM UA first UA lost can mean that we go | ||
1392 | open -> open */ | ||
1393 | del_timer(&dlci->t1); | ||
1394 | /* This will let a tty open continue */ | ||
1395 | dlci->state = DLCI_OPEN; | ||
1396 | if (debug & 8) | ||
1397 | printk("DLCI %d goes open.\n", dlci->addr); | ||
1398 | wake_up(&dlci->gsm->event); | ||
1399 | } | ||
1400 | |||
/**
 *	gsm_dlci_t1	-	T1 timer expiry
 *	@dlci: DLCI that opened
 *
 *	The T1 timer handles retransmits of control frames (essentially of
 *	SABM and DISC). We resend the command until the retry count runs out
 *	in which case an opening port goes back to closed and a closing port
 *	is simply put into closed state (any further frames from the other
 *	end will get a DM response)
 */

static void gsm_dlci_t1(unsigned long data)
{
	struct gsm_dlci *dlci = (struct gsm_dlci *)data;
	struct gsm_mux *gsm = dlci->gsm;

	switch (dlci->state) {
	case DLCI_OPENING:
		/* No UA yet: resend SABM and rearm T1, until retries run out */
		dlci->retries--;
		if (dlci->retries) {
			gsm_command(dlci->gsm, dlci->addr, SABM|PF);
			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
		} else
			gsm_dlci_close(dlci);
		break;
	case DLCI_CLOSING:
		/* No UA/DM yet: resend DISC, give up after n2 retries */
		dlci->retries--;
		if (dlci->retries) {
			gsm_command(dlci->gsm, dlci->addr, DISC|PF);
			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
		} else
			gsm_dlci_close(dlci);
		break;
	/* Other states: spurious timer expiry, nothing to do */
	}
}
1436 | |||
1437 | /** | ||
1438 | * gsm_dlci_begin_open - start channel open procedure | ||
1439 | * @dlci: DLCI to open | ||
1440 | * | ||
1441 | * Commence opening a DLCI from the Linux side. We issue SABM messages | ||
1442 | * to the modem which should then reply with a UA, at which point we | ||
1443 | * will move into open state. Opening is done asynchronously with retry | ||
1444 | * running off timers and the responses. | ||
1445 | */ | ||
1446 | |||
1447 | static void gsm_dlci_begin_open(struct gsm_dlci *dlci) | ||
1448 | { | ||
1449 | struct gsm_mux *gsm = dlci->gsm; | ||
1450 | if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING) | ||
1451 | return; | ||
1452 | dlci->retries = gsm->n2; | ||
1453 | dlci->state = DLCI_OPENING; | ||
1454 | gsm_command(dlci->gsm, dlci->addr, SABM|PF); | ||
1455 | mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); | ||
1456 | } | ||
1457 | |||
/**
 *	gsm_dlci_begin_close	-	start channel close procedure
 *	@dlci: DLCI to close
 *
 *	Commence closing a DLCI from the Linux side. We issue DISC messages
 *	to the modem which should then reply with a UA, at which point we
 *	will move into closed state. Closing is done asynchronously with retry
 *	off timers. We may also receive a DM reply from the other end which
 *	indicates the channel was already closed.
 */

static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
{
	struct gsm_mux *gsm = dlci->gsm;
	/* Nothing to do if already closed or a close is in flight */
	if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
		return;
	dlci->retries = gsm->n2;
	dlci->state = DLCI_CLOSING;
	/* First DISC; T1 expiry handles the retransmits */
	gsm_command(dlci->gsm, dlci->addr, DISC|PF);
	mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
}
1479 | |||
1480 | /** | ||
1481 | * gsm_dlci_data - data arrived | ||
1482 | * @dlci: channel | ||
1483 | * @data: block of bytes received | ||
1484 | * @len: length of received block | ||
1485 | * | ||
1486 | * A UI or UIH frame has arrived which contains data for a channel | ||
1487 | * other than the control channel. If the relevant virtual tty is | ||
1488 | * open we shovel the bits down it, if not we drop them. | ||
1489 | */ | ||
1490 | |||
1491 | static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len) | ||
1492 | { | ||
1493 | /* krefs .. */ | ||
1494 | struct tty_port *port = &dlci->port; | ||
1495 | struct tty_struct *tty = tty_port_tty_get(port); | ||
1496 | unsigned int modem = 0; | ||
1497 | |||
1498 | if (debug & 16) | ||
1499 | printk("%d bytes for tty %p\n", len, tty); | ||
1500 | if (tty) { | ||
1501 | switch (dlci->adaption) { | ||
1502 | /* Unsupported types */ | ||
1503 | /* Packetised interruptible data */ | ||
1504 | case 4: | ||
1505 | break; | ||
1506 | /* Packetised uininterruptible voice/data */ | ||
1507 | case 3: | ||
1508 | break; | ||
1509 | /* Asynchronous serial with line state in each frame */ | ||
1510 | case 2: | ||
1511 | while (gsm_read_ea(&modem, *data++) == 0) { | ||
1512 | len--; | ||
1513 | if (len == 0) | ||
1514 | return; | ||
1515 | } | ||
1516 | gsm_process_modem(tty, dlci, modem); | ||
1517 | /* Line state will go via DLCI 0 controls only */ | ||
1518 | case 1: | ||
1519 | default: | ||
1520 | tty_insert_flip_string(tty, data, len); | ||
1521 | tty_flip_buffer_push(tty); | ||
1522 | } | ||
1523 | tty_kref_put(tty); | ||
1524 | } | ||
1525 | } | ||
1526 | |||
1527 | /** | ||
1528 | * gsm_dlci_control - data arrived on control channel | ||
1529 | * @dlci: channel | ||
1530 | * @data: block of bytes received | ||
1531 | * @len: length of received block | ||
1532 | * | ||
1533 | * A UI or UIH frame has arrived which contains data for DLCI 0 the | ||
1534 | * control channel. This should contain a command EA followed by | ||
1535 | * control data bytes. The command EA contains a command/response bit | ||
1536 | * and we divide up the work accordingly. | ||
1537 | */ | ||
1538 | |||
1539 | static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len) | ||
1540 | { | ||
1541 | /* See what command is involved */ | ||
1542 | unsigned int command = 0; | ||
1543 | while (len-- > 0) { | ||
1544 | if (gsm_read_ea(&command, *data++) == 1) { | ||
1545 | int clen = *data++; | ||
1546 | len--; | ||
1547 | /* FIXME: this is properly an EA */ | ||
1548 | clen >>= 1; | ||
1549 | /* Malformed command ? */ | ||
1550 | if (clen > len) | ||
1551 | return; | ||
1552 | if (command & 1) | ||
1553 | gsm_control_message(dlci->gsm, command, | ||
1554 | data, clen); | ||
1555 | else | ||
1556 | gsm_control_response(dlci->gsm, command, | ||
1557 | data, clen); | ||
1558 | return; | ||
1559 | } | ||
1560 | } | ||
1561 | } | ||
1562 | |||
1563 | /* | ||
1564 | * Allocate/Free DLCI channels | ||
1565 | */ | ||
1566 | |||
1567 | /** | ||
1568 | * gsm_dlci_alloc - allocate a DLCI | ||
1569 | * @gsm: GSM mux | ||
1570 | * @addr: address of the DLCI | ||
1571 | * | ||
1572 | * Allocate and install a new DLCI object into the GSM mux. | ||
1573 | * | ||
1574 | * FIXME: review locking races | ||
1575 | */ | ||
1576 | |||
1577 | static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) | ||
1578 | { | ||
1579 | struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC); | ||
1580 | if (dlci == NULL) | ||
1581 | return NULL; | ||
1582 | spin_lock_init(&dlci->lock); | ||
1583 | dlci->fifo = &dlci->_fifo; | ||
1584 | if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) { | ||
1585 | kfree(dlci); | ||
1586 | return NULL; | ||
1587 | } | ||
1588 | |||
1589 | skb_queue_head_init(&dlci->skb_list); | ||
1590 | init_timer(&dlci->t1); | ||
1591 | dlci->t1.function = gsm_dlci_t1; | ||
1592 | dlci->t1.data = (unsigned long)dlci; | ||
1593 | tty_port_init(&dlci->port); | ||
1594 | dlci->port.ops = &gsm_port_ops; | ||
1595 | dlci->gsm = gsm; | ||
1596 | dlci->addr = addr; | ||
1597 | dlci->adaption = gsm->adaption; | ||
1598 | dlci->state = DLCI_CLOSED; | ||
1599 | if (addr) | ||
1600 | dlci->data = gsm_dlci_data; | ||
1601 | else | ||
1602 | dlci->data = gsm_dlci_command; | ||
1603 | gsm->dlci[addr] = dlci; | ||
1604 | return dlci; | ||
1605 | } | ||
1606 | |||
/**
 *	gsm_dlci_free	-	release DLCI
 *	@dlci: DLCI to destroy
 *
 *	Free up a DLCI. Currently to keep the lifetime rules sane we only
 *	clean up DLCI objects when the MUX closes rather than as the port
 *	is closed down on both the tty and mux levels.
 *
 *	Can sleep.
 */
static void gsm_dlci_free(struct gsm_dlci *dlci)
{
	struct tty_struct *tty = tty_port_tty_get(&dlci->port);
	if (tty) {
		/* Force the tty out from under any user before freeing */
		tty_vhangup(tty);
		tty_kref_put(tty);
	}
	/* Ensure the T1 handler is not running before we free (can sleep) */
	del_timer_sync(&dlci->t1);
	/* Unhook from the mux table, then release queued data and memory */
	dlci->gsm->dlci[dlci->addr] = NULL;
	kfifo_free(dlci->fifo);
	kfree(dlci);
}
1629 | |||
1630 | |||
1631 | /* | ||
1632 | * LAPBish link layer logic | ||
1633 | */ | ||
1634 | |||
/**
 *	gsm_queue	-	a GSM frame is ready to process
 *	@gsm: pointer to our gsm mux
 *
 *	At this point in time a frame has arrived and been demangled from
 *	the line encoding. All the differences between the encodings have
 *	been handled below us and the frame is unpacked into the structures.
 *	The fcs holds the header FCS but any data FCS must be added here.
 */

static void gsm_queue(struct gsm_mux *gsm)
{
	struct gsm_dlci *dlci;
	u8 cr;
	int address;
	/* We have to sneak a look at the packet body to do the FCS.
	   A somewhat layering violation in the spec */

	/* UI frames include the payload in the FCS; UIH covers header only */
	if ((gsm->control & ~PF) == UI)
		gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
	if (gsm->fcs != GOOD_FCS) {
		gsm->bad_fcs++;
		if (debug & 4)
			printk("BAD FCS %02x\n", gsm->fcs);
		return;
	}
	address = gsm->address >> 1;
	if (address >= NUM_DLCI)
		goto invalid;

	cr = gsm->address & 1;		/* C/R bit */

	gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);

	cr ^= 1 - gsm->initiator;	/* Flip so 1 always means command */
	dlci = gsm->dlci[address];

	switch (gsm->control) {
	case SABM|PF:
		/* Peer requests channel open; allocate on first use */
		if (cr == 0)
			goto invalid;
		if (dlci == NULL)
			dlci = gsm_dlci_alloc(gsm, address);
		if (dlci == NULL)
			return;
		if (dlci->dead)
			gsm_response(gsm, address, DM);
		else {
			gsm_response(gsm, address, UA);
			gsm_dlci_open(dlci);
		}
		break;
	case DISC|PF:
		/* Peer requests channel close; DM if it was never open */
		if (cr == 0)
			goto invalid;
		if (dlci == NULL || dlci->state == DLCI_CLOSED) {
			gsm_response(gsm, address, DM);
			return;
		}
		/* Real close complete */
		gsm_response(gsm, address, UA);
		gsm_dlci_close(dlci);
		break;
	case UA:
	case UA|PF:
		/* Acknowledgement of our SABM or DISC */
		if (cr == 0 || dlci == NULL)
			break;
		switch (dlci->state) {
		case DLCI_CLOSING:
			gsm_dlci_close(dlci);
			break;
		case DLCI_OPENING:
			gsm_dlci_open(dlci);
			break;
		}
		break;
	case DM:	/* DM can be valid unsolicited */
	case DM|PF:
		if (cr)
			goto invalid;
		if (dlci == NULL)
			return;
		gsm_dlci_close(dlci);
		break;
	case UI:
	case UI|PF:
	case UIH:
	case UIH|PF:
#if 0
		/* NOTE(review): C/R validation on data frames is disabled
		   here - presumably some modems get it wrong; confirm
		   before re-enabling */
		if (cr)
			goto invalid;
#endif
		/* Data for a closed channel gets a DM|PF back */
		if (dlci == NULL || dlci->state != DLCI_OPEN) {
			gsm_command(gsm, address, DM|PF);
			return;
		}
		/* Dispatch to gsm_dlci_data or gsm_dlci_command */
		dlci->data(dlci, gsm->buf, gsm->len);
		break;
	default:
		goto invalid;
	}
	return;
invalid:
	gsm->malformed++;
	return;
}
1741 | |||
1742 | |||
/**
 *	gsm0_receive	-	perform processing for non-transparency
 *	@gsm: gsm data for this ldisc instance
 *	@c: character
 *
 *	Receive bytes in gsm mode 0 (basic option: length-prefixed frames,
 *	no byte stuffing). Runs a per-byte state machine and hands each
 *	complete frame to gsm_queue().
 */

static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
{
	switch (gsm->state) {
	case GSM_SEARCH:	/* SOF marker */
		if (c == GSM0_SOF) {
			/* Frame start: reset accumulators for a new frame */
			gsm->state = GSM_ADDRESS;
			gsm->address = 0;
			gsm->len = 0;
			gsm->fcs = INIT_FCS;
		}
		break;		/* Address EA */
	case GSM_ADDRESS:
		/* Accumulate EA-encoded address bytes into gsm->address */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->address, c))
			gsm->state = GSM_CONTROL;
		break;
	case GSM_CONTROL:	/* Control Byte */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		gsm->control = c;
		gsm->state = GSM_LEN;
		break;
	case GSM_LEN:		/* Length EA */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->len, c)) {
			/* Reject frames longer than our receive unit */
			if (gsm->len > gsm->mru) {
				gsm->bad_size++;
				gsm->state = GSM_SEARCH;
				break;
			}
			gsm->count = 0;
			gsm->state = GSM_DATA;
		}
		break;
	case GSM_DATA:		/* Data */
		gsm->buf[gsm->count++] = c;
		if (gsm->count == gsm->len)
			gsm->state = GSM_FCS;
		break;
	case GSM_FCS:		/* FCS follows the packet */
		gsm->fcs = c;
		gsm_queue(gsm);
		/* And then back for the next frame */
		gsm->state = GSM_SEARCH;
		break;
	}
}
1797 | |||
/**
 *	gsm1_receive	-	perform processing for the advanced option
 *	@gsm: gsm data for this ldisc instance
 *	@c: character
 *
 *	Receive bytes in mode 1 (Advanced option)
 */
1805 | |||
static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
{
	if (c == GSM1_SOF) {
		/* EOF is only valid in frame if we have got to the data state
		   and received at least one byte (the FCS) */
		if (gsm->state == GSM_DATA && gsm->count) {
			/* Extract the FCS: in mode 1 there is no length
			   field, so the FCS is simply the last byte before
			   the closing SOF */
			gsm->count--;
			gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
			gsm->len = gsm->count;
			gsm_queue(gsm);
			gsm->state = GSM_START;
			return;
		}
		/* Any partial frame was a runt so go back to start */
		if (gsm->state != GSM_START) {
			gsm->malformed++;
			gsm->state = GSM_START;
		}
		/* A SOF in GSM_START means we are still reading idling or
		   framing bytes */
		return;
	}

	/* An escape marker XORs GSM1_ESCAPE_BITS into the next byte */
	if (c == GSM1_ESCAPE) {
		gsm->escape = 1;
		return;
	}

	/* Only an unescaped SOF gets us out of GSM search */
	if (gsm->state == GSM_SEARCH)
		return;

	if (gsm->escape) {
		c ^= GSM1_ESCAPE_BITS;
		gsm->escape = 0;
	}
	switch (gsm->state) {
	case GSM_START:		/* First byte after SOF */
		gsm->address = 0;
		gsm->state = GSM_ADDRESS;
		gsm->fcs = INIT_FCS;
		/* Drop through */
	case GSM_ADDRESS:	/* Address continuation */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->address, c))
			gsm->state = GSM_CONTROL;
		break;
	case GSM_CONTROL:	/* Control Byte */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		gsm->control = c;
		gsm->count = 0;
		gsm->state = GSM_DATA;
		break;
	case GSM_DATA:		/* Data */
		if (gsm->count > gsm->mru ) {	/* Allow one for the FCS */
			gsm->state = GSM_OVERRUN;
			gsm->bad_size++;
		} else
			gsm->buf[gsm->count++] = c;
		break;
	case GSM_OVERRUN:	/* Over-long - eg a dropped SOF */
		/* Discard bytes until the terminating SOF resyncs us */
		break;
	}
}
1871 | |||
1872 | /** | ||
1873 | * gsm_error - handle tty error | ||
1874 | * @gsm: ldisc data | ||
1875 | * @data: byte received (may be invalid) | ||
1876 | * @flag: error received | ||
1877 | * | ||
1878 | * Handle an error in the receipt of data for a frame. Currently we just | ||
1879 | * go back to hunting for a SOF. | ||
1880 | * | ||
1881 | * FIXME: better diagnostics ? | ||
1882 | */ | ||
1883 | |||
1884 | static void gsm_error(struct gsm_mux *gsm, | ||
1885 | unsigned char data, unsigned char flag) | ||
1886 | { | ||
1887 | gsm->state = GSM_SEARCH; | ||
1888 | gsm->io_error++; | ||
1889 | } | ||
1890 | |||
1891 | /** | ||
1892 | * gsm_cleanup_mux - generic GSM protocol cleanup | ||
1893 | * @gsm: our mux | ||
1894 | * | ||
1895 | * Clean up the bits of the mux which are the same for all framing | ||
1896 | * protocols. Remove the mux from the mux table, stop all the timers | ||
1897 | * and then shut down each device hanging up the channels as we go. | ||
1898 | */ | ||
1899 | |||
void gsm_cleanup_mux(struct gsm_mux *gsm)
{
	int i;
	struct gsm_dlci *dlci = gsm->dlci[0];
	struct gsm_msg *txq;

	gsm->dead = 1;		/* Block any further gsmtty opens */

	/* Unhook ourselves from the mux table */
	spin_lock(&gsm_mux_lock);
	for (i = 0; i < MAX_MUX; i++) {
		if (gsm_mux[i] == gsm) {
			gsm_mux[i] = NULL;
			break;
		}
	}
	spin_unlock(&gsm_mux_lock);
	/* A mux that was never in the table indicates a bug elsewhere */
	WARN_ON(i == MAX_MUX);

	del_timer_sync(&gsm->t2_timer);
	/* Now we are sure T2 has stopped */
	if (dlci) {
		/* Shut down the control channel with a DISC handshake */
		dlci->dead = 1;
		gsm_dlci_begin_close(dlci);
		/* NOTE(review): if a signal interrupts this wait we carry on
		   tearing down before DLCI 0 reaches DLCI_CLOSED; the DLCI
		   is freed below regardless - confirm this is intended. */
		wait_event_interruptible(gsm->event,
					dlci->state == DLCI_CLOSED);
	}
	/* Free up any link layer users */
	for (i = 0; i < NUM_DLCI; i++)
		if (gsm->dlci[i])
			gsm_dlci_free(gsm->dlci[i]);
	/* Now wipe the queues */
	for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
		gsm->tx_head = txq->next;
		kfree(txq);
	}
	gsm->tx_tail = NULL;
}
1937 | EXPORT_SYMBOL_GPL(gsm_cleanup_mux); | ||
1938 | |||
1939 | /** | ||
1940 | * gsm_activate_mux - generic GSM setup | ||
1941 | * @gsm: our mux | ||
1942 | * | ||
1943 | * Set up the bits of the mux which are the same for all framing | ||
1944 | * protocols. Add the mux to the mux table so it can be opened and | ||
1945 | * finally kick off connecting to DLCI 0 on the modem. | ||
1946 | */ | ||
1947 | |||
1948 | int gsm_activate_mux(struct gsm_mux *gsm) | ||
1949 | { | ||
1950 | struct gsm_dlci *dlci; | ||
1951 | int i = 0; | ||
1952 | |||
1953 | init_timer(&gsm->t2_timer); | ||
1954 | gsm->t2_timer.function = gsm_control_retransmit; | ||
1955 | gsm->t2_timer.data = (unsigned long)gsm; | ||
1956 | init_waitqueue_head(&gsm->event); | ||
1957 | spin_lock_init(&gsm->control_lock); | ||
1958 | spin_lock_init(&gsm->tx_lock); | ||
1959 | |||
1960 | if (gsm->encoding == 0) | ||
1961 | gsm->receive = gsm0_receive; | ||
1962 | else | ||
1963 | gsm->receive = gsm1_receive; | ||
1964 | gsm->error = gsm_error; | ||
1965 | |||
1966 | spin_lock(&gsm_mux_lock); | ||
1967 | for (i = 0; i < MAX_MUX; i++) { | ||
1968 | if (gsm_mux[i] == NULL) { | ||
1969 | gsm_mux[i] = gsm; | ||
1970 | break; | ||
1971 | } | ||
1972 | } | ||
1973 | spin_unlock(&gsm_mux_lock); | ||
1974 | if (i == MAX_MUX) | ||
1975 | return -EBUSY; | ||
1976 | |||
1977 | dlci = gsm_dlci_alloc(gsm, 0); | ||
1978 | if (dlci == NULL) | ||
1979 | return -ENOMEM; | ||
1980 | gsm->dead = 0; /* Tty opens are now permissible */ | ||
1981 | return 0; | ||
1982 | } | ||
1983 | EXPORT_SYMBOL_GPL(gsm_activate_mux); | ||
1984 | |||
1985 | /** | ||
1986 | * gsm_free_mux - free up a mux | ||
1987 | * @mux: mux to free | ||
1988 | * | ||
1989 | * Dispose of allocated resources for a dead mux. No refcounting | ||
1990 | * at present so the mux must be truely dead. | ||
1991 | */ | ||
1992 | void gsm_free_mux(struct gsm_mux *gsm) | ||
1993 | { | ||
1994 | kfree(gsm->txframe); | ||
1995 | kfree(gsm->buf); | ||
1996 | kfree(gsm); | ||
1997 | } | ||
1998 | EXPORT_SYMBOL_GPL(gsm_free_mux); | ||
1999 | |||
2000 | /** | ||
2001 | * gsm_alloc_mux - allocate a mux | ||
2002 | * | ||
2003 | * Creates a new mux ready for activation. | ||
2004 | */ | ||
2005 | |||
2006 | struct gsm_mux *gsm_alloc_mux(void) | ||
2007 | { | ||
2008 | struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); | ||
2009 | if (gsm == NULL) | ||
2010 | return NULL; | ||
2011 | gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL); | ||
2012 | if (gsm->buf == NULL) { | ||
2013 | kfree(gsm); | ||
2014 | return NULL; | ||
2015 | } | ||
2016 | gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL); | ||
2017 | if (gsm->txframe == NULL) { | ||
2018 | kfree(gsm->buf); | ||
2019 | kfree(gsm); | ||
2020 | return NULL; | ||
2021 | } | ||
2022 | spin_lock_init(&gsm->lock); | ||
2023 | |||
2024 | gsm->t1 = T1; | ||
2025 | gsm->t2 = T2; | ||
2026 | gsm->n2 = N2; | ||
2027 | gsm->ftype = UIH; | ||
2028 | gsm->initiator = 0; | ||
2029 | gsm->adaption = 1; | ||
2030 | gsm->encoding = 1; | ||
2031 | gsm->mru = 64; /* Default to encoding 1 so these should be 64 */ | ||
2032 | gsm->mtu = 64; | ||
2033 | gsm->dead = 1; /* Avoid early tty opens */ | ||
2034 | |||
2035 | return gsm; | ||
2036 | } | ||
2037 | EXPORT_SYMBOL_GPL(gsm_alloc_mux); | ||
2038 | |||
2039 | |||
2040 | |||
2041 | |||
2042 | /** | ||
2043 | * gsmld_output - write to link | ||
2044 | * @gsm: our mux | ||
2045 | * @data: bytes to output | ||
2046 | * @len: size | ||
2047 | * | ||
2048 | * Write a block of data from the GSM mux to the data channel. This | ||
2049 | * will eventually be serialized from above but at the moment isn't. | ||
2050 | */ | ||
2051 | |||
2052 | static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len) | ||
2053 | { | ||
2054 | if (tty_write_room(gsm->tty) < len) { | ||
2055 | set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); | ||
2056 | return -ENOSPC; | ||
2057 | } | ||
2058 | if (debug & 4) { | ||
2059 | printk("-->%d bytes out\n", len); | ||
2060 | hex_packet(data, len); | ||
2061 | } | ||
2062 | gsm->tty->ops->write(gsm->tty, data, len); | ||
2063 | return len; | ||
2064 | } | ||
2065 | |||
2066 | /** | ||
2067 | * gsmld_attach_gsm - mode set up | ||
2068 | * @tty: our tty structure | ||
2069 | * @gsm: our mux | ||
2070 | * | ||
2071 | * Set up the MUX for basic mode and commence connecting to the | ||
2072 | * modem. Currently called from the line discipline set up but | ||
2073 | * will need moving to an ioctl path. | ||
2074 | */ | ||
2075 | |||
2076 | static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) | ||
2077 | { | ||
2078 | int ret; | ||
2079 | |||
2080 | gsm->tty = tty_kref_get(tty); | ||
2081 | gsm->output = gsmld_output; | ||
2082 | ret = gsm_activate_mux(gsm); | ||
2083 | if (ret != 0) | ||
2084 | tty_kref_put(gsm->tty); | ||
2085 | return ret; | ||
2086 | } | ||
2087 | |||
2088 | |||
2089 | /** | ||
2090 | * gsmld_detach_gsm - stop doing 0710 mux | ||
2091 | * @tty: tty atttached to the mux | ||
2092 | * @gsm: mux | ||
2093 | * | ||
2094 | * Shutdown and then clean up the resources used by the line discipline | ||
2095 | */ | ||
2096 | |||
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
	/* The mux must still be bound to the tty it was attached to */
	WARN_ON(tty != gsm->tty);
	/* Shut the mux down first; it must not touch the tty afterwards */
	gsm_cleanup_mux(gsm);
	tty_kref_put(gsm->tty);
	gsm->tty = NULL;
}
2104 | |||
2105 | static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp, | ||
2106 | char *fp, int count) | ||
2107 | { | ||
2108 | struct gsm_mux *gsm = tty->disc_data; | ||
2109 | const unsigned char *dp; | ||
2110 | char *f; | ||
2111 | int i; | ||
2112 | char buf[64]; | ||
2113 | char flags; | ||
2114 | |||
2115 | if (debug & 4) { | ||
2116 | printk("Inbytes %dd\n", count); | ||
2117 | hex_packet(cp, count); | ||
2118 | } | ||
2119 | |||
2120 | for (i = count, dp = cp, f = fp; i; i--, dp++) { | ||
2121 | flags = *f++; | ||
2122 | switch (flags) { | ||
2123 | case TTY_NORMAL: | ||
2124 | gsm->receive(gsm, *dp); | ||
2125 | break; | ||
2126 | case TTY_OVERRUN: | ||
2127 | case TTY_BREAK: | ||
2128 | case TTY_PARITY: | ||
2129 | case TTY_FRAME: | ||
2130 | gsm->error(gsm, *dp, flags); | ||
2131 | break; | ||
2132 | default: | ||
2133 | printk(KERN_ERR "%s: unknown flag %d\n", | ||
2134 | tty_name(tty, buf), flags); | ||
2135 | break; | ||
2136 | } | ||
2137 | } | ||
2138 | /* FASYNC if needed ? */ | ||
2139 | /* If clogged call tty_throttle(tty); */ | ||
2140 | } | ||
2141 | |||
2142 | /** | ||
2143 | * gsmld_chars_in_buffer - report available bytes | ||
2144 | * @tty: tty device | ||
2145 | * | ||
2146 | * Report the number of characters buffered to be delivered to user | ||
2147 | * at this instant in time. | ||
2148 | * | ||
2149 | * Locking: gsm lock | ||
2150 | */ | ||
2151 | |||
static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
{
	/* Nothing is ever buffered for delivery to user space at the
	   ldisc level - data flows out through the gsmtty devices */
	return 0;
}
2156 | |||
2157 | /** | ||
2158 | * gsmld_flush_buffer - clean input queue | ||
2159 | * @tty: terminal device | ||
2160 | * | ||
2161 | * Flush the input buffer. Called when the line discipline is | ||
2162 | * being closed, when the tty layer wants the buffer flushed (eg | ||
2163 | * at hangup). | ||
2164 | */ | ||
2165 | |||
static void gsmld_flush_buffer(struct tty_struct *tty)
{
	/* No input is buffered at this level, so nothing to discard */
}
2169 | |||
2170 | /** | ||
2171 | * gsmld_close - close the ldisc for this tty | ||
2172 | * @tty: device | ||
2173 | * | ||
2174 | * Called from the terminal layer when this line discipline is | ||
2175 | * being shut down, either because of a close or becsuse of a | ||
2176 | * discipline change. The function will not be called while other | ||
2177 | * ldisc methods are in progress. | ||
2178 | */ | ||
2179 | |||
static void gsmld_close(struct tty_struct *tty)
{
	struct gsm_mux *gsm = tty->disc_data;

	/* Shut down and unhook the mux before anything is freed */
	gsmld_detach_gsm(tty, gsm);

	gsmld_flush_buffer(tty);
	/* Do other clean up here */
	gsm_free_mux(gsm);
}
2190 | |||
2191 | /** | ||
2192 | * gsmld_open - open an ldisc | ||
2193 | * @tty: terminal to open | ||
2194 | * | ||
2195 | * Called when this line discipline is being attached to the | ||
2196 | * terminal device. Can sleep. Called serialized so that no | ||
2197 | * other events will occur in parallel. No further open will occur | ||
2198 | * until a close. | ||
2199 | */ | ||
2200 | |||
2201 | static int gsmld_open(struct tty_struct *tty) | ||
2202 | { | ||
2203 | struct gsm_mux *gsm; | ||
2204 | |||
2205 | if (tty->ops->write == NULL) | ||
2206 | return -EINVAL; | ||
2207 | |||
2208 | /* Attach our ldisc data */ | ||
2209 | gsm = gsm_alloc_mux(); | ||
2210 | if (gsm == NULL) | ||
2211 | return -ENOMEM; | ||
2212 | |||
2213 | tty->disc_data = gsm; | ||
2214 | tty->receive_room = 65536; | ||
2215 | |||
2216 | /* Attach the initial passive connection */ | ||
2217 | gsm->encoding = 1; | ||
2218 | return gsmld_attach_gsm(tty, gsm); | ||
2219 | } | ||
2220 | |||
2221 | /** | ||
2222 | * gsmld_write_wakeup - asynchronous I/O notifier | ||
2223 | * @tty: tty device | ||
2224 | * | ||
2225 | * Required for the ptys, serial driver etc. since processes | ||
2226 | * that attach themselves to the master and rely on ASYNC | ||
2227 | * IO must be woken up | ||
2228 | */ | ||
2229 | |||
static void gsmld_write_wakeup(struct tty_struct *tty)
{
	struct gsm_mux *gsm = tty->disc_data;

	/* Queue poll */
	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	/* Push any frames already queued on the mux out to the device */
	gsm_data_kick(gsm);
	/* If the transmit backlog has drained, pull more data up from
	   the individual DLCIs.
	   NOTE(review): gsm_data_kick is called here without taking
	   gsm->tx_lock - verify against its other call sites. */
	if (gsm->tx_bytes < TX_THRESH_LO)
		gsm_dlci_data_sweep(gsm);
}
2240 | |||
2241 | /** | ||
2242 | * gsmld_read - read function for tty | ||
2243 | * @tty: tty device | ||
2244 | * @file: file object | ||
2245 | * @buf: userspace buffer pointer | ||
2246 | * @nr: size of I/O | ||
2247 | * | ||
2248 | * Perform reads for the line discipline. We are guaranteed that the | ||
2249 | * line discipline will not be closed under us but we may get multiple | ||
2250 | * parallel readers and must handle this ourselves. We may also get | ||
2251 | * a hangup. Always called in user context, may sleep. | ||
2252 | * | ||
2253 | * This code must be sure never to sleep through a hangup. | ||
2254 | */ | ||
2255 | |||
static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	/* Raw reads from the muxed tty are not supported: user space
	   talks to the individual gsmtty devices instead */
	return -EOPNOTSUPP;
}
2261 | |||
2262 | /** | ||
2263 | * gsmld_write - write function for tty | ||
2264 | * @tty: tty device | ||
2265 | * @file: file object | ||
2266 | * @buf: userspace buffer pointer | ||
2267 | * @nr: size of I/O | ||
2268 | * | ||
2269 | * Called when the owner of the device wants to send a frame | ||
2270 | * itself (or some other control data). The data is transferred | ||
2271 | * as-is and must be properly framed and checksummed as appropriate | ||
2272 | * by userspace. Frames are either sent whole or not at all as this | ||
2273 | * avoids pain user side. | ||
2274 | */ | ||
2275 | |||
2276 | static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, | ||
2277 | const unsigned char *buf, size_t nr) | ||
2278 | { | ||
2279 | int space = tty_write_room(tty); | ||
2280 | if (space >= nr) | ||
2281 | return tty->ops->write(tty, buf, nr); | ||
2282 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | ||
2283 | return -ENOBUFS; | ||
2284 | } | ||
2285 | |||
2286 | /** | ||
2287 | * gsmld_poll - poll method for N_GSM0710 | ||
2288 | * @tty: terminal device | ||
2289 | * @file: file accessing it | ||
2290 | * @wait: poll table | ||
2291 | * | ||
2292 | * Called when the line discipline is asked to poll() for data or | ||
2293 | * for special events. This code is not serialized with respect to | ||
2294 | * other events save open/close. | ||
2295 | * | ||
2296 | * This code must be sure never to sleep through a hangup. | ||
2297 | * Called without the kernel lock held - fine | ||
2298 | */ | ||
2299 | |||
2300 | static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file, | ||
2301 | poll_table *wait) | ||
2302 | { | ||
2303 | unsigned int mask = 0; | ||
2304 | struct gsm_mux *gsm = tty->disc_data; | ||
2305 | |||
2306 | poll_wait(file, &tty->read_wait, wait); | ||
2307 | poll_wait(file, &tty->write_wait, wait); | ||
2308 | if (tty_hung_up_p(file)) | ||
2309 | mask |= POLLHUP; | ||
2310 | if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) | ||
2311 | mask |= POLLOUT | POLLWRNORM; | ||
2312 | if (gsm->dead) | ||
2313 | mask |= POLLHUP; | ||
2314 | return mask; | ||
2315 | } | ||
2316 | |||
static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
							struct gsm_config *c)
{
	int need_close = 0;
	int need_restart = 0;

	/* Stuff we don't support yet - UI or I frame transport, windowing */
	if ((c->adaption !=1 && c->adaption != 2) || c->k)
		return -EOPNOTSUPP;
	/* Check the MRU/MTU range looks sane */
	if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
		return -EINVAL;
	if (c->n2 < 3)
		return -EINVAL;
	if (c->encapsulation > 1)	/* Basic, advanced, no I */
		return -EINVAL;
	if (c->initiator > 1)
		return -EINVAL;
	if (c->i == 0 || c->i > 2)	/* UIH and UI only */
		return -EINVAL;
	/*
	 *	See what is needed for reconfiguration
	 */

	/* Timing fields */
	if (c->t1 != 0 && c->t1 != gsm->t1)
		need_restart = 1;
	if (c->t2 != 0 && c->t2 != gsm->t2)
		need_restart = 1;
	if (c->encapsulation != gsm->encoding)
		need_restart = 1;
	if (c->adaption != gsm->adaption)
		need_restart = 1;
	/* Requires care */
	if (c->initiator != gsm->initiator)
		need_close = 1;
	if (c->mru != gsm->mru)
		need_restart = 1;
	if (c->mtu != gsm->mtu)
		need_restart = 1;

	/*
	 *	Close down what is needed, restart and initiate the new
	 *	configuration
	 */

	if (need_close || need_restart) {
		/* Bring down DLCI 0 first - that shuts the whole link */
		gsm_dlci_begin_close(gsm->dlci[0]);
		/* This will timeout if the link is down due to N2 expiring */
		wait_event_interruptible(gsm->event,
				gsm->dlci[0]->state == DLCI_CLOSED);
		if (signal_pending(current))
			return -EINTR;
	}
	if (need_restart)
		gsm_cleanup_mux(gsm);

	/* Apply the new settings.
	   NOTE(review): c->n2 is validated above but never copied into
	   gsm->n2 - confirm whether that is intentional. */
	gsm->initiator = c->initiator;
	gsm->mru = c->mru;
	gsm->encoding = c->encapsulation;
	gsm->adaption = c->adaption;

	if (c->i == 1)
		gsm->ftype = UIH;
	else if (c->i == 2)
		gsm->ftype = UI;

	/* Zero timer values mean "keep the current setting" */
	if (c->t1)
		gsm->t1 = c->t1;
	if (c->t2)
		gsm->t2 = c->t2;

	/* FIXME: We need to separate activation/deactivation from adding
	   and removing from the mux array */
	if (need_restart)
		gsm_activate_mux(gsm);
	if (gsm->initiator && need_close)
		gsm_dlci_begin_open(gsm->dlci[0]);
	return 0;
}
2397 | |||
2398 | static int gsmld_ioctl(struct tty_struct *tty, struct file *file, | ||
2399 | unsigned int cmd, unsigned long arg) | ||
2400 | { | ||
2401 | struct gsm_config c; | ||
2402 | struct gsm_mux *gsm = tty->disc_data; | ||
2403 | |||
2404 | switch (cmd) { | ||
2405 | case GSMIOC_GETCONF: | ||
2406 | memset(&c, 0, sizeof(c)); | ||
2407 | c.adaption = gsm->adaption; | ||
2408 | c.encapsulation = gsm->encoding; | ||
2409 | c.initiator = gsm->initiator; | ||
2410 | c.t1 = gsm->t1; | ||
2411 | c.t2 = gsm->t2; | ||
2412 | c.t3 = 0; /* Not supported */ | ||
2413 | c.n2 = gsm->n2; | ||
2414 | if (gsm->ftype == UIH) | ||
2415 | c.i = 1; | ||
2416 | else | ||
2417 | c.i = 2; | ||
2418 | printk("Ftype %d i %d\n", gsm->ftype, c.i); | ||
2419 | c.mru = gsm->mru; | ||
2420 | c.mtu = gsm->mtu; | ||
2421 | c.k = 0; | ||
2422 | if (copy_to_user((void *)arg, &c, sizeof(c))) | ||
2423 | return -EFAULT; | ||
2424 | return 0; | ||
2425 | case GSMIOC_SETCONF: | ||
2426 | if (copy_from_user(&c, (void *)arg, sizeof(c))) | ||
2427 | return -EFAULT; | ||
2428 | return gsmld_config(tty, gsm, &c); | ||
2429 | default: | ||
2430 | return n_tty_ioctl_helper(tty, file, cmd, arg); | ||
2431 | } | ||
2432 | } | ||
2433 | |||
2434 | |||
2435 | /* Line discipline for real tty */ | ||
/* NOTE(review): neither static nor EXPORT_SYMBOLed - confirm whether
   this can be made static to this file. */
struct tty_ldisc_ops tty_ldisc_packet = {
	.owner		 = THIS_MODULE,
	.magic           = TTY_LDISC_MAGIC,
	.name            = "n_gsm",
	.open            = gsmld_open,
	.close           = gsmld_close,
	.flush_buffer    = gsmld_flush_buffer,
	.chars_in_buffer = gsmld_chars_in_buffer,
	.read            = gsmld_read,
	.write           = gsmld_write,
	.ioctl           = gsmld_ioctl,
	.poll            = gsmld_poll,
	.receive_buf     = gsmld_receive_buf,
	.write_wakeup    = gsmld_write_wakeup
};
2451 | |||
2452 | /* | ||
2453 | * Virtual tty side | ||
2454 | */ | ||
2455 | |||
2456 | #define TX_SIZE 512 | ||
2457 | |||
static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
{
	u8 modembits[5];
	struct gsm_control *ctrl;
	int len = 2;

	/* A break indication adds one extra byte to the MSC payload */
	if (brk)
		len++;

	modembits[0] = len << 1 | EA;		/* Data bytes */
	modembits[1] = dlci->addr << 2 | 3;	/* DLCI, EA, 1 */
	modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
	if (brk)
		modembits[3] = brk << 4 | 2 | EA;	/* Valid, EA */
	/* Issue the MSC command and block until the peer acknowledges */
	ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
	if (ctrl == NULL)
		return -ENOMEM;
	return gsm_control_wait(dlci->gsm, ctrl);
}
2477 | |||
2478 | static int gsm_carrier_raised(struct tty_port *port) | ||
2479 | { | ||
2480 | struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); | ||
2481 | /* Not yet open so no carrier info */ | ||
2482 | if (dlci->state != DLCI_OPEN) | ||
2483 | return 0; | ||
2484 | if (debug & 2) | ||
2485 | return 1; | ||
2486 | return dlci->modem_rx & TIOCM_CD; | ||
2487 | } | ||
2488 | |||
2489 | static void gsm_dtr_rts(struct tty_port *port, int onoff) | ||
2490 | { | ||
2491 | struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); | ||
2492 | unsigned int modem_tx = dlci->modem_tx; | ||
2493 | if (onoff) | ||
2494 | modem_tx |= TIOCM_DTR | TIOCM_RTS; | ||
2495 | else | ||
2496 | modem_tx &= ~(TIOCM_DTR | TIOCM_RTS); | ||
2497 | if (modem_tx != dlci->modem_tx) { | ||
2498 | dlci->modem_tx = modem_tx; | ||
2499 | gsmtty_modem_update(dlci, 0); | ||
2500 | } | ||
2501 | } | ||
2502 | |||
/* Hooks used by the generic tty_port helpers for carrier handling */
static const struct tty_port_operations gsm_port_ops = {
	.carrier_raised = gsm_carrier_raised,
	.dtr_rts = gsm_dtr_rts,
};
2507 | |||
2508 | |||
static int gsmtty_open(struct tty_struct *tty, struct file *filp)
{
	struct gsm_mux *gsm;
	struct gsm_dlci *dlci;
	struct tty_port *port;
	unsigned int line = tty->index;
	unsigned int mux = line >> 6;	/* 64 minor numbers per mux */

	line = line & 0x3F;		/* Channel within that mux */

	if (mux >= MAX_MUX)
		return -ENXIO;
	/* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
	if (gsm_mux[mux] == NULL)
		return -EUNATCH;
	if (line == 0 || line > 61)	/* 62/63 reserved */
		return -ECHRNG;
	gsm = gsm_mux[mux];
	if (gsm->dead)
		return -EL2HLT;
	/* Allocate the channel control block on first open */
	dlci = gsm->dlci[line];
	if (dlci == NULL)
		dlci = gsm_dlci_alloc(gsm, line);
	if (dlci == NULL)
		return -ENOMEM;
	port = &dlci->port;
	port->count++;
	tty->driver_data = dlci;
	tty_port_tty_set(port, tty);

	dlci->modem_rx = 0;
	/* We could in theory open and close before we wait - eg if we get
	   a DM straight back. This is ok as that will have caused a hangup */
	set_bit(ASYNCB_INITIALIZED, &port->flags);
	/* Start sending off SABM messages */
	gsm_dlci_begin_open(dlci);
	/* And wait for virtual carrier */
	return tty_port_block_til_ready(port, tty, filp);
}
2548 | |||
static void gsmtty_close(struct tty_struct *tty, struct file *filp)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* May be called after a failed/hung-up open with no channel */
	if (dlci == NULL)
		return;
	/* Zero return means this was not the final close */
	if (tty_port_close_start(&dlci->port, tty, filp) == 0)
		return;
	/* Final close: start the DISC handshake for this channel */
	gsm_dlci_begin_close(dlci);
	tty_port_close_end(&dlci->port, tty);
	tty_port_tty_set(&dlci->port, NULL);
}
2560 | |||
static void gsmtty_hangup(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Drop the tty/port association and shut the logical channel */
	tty_port_hangup(&dlci->port);
	gsm_dlci_begin_close(dlci);
}
2567 | |||
2568 | static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, | ||
2569 | int len) | ||
2570 | { | ||
2571 | struct gsm_dlci *dlci = tty->driver_data; | ||
2572 | /* Stuff the bytes into the fifo queue */ | ||
2573 | int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); | ||
2574 | /* Need to kick the channel */ | ||
2575 | gsm_dlci_data_kick(dlci); | ||
2576 | return sent; | ||
2577 | } | ||
2578 | |||
static int gsmtty_write_room(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Room left in this channel's fixed-size transmit fifo */
	return TX_SIZE - kfifo_len(dlci->fifo);
}
2584 | |||
static int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Bytes queued in the fifo but not yet sent over the mux */
	return kfifo_len(dlci->fifo);
}
2590 | |||
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Discard all unsent output queued for this channel. */
	/* Caution needed: If we implement reliable transport classes
	   then the data being transmitted can't simply be junked once
	   it has first hit the stack. Until then we can just blow it
	   away */
	kfifo_reset(dlci->fifo);
	/* Need to unhook this DLCI from the transmit queue logic */
}
2601 | |||
static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
{
	/* The FIFO handles the queue so the kernel will do the right
	   thing waiting on chars_in_buffer before calling us. No work
	   to do here */
}
2608 | |||
static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Modem lines as last reported by the remote end via MSC */
	return dlci->modem_rx;
}
2614 | |||
2615 | static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp, | ||
2616 | unsigned int set, unsigned int clear) | ||
2617 | { | ||
2618 | struct gsm_dlci *dlci = tty->driver_data; | ||
2619 | unsigned int modem_tx = dlci->modem_tx; | ||
2620 | |||
2621 | modem_tx &= clear; | ||
2622 | modem_tx |= set; | ||
2623 | |||
2624 | if (modem_tx != dlci->modem_tx) { | ||
2625 | dlci->modem_tx = modem_tx; | ||
2626 | return gsmtty_modem_update(dlci, 0); | ||
2627 | } | ||
2628 | return 0; | ||
2629 | } | ||
2630 | |||
2631 | |||
static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	/* No channel-specific ioctls yet; let the tty core handle it */
	return -ENOIOCTLCMD;
}
2637 | |||
static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
	/* For the moment its fixed. In actual fact the speed information
	   for the virtual channel can be propagated in both directions by
	   the RPN control message. This however rapidly gets nasty as we
	   then have to remap modem signals each way according to whether
	   our virtual cable is null modem etc .. */
	tty_termios_copy_hw(tty->termios, old);
}
2647 | |||
2648 | static void gsmtty_throttle(struct tty_struct *tty) | ||
2649 | { | ||
2650 | struct gsm_dlci *dlci = tty->driver_data; | ||
2651 | if (tty->termios->c_cflag & CRTSCTS) | ||
2652 | dlci->modem_tx &= ~TIOCM_DTR; | ||
2653 | dlci->throttled = 1; | ||
2654 | /* Send an MSC with DTR cleared */ | ||
2655 | gsmtty_modem_update(dlci, 0); | ||
2656 | } | ||
2657 | |||
2658 | static void gsmtty_unthrottle(struct tty_struct *tty) | ||
2659 | { | ||
2660 | struct gsm_dlci *dlci = tty->driver_data; | ||
2661 | if (tty->termios->c_cflag & CRTSCTS) | ||
2662 | dlci->modem_tx |= TIOCM_DTR; | ||
2663 | dlci->throttled = 0; | ||
2664 | /* Send an MSC with DTR set */ | ||
2665 | gsmtty_modem_update(dlci, 0); | ||
2666 | } | ||
2667 | |||
2668 | static int gsmtty_break_ctl(struct tty_struct *tty, int state) | ||
2669 | { | ||
2670 | struct gsm_dlci *dlci = tty->driver_data; | ||
2671 | int encode = 0; /* Off */ | ||
2672 | |||
2673 | if (state == -1) /* "On indefinitely" - we can't encode this | ||
2674 | properly */ | ||
2675 | encode = 0x0F; | ||
2676 | else if (state > 0) { | ||
2677 | encode = state / 200; /* mS to encoding */ | ||
2678 | if (encode > 0x0F) | ||
2679 | encode = 0x0F; /* Best effort */ | ||
2680 | } | ||
2681 | return gsmtty_modem_update(dlci, encode); | ||
2682 | } | ||
2683 | |||
2684 | static struct tty_driver *gsm_tty_driver; | ||
2685 | |||
2686 | /* Virtual ttys for the demux */ | ||
/* Wired into gsm_tty_driver by gsm_init via tty_set_operations() */
static const struct tty_operations gsmtty_ops = {
	.open			= gsmtty_open,
	.close			= gsmtty_close,
	.write			= gsmtty_write,
	.write_room		= gsmtty_write_room,
	.chars_in_buffer	= gsmtty_chars_in_buffer,
	.flush_buffer		= gsmtty_flush_buffer,
	.ioctl			= gsmtty_ioctl,
	.throttle		= gsmtty_throttle,
	.unthrottle		= gsmtty_unthrottle,
	.set_termios		= gsmtty_set_termios,
	.hangup			= gsmtty_hangup,
	.wait_until_sent	= gsmtty_wait_until_sent,
	.tiocmget		= gsmtty_tiocmget,
	.tiocmset		= gsmtty_tiocmset,
	.break_ctl		= gsmtty_break_ctl,
};
2704 | |||
2705 | |||
2706 | |||
static int __init gsm_init(void)
{
	/* Fill in our line protocol discipline, and register it */
	int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
	if (status != 0) {
		printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status);
		return status;
	}

	/* 4 muxes * 64 channels = 256 virtual tty minors */
	gsm_tty_driver = alloc_tty_driver(256);
	if (!gsm_tty_driver) {
		tty_unregister_ldisc(N_GSM0710);
		printk(KERN_ERR "gsm_init: tty allocation failed.\n");
		return -EINVAL;
	}
	gsm_tty_driver->owner = THIS_MODULE;
	gsm_tty_driver->driver_name = "gsmtty";
	gsm_tty_driver->name = "gsmtty";
	gsm_tty_driver->major = 0;	/* Dynamic */
	gsm_tty_driver->minor_start = 0;
	gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gsm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
						| TTY_DRIVER_HARDWARE_BREAK;
	gsm_tty_driver->init_termios = tty_std_termios;
	/* Fixme */
	gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
	tty_set_operations(gsm_tty_driver, &gsmtty_ops);

	spin_lock_init(&gsm_mux_lock);

	if (tty_register_driver(gsm_tty_driver)) {
		/* Unwind in reverse order of set up */
		put_tty_driver(gsm_tty_driver);
		tty_unregister_ldisc(N_GSM0710);
		printk(KERN_ERR "gsm_init: tty registration failed.\n");
		return -EBUSY;
	}
	printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start);
	return 0;
}
2747 | |||
static void __exit gsm_exit(void)
{
	int status = tty_unregister_ldisc(N_GSM0710);
	if (status != 0)
		printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status);
	/* Tear down the virtual tty devices after the ldisc is gone */
	tty_unregister_driver(gsm_tty_driver);
	put_tty_driver(gsm_tty_driver);
	printk(KERN_INFO "gsm_init: unloaded.\n");
}
2757 | |||
2758 | module_init(gsm_init); | ||
2759 | module_exit(gsm_exit); | ||
2760 | |||
2761 | |||
2762 | MODULE_LICENSE("GPL"); | ||
2763 | MODULE_ALIAS_LDISC(N_GSM0710); | ||
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 47e8f7b0e4c1..66d2917b003f 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c | |||
@@ -296,8 +296,8 @@ checksum_err: | |||
296 | return -EIO; | 296 | return -EIO; |
297 | } | 297 | } |
298 | 298 | ||
299 | static int nvram_ioctl(struct inode *inode, struct file *file, | 299 | static long nvram_ioctl(struct file *file, unsigned int cmd, |
300 | unsigned int cmd, unsigned long arg) | 300 | unsigned long arg) |
301 | { | 301 | { |
302 | int i; | 302 | int i; |
303 | 303 | ||
@@ -308,6 +308,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file, | |||
308 | if (!capable(CAP_SYS_ADMIN)) | 308 | if (!capable(CAP_SYS_ADMIN)) |
309 | return -EACCES; | 309 | return -EACCES; |
310 | 310 | ||
311 | lock_kernel(); | ||
311 | spin_lock_irq(&rtc_lock); | 312 | spin_lock_irq(&rtc_lock); |
312 | 313 | ||
313 | for (i = 0; i < NVRAM_BYTES; ++i) | 314 | for (i = 0; i < NVRAM_BYTES; ++i) |
@@ -315,6 +316,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file, | |||
315 | __nvram_set_checksum(); | 316 | __nvram_set_checksum(); |
316 | 317 | ||
317 | spin_unlock_irq(&rtc_lock); | 318 | spin_unlock_irq(&rtc_lock); |
319 | unlock_kernel(); | ||
318 | return 0; | 320 | return 0; |
319 | 321 | ||
320 | case NVRAM_SETCKS: | 322 | case NVRAM_SETCKS: |
@@ -323,9 +325,11 @@ static int nvram_ioctl(struct inode *inode, struct file *file, | |||
323 | if (!capable(CAP_SYS_ADMIN)) | 325 | if (!capable(CAP_SYS_ADMIN)) |
324 | return -EACCES; | 326 | return -EACCES; |
325 | 327 | ||
328 | lock_kernel(); | ||
326 | spin_lock_irq(&rtc_lock); | 329 | spin_lock_irq(&rtc_lock); |
327 | __nvram_set_checksum(); | 330 | __nvram_set_checksum(); |
328 | spin_unlock_irq(&rtc_lock); | 331 | spin_unlock_irq(&rtc_lock); |
332 | unlock_kernel(); | ||
329 | return 0; | 333 | return 0; |
330 | 334 | ||
331 | default: | 335 | default: |
@@ -422,7 +426,7 @@ static const struct file_operations nvram_fops = { | |||
422 | .llseek = nvram_llseek, | 426 | .llseek = nvram_llseek, |
423 | .read = nvram_read, | 427 | .read = nvram_read, |
424 | .write = nvram_write, | 428 | .write = nvram_write, |
425 | .ioctl = nvram_ioctl, | 429 | .unlocked_ioctl = nvram_ioctl, |
426 | .open = nvram_open, | 430 | .open = nvram_open, |
427 | .release = nvram_release, | 431 | .release = nvram_release, |
428 | }; | 432 | }; |
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index f80810901db6..043a1c7b86be 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c | |||
@@ -94,8 +94,9 @@ static int get_flash_id(void) | |||
94 | return c2; | 94 | return c2; |
95 | } | 95 | } |
96 | 96 | ||
97 | static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg) | 97 | static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
98 | { | 98 | { |
99 | lock_kernel(); | ||
99 | switch (cmd) { | 100 | switch (cmd) { |
100 | case CMD_WRITE_DISABLE: | 101 | case CMD_WRITE_DISABLE: |
101 | gbWriteBase64Enable = 0; | 102 | gbWriteBase64Enable = 0; |
@@ -113,8 +114,10 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm | |||
113 | default: | 114 | default: |
114 | gbWriteBase64Enable = 0; | 115 | gbWriteBase64Enable = 0; |
115 | gbWriteEnable = 0; | 116 | gbWriteEnable = 0; |
117 | unlock_kernel(); | ||
116 | return -EINVAL; | 118 | return -EINVAL; |
117 | } | 119 | } |
120 | unlock_kernel(); | ||
118 | return 0; | 121 | return 0; |
119 | } | 122 | } |
120 | 123 | ||
@@ -631,7 +634,7 @@ static const struct file_operations flash_fops = | |||
631 | .llseek = flash_llseek, | 634 | .llseek = flash_llseek, |
632 | .read = flash_read, | 635 | .read = flash_read, |
633 | .write = flash_write, | 636 | .write = flash_write, |
634 | .ioctl = flash_ioctl, | 637 | .unlocked_ioctl = flash_ioctl, |
635 | }; | 638 | }; |
636 | 639 | ||
637 | static struct miscdevice flash_miscdev = | 640 | static struct miscdevice flash_miscdev = |
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index fdd37543aa79..02abfddce45a 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c | |||
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp) | |||
287 | char *name; | 287 | char *name; |
288 | int fl; | 288 | int fl; |
289 | 289 | ||
290 | name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); | 290 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); |
291 | if (name == NULL) | 291 | if (name == NULL) |
292 | return -ENOMEM; | 292 | return -ENOMEM; |
293 | 293 | ||
294 | sprintf (name, CHRDEV "%x", minor); | ||
295 | |||
296 | port = parport_find_number (minor); | 294 | port = parport_find_number (minor); |
297 | if (!port) { | 295 | if (!port) { |
298 | printk (KERN_WARNING "%s: no associated port!\n", name); | 296 | printk (KERN_WARNING "%s: no associated port!\n", name); |
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c index 606048b72bcf..85c004a518ee 100644 --- a/drivers/char/ps3flash.c +++ b/drivers/char/ps3flash.c | |||
@@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id) | |||
305 | return ps3flash_writeback(ps3flash_dev); | 305 | return ps3flash_writeback(ps3flash_dev); |
306 | } | 306 | } |
307 | 307 | ||
308 | static int ps3flash_fsync(struct file *file, struct dentry *dentry, | 308 | static int ps3flash_fsync(struct file *file, int datasync) |
309 | int datasync) | ||
310 | { | 309 | { |
311 | return ps3flash_writeback(ps3flash_dev); | 310 | return ps3flash_writeback(ps3flash_dev); |
312 | } | 311 | } |
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c new file mode 100644 index 000000000000..74f00b5ffa36 --- /dev/null +++ b/drivers/char/ramoops.c | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * RAM Oops/Panic logger | ||
3 | * | ||
4 | * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
18 | * 02110-1301 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/kmsg_dump.h> | ||
25 | #include <linux/time.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/ioport.h> | ||
28 | |||
29 | #define RAMOOPS_KERNMSG_HDR "====" | ||
30 | #define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval)) | ||
31 | |||
32 | #define RECORD_SIZE 4096 | ||
33 | |||
34 | static ulong mem_address; | ||
35 | module_param(mem_address, ulong, 0400); | ||
36 | MODULE_PARM_DESC(mem_address, | ||
37 | "start of reserved RAM used to store oops/panic logs"); | ||
38 | |||
39 | static ulong mem_size; | ||
40 | module_param(mem_size, ulong, 0400); | ||
41 | MODULE_PARM_DESC(mem_size, | ||
42 | "size of reserved RAM used to store oops/panic logs"); | ||
43 | |||
44 | static int dump_oops = 1; | ||
45 | module_param(dump_oops, int, 0600); | ||
46 | MODULE_PARM_DESC(dump_oops, | ||
47 | "set to 1 to dump oopses, 0 to only dump panics (default 1)"); | ||
48 | |||
49 | static struct ramoops_context { | ||
50 | struct kmsg_dumper dump; | ||
51 | void *virt_addr; | ||
52 | phys_addr_t phys_addr; | ||
53 | unsigned long size; | ||
54 | int count; | ||
55 | int max_count; | ||
56 | } oops_cxt; | ||
57 | |||
58 | static void ramoops_do_dump(struct kmsg_dumper *dumper, | ||
59 | enum kmsg_dump_reason reason, const char *s1, unsigned long l1, | ||
60 | const char *s2, unsigned long l2) | ||
61 | { | ||
62 | struct ramoops_context *cxt = container_of(dumper, | ||
63 | struct ramoops_context, dump); | ||
64 | unsigned long s1_start, s2_start; | ||
65 | unsigned long l1_cpy, l2_cpy; | ||
66 | int res; | ||
67 | char *buf; | ||
68 | struct timeval timestamp; | ||
69 | |||
70 | /* Only dump oopses if dump_oops is set */ | ||
71 | if (reason == KMSG_DUMP_OOPS && !dump_oops) | ||
72 | return; | ||
73 | |||
74 | buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE)); | ||
75 | memset(buf, '\0', RECORD_SIZE); | ||
76 | res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR); | ||
77 | buf += res; | ||
78 | do_gettimeofday(×tamp); | ||
79 | res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec); | ||
80 | buf += res; | ||
81 | |||
82 | l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE)); | ||
83 | l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy); | ||
84 | |||
85 | s2_start = l2 - l2_cpy; | ||
86 | s1_start = l1 - l1_cpy; | ||
87 | |||
88 | memcpy(buf, s1 + s1_start, l1_cpy); | ||
89 | memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy); | ||
90 | |||
91 | cxt->count = (cxt->count + 1) % cxt->max_count; | ||
92 | } | ||
93 | |||
94 | static int __init ramoops_init(void) | ||
95 | { | ||
96 | struct ramoops_context *cxt = &oops_cxt; | ||
97 | int err = -EINVAL; | ||
98 | |||
99 | if (!mem_size) { | ||
100 | printk(KERN_ERR "ramoops: invalid size specification"); | ||
101 | goto fail3; | ||
102 | } | ||
103 | |||
104 | rounddown_pow_of_two(mem_size); | ||
105 | |||
106 | if (mem_size < RECORD_SIZE) { | ||
107 | printk(KERN_ERR "ramoops: size too small"); | ||
108 | goto fail3; | ||
109 | } | ||
110 | |||
111 | cxt->max_count = mem_size / RECORD_SIZE; | ||
112 | cxt->count = 0; | ||
113 | cxt->size = mem_size; | ||
114 | cxt->phys_addr = mem_address; | ||
115 | |||
116 | if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { | ||
117 | printk(KERN_ERR "ramoops: request mem region failed"); | ||
118 | err = -EINVAL; | ||
119 | goto fail3; | ||
120 | } | ||
121 | |||
122 | cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size); | ||
123 | if (!cxt->virt_addr) { | ||
124 | printk(KERN_ERR "ramoops: ioremap failed"); | ||
125 | goto fail2; | ||
126 | } | ||
127 | |||
128 | cxt->dump.dump = ramoops_do_dump; | ||
129 | err = kmsg_dump_register(&cxt->dump); | ||
130 | if (err) { | ||
131 | printk(KERN_ERR "ramoops: registering kmsg dumper failed"); | ||
132 | goto fail1; | ||
133 | } | ||
134 | |||
135 | return 0; | ||
136 | |||
137 | fail1: | ||
138 | iounmap(cxt->virt_addr); | ||
139 | fail2: | ||
140 | release_mem_region(cxt->phys_addr, cxt->size); | ||
141 | fail3: | ||
142 | return err; | ||
143 | } | ||
144 | |||
145 | static void __exit ramoops_exit(void) | ||
146 | { | ||
147 | struct ramoops_context *cxt = &oops_cxt; | ||
148 | |||
149 | if (kmsg_dump_unregister(&cxt->dump) < 0) | ||
150 | printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper"); | ||
151 | |||
152 | iounmap(cxt->virt_addr); | ||
153 | release_mem_region(cxt->phys_addr, cxt->size); | ||
154 | } | ||
155 | |||
156 | |||
157 | module_init(ramoops_init); | ||
158 | module_exit(ramoops_exit); | ||
159 | |||
160 | MODULE_LICENSE("GPL"); | ||
161 | MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>"); | ||
162 | MODULE_DESCRIPTION("RAM Oops/Panic logger/driver"); | ||
diff --git a/drivers/char/random.c b/drivers/char/random.c index 2fd3d39995d5..8d85587b6d4f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -257,6 +257,7 @@ | |||
257 | #define INPUT_POOL_WORDS 128 | 257 | #define INPUT_POOL_WORDS 128 |
258 | #define OUTPUT_POOL_WORDS 32 | 258 | #define OUTPUT_POOL_WORDS 32 |
259 | #define SEC_XFER_SIZE 512 | 259 | #define SEC_XFER_SIZE 512 |
260 | #define EXTRACT_SIZE 10 | ||
260 | 261 | ||
261 | /* | 262 | /* |
262 | * The minimum number of bits of entropy before we wake up a read on | 263 | * The minimum number of bits of entropy before we wake up a read on |
@@ -414,7 +415,7 @@ struct entropy_store { | |||
414 | unsigned add_ptr; | 415 | unsigned add_ptr; |
415 | int entropy_count; | 416 | int entropy_count; |
416 | int input_rotate; | 417 | int input_rotate; |
417 | __u8 *last_data; | 418 | __u8 last_data[EXTRACT_SIZE]; |
418 | }; | 419 | }; |
419 | 420 | ||
420 | static __u32 input_pool_data[INPUT_POOL_WORDS]; | 421 | static __u32 input_pool_data[INPUT_POOL_WORDS]; |
@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk) | |||
714 | } | 715 | } |
715 | #endif | 716 | #endif |
716 | 717 | ||
717 | #define EXTRACT_SIZE 10 | ||
718 | |||
719 | /********************************************************************* | 718 | /********************************************************************* |
720 | * | 719 | * |
721 | * Entropy extraction routines | 720 | * Entropy extraction routines |
@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, | |||
862 | while (nbytes) { | 861 | while (nbytes) { |
863 | extract_buf(r, tmp); | 862 | extract_buf(r, tmp); |
864 | 863 | ||
865 | if (r->last_data) { | 864 | if (fips_enabled) { |
866 | spin_lock_irqsave(&r->lock, flags); | 865 | spin_lock_irqsave(&r->lock, flags); |
867 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) | 866 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) |
868 | panic("Hardware RNG duplicated output!\n"); | 867 | panic("Hardware RNG duplicated output!\n"); |
@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r) | |||
951 | now = ktime_get_real(); | 950 | now = ktime_get_real(); |
952 | mix_pool_bytes(r, &now, sizeof(now)); | 951 | mix_pool_bytes(r, &now, sizeof(now)); |
953 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); | 952 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); |
954 | /* Enable continuous test in fips mode */ | ||
955 | if (fips_enabled) | ||
956 | r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL); | ||
957 | } | 953 | } |
958 | 954 | ||
959 | static int rand_initialize(void) | 955 | static int rand_initialize(void) |
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 8756ab0daa8b..b38942f6bf31 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
@@ -121,13 +121,17 @@ static int raw_release(struct inode *inode, struct file *filp) | |||
121 | /* | 121 | /* |
122 | * Forward ioctls to the underlying block device. | 122 | * Forward ioctls to the underlying block device. |
123 | */ | 123 | */ |
124 | static int | 124 | static long |
125 | raw_ioctl(struct inode *inode, struct file *filp, | 125 | raw_ioctl(struct file *filp, unsigned int command, unsigned long arg) |
126 | unsigned int command, unsigned long arg) | ||
127 | { | 126 | { |
128 | struct block_device *bdev = filp->private_data; | 127 | struct block_device *bdev = filp->private_data; |
128 | int ret; | ||
129 | |||
130 | lock_kernel(); | ||
131 | ret = blkdev_ioctl(bdev, 0, command, arg); | ||
132 | unlock_kernel(); | ||
129 | 133 | ||
130 | return blkdev_ioctl(bdev, 0, command, arg); | 134 | return ret; |
131 | } | 135 | } |
132 | 136 | ||
133 | static void bind_device(struct raw_config_request *rq) | 137 | static void bind_device(struct raw_config_request *rq) |
@@ -141,13 +145,14 @@ static void bind_device(struct raw_config_request *rq) | |||
141 | * Deal with ioctls against the raw-device control interface, to bind | 145 | * Deal with ioctls against the raw-device control interface, to bind |
142 | * and unbind other raw devices. | 146 | * and unbind other raw devices. |
143 | */ | 147 | */ |
144 | static int raw_ctl_ioctl(struct inode *inode, struct file *filp, | 148 | static long raw_ctl_ioctl(struct file *filp, unsigned int command, |
145 | unsigned int command, unsigned long arg) | 149 | unsigned long arg) |
146 | { | 150 | { |
147 | struct raw_config_request rq; | 151 | struct raw_config_request rq; |
148 | struct raw_device_data *rawdev; | 152 | struct raw_device_data *rawdev; |
149 | int err = 0; | 153 | int err = 0; |
150 | 154 | ||
155 | lock_kernel(); | ||
151 | switch (command) { | 156 | switch (command) { |
152 | case RAW_SETBIND: | 157 | case RAW_SETBIND: |
153 | case RAW_GETBIND: | 158 | case RAW_GETBIND: |
@@ -240,25 +245,26 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, | |||
240 | break; | 245 | break; |
241 | } | 246 | } |
242 | out: | 247 | out: |
248 | unlock_kernel(); | ||
243 | return err; | 249 | return err; |
244 | } | 250 | } |
245 | 251 | ||
246 | static const struct file_operations raw_fops = { | 252 | static const struct file_operations raw_fops = { |
247 | .read = do_sync_read, | 253 | .read = do_sync_read, |
248 | .aio_read = generic_file_aio_read, | 254 | .aio_read = generic_file_aio_read, |
249 | .write = do_sync_write, | 255 | .write = do_sync_write, |
250 | .aio_write = blkdev_aio_write, | 256 | .aio_write = blkdev_aio_write, |
251 | .fsync = blkdev_fsync, | 257 | .fsync = blkdev_fsync, |
252 | .open = raw_open, | 258 | .open = raw_open, |
253 | .release= raw_release, | 259 | .release = raw_release, |
254 | .ioctl = raw_ioctl, | 260 | .unlocked_ioctl = raw_ioctl, |
255 | .owner = THIS_MODULE, | 261 | .owner = THIS_MODULE, |
256 | }; | 262 | }; |
257 | 263 | ||
258 | static const struct file_operations raw_ctl_fops = { | 264 | static const struct file_operations raw_ctl_fops = { |
259 | .ioctl = raw_ctl_ioctl, | 265 | .unlocked_ioctl = raw_ctl_ioctl, |
260 | .open = raw_open, | 266 | .open = raw_open, |
261 | .owner = THIS_MODULE, | 267 | .owner = THIS_MODULE, |
262 | }; | 268 | }; |
263 | 269 | ||
264 | static struct cdev raw_cdev; | 270 | static struct cdev raw_cdev; |
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index 78a62ebe75c7..ecbe479c7d68 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -176,23 +176,6 @@ static void config_setup(struct cyclades_port *); | |||
176 | static void show_status(int); | 176 | static void show_status(int); |
177 | #endif | 177 | #endif |
178 | 178 | ||
179 | #ifdef CONFIG_REMOTE_DEBUG | ||
180 | static void debug_setup(void); | ||
181 | void queueDebugChar(int c); | ||
182 | int getDebugChar(void); | ||
183 | |||
184 | #define DEBUG_PORT 1 | ||
185 | #define DEBUG_LEN 256 | ||
186 | |||
187 | typedef struct { | ||
188 | int in; | ||
189 | int out; | ||
190 | unsigned char buf[DEBUG_LEN]; | ||
191 | } debugq; | ||
192 | |||
193 | debugq debugiq; | ||
194 | #endif | ||
195 | |||
196 | /* | 179 | /* |
197 | * I have my own version of udelay(), as it is needed when initialising | 180 | * I have my own version of udelay(), as it is needed when initialising |
198 | * the chip, before the delay loop has been calibrated. Should probably | 181 | * the chip, before the delay loop has been calibrated. Should probably |
@@ -515,11 +498,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id) | |||
515 | /* determine the channel and change to that context */ | 498 | /* determine the channel and change to that context */ |
516 | channel = (u_short) (base_addr[CyLICR] >> 2); | 499 | channel = (u_short) (base_addr[CyLICR] >> 2); |
517 | 500 | ||
518 | #ifdef CONFIG_REMOTE_DEBUG | ||
519 | if (channel == DEBUG_PORT) { | ||
520 | panic("TxInt on debug port!!!"); | ||
521 | } | ||
522 | #endif | ||
523 | /* validate the port number (as configured and open) */ | 501 | /* validate the port number (as configured and open) */ |
524 | if ((channel < 0) || (NR_PORTS <= channel)) { | 502 | if ((channel < 0) || (NR_PORTS <= channel)) { |
525 | base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); | 503 | base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); |
@@ -634,14 +612,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id) | |||
634 | info->last_active = jiffies; | 612 | info->last_active = jiffies; |
635 | save_cnt = char_count = base_addr[CyRFOC]; | 613 | save_cnt = char_count = base_addr[CyRFOC]; |
636 | 614 | ||
637 | #ifdef CONFIG_REMOTE_DEBUG | ||
638 | if (channel == DEBUG_PORT) { | ||
639 | while (char_count--) { | ||
640 | data = base_addr[CyRDR]; | ||
641 | queueDebugChar(data); | ||
642 | } | ||
643 | } else | ||
644 | #endif | ||
645 | /* if there is nowhere to put the data, discard it */ | 615 | /* if there is nowhere to put the data, discard it */ |
646 | if (info->tty == 0) { | 616 | if (info->tty == 0) { |
647 | while (char_count--) { | 617 | while (char_count--) { |
@@ -2195,9 +2165,7 @@ static int __init serial167_init(void) | |||
2195 | port_num++; | 2165 | port_num++; |
2196 | info++; | 2166 | info++; |
2197 | } | 2167 | } |
2198 | #ifdef CONFIG_REMOTE_DEBUG | 2168 | |
2199 | debug_setup(); | ||
2200 | #endif | ||
2201 | ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0, | 2169 | ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0, |
2202 | "cd2401_errors", cd2401_rxerr_interrupt); | 2170 | "cd2401_errors", cd2401_rxerr_interrupt); |
2203 | if (ret) { | 2171 | if (ret) { |
@@ -2518,193 +2486,4 @@ static int __init serial167_console_init(void) | |||
2518 | 2486 | ||
2519 | console_initcall(serial167_console_init); | 2487 | console_initcall(serial167_console_init); |
2520 | 2488 | ||
2521 | #ifdef CONFIG_REMOTE_DEBUG | ||
2522 | void putDebugChar(int c) | ||
2523 | { | ||
2524 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | ||
2525 | unsigned long flags; | ||
2526 | volatile u_char sink; | ||
2527 | u_char ier; | ||
2528 | int port; | ||
2529 | |||
2530 | local_irq_save(flags); | ||
2531 | |||
2532 | /* Ensure transmitter is enabled! */ | ||
2533 | |||
2534 | port = DEBUG_PORT; | ||
2535 | base_addr[CyCAR] = (u_char) port; | ||
2536 | while (base_addr[CyCCR]) | ||
2537 | ; | ||
2538 | base_addr[CyCCR] = CyENB_XMTR; | ||
2539 | |||
2540 | ier = base_addr[CyIER]; | ||
2541 | base_addr[CyIER] = CyTxMpty; | ||
2542 | |||
2543 | while (1) { | ||
2544 | if (pcc2chip[PccSCCTICR] & 0x20) { | ||
2545 | /* We have a Tx int. Acknowledge it */ | ||
2546 | sink = pcc2chip[PccTPIACKR]; | ||
2547 | if ((base_addr[CyLICR] >> 2) == port) { | ||
2548 | base_addr[CyTDR] = c; | ||
2549 | base_addr[CyTEOIR] = 0; | ||
2550 | break; | ||
2551 | } else | ||
2552 | base_addr[CyTEOIR] = CyNOTRANS; | ||
2553 | } | ||
2554 | } | ||
2555 | |||
2556 | base_addr[CyIER] = ier; | ||
2557 | |||
2558 | local_irq_restore(flags); | ||
2559 | } | ||
2560 | |||
2561 | int getDebugChar() | ||
2562 | { | ||
2563 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | ||
2564 | unsigned long flags; | ||
2565 | volatile u_char sink; | ||
2566 | u_char ier; | ||
2567 | int port; | ||
2568 | int i, c; | ||
2569 | |||
2570 | i = debugiq.out; | ||
2571 | if (i != debugiq.in) { | ||
2572 | c = debugiq.buf[i]; | ||
2573 | if (++i == DEBUG_LEN) | ||
2574 | i = 0; | ||
2575 | debugiq.out = i; | ||
2576 | return c; | ||
2577 | } | ||
2578 | /* OK, nothing in queue, wait in poll loop */ | ||
2579 | |||
2580 | local_irq_save(flags); | ||
2581 | |||
2582 | /* Ensure receiver is enabled! */ | ||
2583 | |||
2584 | port = DEBUG_PORT; | ||
2585 | base_addr[CyCAR] = (u_char) port; | ||
2586 | #if 0 | ||
2587 | while (base_addr[CyCCR]) | ||
2588 | ; | ||
2589 | base_addr[CyCCR] = CyENB_RCVR; | ||
2590 | #endif | ||
2591 | ier = base_addr[CyIER]; | ||
2592 | base_addr[CyIER] = CyRxData; | ||
2593 | |||
2594 | while (1) { | ||
2595 | if (pcc2chip[PccSCCRICR] & 0x20) { | ||
2596 | /* We have a Rx int. Acknowledge it */ | ||
2597 | sink = pcc2chip[PccRPIACKR]; | ||
2598 | if ((base_addr[CyLICR] >> 2) == port) { | ||
2599 | int cnt = base_addr[CyRFOC]; | ||
2600 | while (cnt-- > 0) { | ||
2601 | c = base_addr[CyRDR]; | ||
2602 | if (c == 0) | ||
2603 | printk | ||
2604 | ("!! debug char is null (cnt=%d) !!", | ||
2605 | cnt); | ||
2606 | else | ||
2607 | queueDebugChar(c); | ||
2608 | } | ||
2609 | base_addr[CyREOIR] = 0; | ||
2610 | i = debugiq.out; | ||
2611 | if (i == debugiq.in) | ||
2612 | panic("Debug input queue empty!"); | ||
2613 | c = debugiq.buf[i]; | ||
2614 | if (++i == DEBUG_LEN) | ||
2615 | i = 0; | ||
2616 | debugiq.out = i; | ||
2617 | break; | ||
2618 | } else | ||
2619 | base_addr[CyREOIR] = CyNOTRANS; | ||
2620 | } | ||
2621 | } | ||
2622 | |||
2623 | base_addr[CyIER] = ier; | ||
2624 | |||
2625 | local_irq_restore(flags); | ||
2626 | |||
2627 | return (c); | ||
2628 | } | ||
2629 | |||
2630 | void queueDebugChar(int c) | ||
2631 | { | ||
2632 | int i; | ||
2633 | |||
2634 | i = debugiq.in; | ||
2635 | debugiq.buf[i] = c; | ||
2636 | if (++i == DEBUG_LEN) | ||
2637 | i = 0; | ||
2638 | if (i != debugiq.out) | ||
2639 | debugiq.in = i; | ||
2640 | } | ||
2641 | |||
2642 | static void debug_setup() | ||
2643 | { | ||
2644 | unsigned long flags; | ||
2645 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | ||
2646 | int i, cflag; | ||
2647 | |||
2648 | cflag = B19200; | ||
2649 | |||
2650 | local_irq_save(flags); | ||
2651 | |||
2652 | for (i = 0; i < 4; i++) { | ||
2653 | base_addr[CyCAR] = i; | ||
2654 | base_addr[CyLICR] = i << 2; | ||
2655 | } | ||
2656 | |||
2657 | debugiq.in = debugiq.out = 0; | ||
2658 | |||
2659 | base_addr[CyCAR] = DEBUG_PORT; | ||
2660 | |||
2661 | /* baud rate */ | ||
2662 | i = cflag & CBAUD; | ||
2663 | |||
2664 | base_addr[CyIER] = 0; | ||
2665 | |||
2666 | base_addr[CyCMR] = CyASYNC; | ||
2667 | base_addr[CyLICR] = DEBUG_PORT << 2; | ||
2668 | base_addr[CyLIVR] = 0x5c; | ||
2669 | |||
2670 | /* tx and rx baud rate */ | ||
2671 | |||
2672 | base_addr[CyTCOR] = baud_co[i]; | ||
2673 | base_addr[CyTBPR] = baud_bpr[i]; | ||
2674 | base_addr[CyRCOR] = baud_co[i] >> 5; | ||
2675 | base_addr[CyRBPR] = baud_bpr[i]; | ||
2676 | |||
2677 | /* set line characteristics according configuration */ | ||
2678 | |||
2679 | base_addr[CySCHR1] = 0; | ||
2680 | base_addr[CySCHR2] = 0; | ||
2681 | base_addr[CySCRL] = 0; | ||
2682 | base_addr[CySCRH] = 0; | ||
2683 | base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE; | ||
2684 | base_addr[CyCOR2] = 0; | ||
2685 | base_addr[CyCOR3] = Cy_1_STOP; | ||
2686 | base_addr[CyCOR4] = baud_cor4[i]; | ||
2687 | base_addr[CyCOR5] = 0; | ||
2688 | base_addr[CyCOR6] = 0; | ||
2689 | base_addr[CyCOR7] = 0; | ||
2690 | |||
2691 | write_cy_cmd(base_addr, CyINIT_CHAN); | ||
2692 | write_cy_cmd(base_addr, CyENB_RCVR); | ||
2693 | |||
2694 | base_addr[CyCAR] = DEBUG_PORT; /* !!! Is this needed? */ | ||
2695 | |||
2696 | base_addr[CyRTPRL] = 2; | ||
2697 | base_addr[CyRTPRH] = 0; | ||
2698 | |||
2699 | base_addr[CyMSVR1] = CyRTS; | ||
2700 | base_addr[CyMSVR2] = CyDTR; | ||
2701 | |||
2702 | base_addr[CyIER] = CyRxData; | ||
2703 | |||
2704 | local_irq_restore(flags); | ||
2705 | |||
2706 | } /* debug_setup */ | ||
2707 | |||
2708 | #endif | ||
2709 | |||
2710 | MODULE_LICENSE("GPL"); | 2489 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 7ee52164d474..cc1e9850d655 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c | |||
@@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room); | |||
238 | * @size: size | 238 | * @size: size |
239 | * | 239 | * |
240 | * Queue a series of bytes to the tty buffering. All the characters | 240 | * Queue a series of bytes to the tty buffering. All the characters |
241 | * passed are marked as without error. Returns the number added. | 241 | * passed are marked with the supplied flag. Returns the number added. |
242 | * | 242 | * |
243 | * Locking: Called functions may take tty->buf.lock | 243 | * Locking: Called functions may take tty->buf.lock |
244 | */ | 244 | */ |
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index 1144a04cda6e..42f7fa442ff8 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c | |||
@@ -866,7 +866,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
866 | { | 866 | { |
867 | int i = vdev->unit_address; | 867 | int i = vdev->unit_address; |
868 | int j; | 868 | int j; |
869 | struct device_node *node = vdev->dev.archdata.of_node; | 869 | struct device_node *node = vdev->dev.of_node; |
870 | 870 | ||
871 | if (i >= VIOTAPE_MAX_TAPE) | 871 | if (i >= VIOTAPE_MAX_TAPE) |
872 | return -ENODEV; | 872 | return -ENODEV; |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 196428c2287a..8c99bf1b5e9f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -33,35 +33,6 @@ | |||
33 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
34 | #include "hvc_console.h" | 34 | #include "hvc_console.h" |
35 | 35 | ||
36 | /* Moved here from .h file in order to disable MULTIPORT. */ | ||
37 | #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ | ||
38 | |||
39 | struct virtio_console_multiport_conf { | ||
40 | struct virtio_console_config config; | ||
41 | /* max. number of ports this device can hold */ | ||
42 | __u32 max_nr_ports; | ||
43 | /* number of ports added so far */ | ||
44 | __u32 nr_ports; | ||
45 | } __attribute__((packed)); | ||
46 | |||
47 | /* | ||
48 | * A message that's passed between the Host and the Guest for a | ||
49 | * particular port. | ||
50 | */ | ||
51 | struct virtio_console_control { | ||
52 | __u32 id; /* Port number */ | ||
53 | __u16 event; /* The kind of control event (see below) */ | ||
54 | __u16 value; /* Extra information for the key */ | ||
55 | }; | ||
56 | |||
57 | /* Some events for control messages */ | ||
58 | #define VIRTIO_CONSOLE_PORT_READY 0 | ||
59 | #define VIRTIO_CONSOLE_CONSOLE_PORT 1 | ||
60 | #define VIRTIO_CONSOLE_RESIZE 2 | ||
61 | #define VIRTIO_CONSOLE_PORT_OPEN 3 | ||
62 | #define VIRTIO_CONSOLE_PORT_NAME 4 | ||
63 | #define VIRTIO_CONSOLE_PORT_REMOVE 5 | ||
64 | |||
65 | /* | 36 | /* |
66 | * This is a global struct for storing common data for all the devices | 37 | * This is a global struct for storing common data for all the devices |
67 | * this driver handles. | 38 | * this driver handles. |
@@ -107,6 +78,9 @@ struct console { | |||
107 | /* The hvc device associated with this console port */ | 78 | /* The hvc device associated with this console port */ |
108 | struct hvc_struct *hvc; | 79 | struct hvc_struct *hvc; |
109 | 80 | ||
81 | /* The size of the console */ | ||
82 | struct winsize ws; | ||
83 | |||
110 | /* | 84 | /* |
111 | * This number identifies the number that we used to register | 85 | * This number identifies the number that we used to register |
112 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the | 86 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the |
@@ -139,7 +113,6 @@ struct ports_device { | |||
139 | * notification | 113 | * notification |
140 | */ | 114 | */ |
141 | struct work_struct control_work; | 115 | struct work_struct control_work; |
142 | struct work_struct config_work; | ||
143 | 116 | ||
144 | struct list_head ports; | 117 | struct list_head ports; |
145 | 118 | ||
@@ -150,7 +123,7 @@ struct ports_device { | |||
150 | spinlock_t cvq_lock; | 123 | spinlock_t cvq_lock; |
151 | 124 | ||
152 | /* The current config space is stored here */ | 125 | /* The current config space is stored here */ |
153 | struct virtio_console_multiport_conf config; | 126 | struct virtio_console_config config; |
154 | 127 | ||
155 | /* The virtio device we're associated with */ | 128 | /* The virtio device we're associated with */ |
156 | struct virtio_device *vdev; | 129 | struct virtio_device *vdev; |
@@ -189,6 +162,9 @@ struct port { | |||
189 | */ | 162 | */ |
190 | spinlock_t inbuf_lock; | 163 | spinlock_t inbuf_lock; |
191 | 164 | ||
165 | /* Protect the operations on the out_vq. */ | ||
166 | spinlock_t outvq_lock; | ||
167 | |||
192 | /* The IO vqs for this port */ | 168 | /* The IO vqs for this port */ |
193 | struct virtqueue *in_vq, *out_vq; | 169 | struct virtqueue *in_vq, *out_vq; |
194 | 170 | ||
@@ -214,6 +190,8 @@ struct port { | |||
214 | /* The 'id' to identify the port with the Host */ | 190 | /* The 'id' to identify the port with the Host */ |
215 | u32 id; | 191 | u32 id; |
216 | 192 | ||
193 | bool outvq_full; | ||
194 | |||
217 | /* Is the host device open */ | 195 | /* Is the host device open */ |
218 | bool host_connected; | 196 | bool host_connected; |
219 | 197 | ||
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port) | |||
328 | unsigned int len; | 306 | unsigned int len; |
329 | 307 | ||
330 | vq = port->in_vq; | 308 | vq = port->in_vq; |
331 | buf = vq->vq_ops->get_buf(vq, &len); | 309 | buf = virtqueue_get_buf(vq, &len); |
332 | if (buf) { | 310 | if (buf) { |
333 | buf->len = len; | 311 | buf->len = len; |
334 | buf->offset = 0; | 312 | buf->offset = 0; |
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) | |||
349 | 327 | ||
350 | sg_init_one(sg, buf->buf, buf->size); | 328 | sg_init_one(sg, buf->buf, buf->size); |
351 | 329 | ||
352 | ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); | 330 | ret = virtqueue_add_buf(vq, sg, 0, 1, buf); |
353 | vq->vq_ops->kick(vq); | 331 | virtqueue_kick(vq); |
354 | return ret; | 332 | return ret; |
355 | } | 333 | } |
356 | 334 | ||
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port) | |||
366 | if (port->inbuf) | 344 | if (port->inbuf) |
367 | buf = port->inbuf; | 345 | buf = port->inbuf; |
368 | else | 346 | else |
369 | buf = vq->vq_ops->get_buf(vq, &len); | 347 | buf = virtqueue_get_buf(vq, &len); |
370 | 348 | ||
371 | ret = 0; | 349 | ret = 0; |
372 | while (buf) { | 350 | while (buf) { |
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port) | |||
374 | ret++; | 352 | ret++; |
375 | free_buf(buf); | 353 | free_buf(buf); |
376 | } | 354 | } |
377 | buf = vq->vq_ops->get_buf(vq, &len); | 355 | buf = virtqueue_get_buf(vq, &len); |
378 | } | 356 | } |
379 | port->inbuf = NULL; | 357 | port->inbuf = NULL; |
380 | if (ret) | 358 | if (ret) |
@@ -403,57 +381,96 @@ out: | |||
403 | return ret; | 381 | return ret; |
404 | } | 382 | } |
405 | 383 | ||
406 | static ssize_t send_control_msg(struct port *port, unsigned int event, | 384 | static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, |
407 | unsigned int value) | 385 | unsigned int event, unsigned int value) |
408 | { | 386 | { |
409 | struct scatterlist sg[1]; | 387 | struct scatterlist sg[1]; |
410 | struct virtio_console_control cpkt; | 388 | struct virtio_console_control cpkt; |
411 | struct virtqueue *vq; | 389 | struct virtqueue *vq; |
412 | unsigned int len; | 390 | unsigned int len; |
413 | 391 | ||
414 | if (!use_multiport(port->portdev)) | 392 | if (!use_multiport(portdev)) |
415 | return 0; | 393 | return 0; |
416 | 394 | ||
417 | cpkt.id = port->id; | 395 | cpkt.id = port_id; |
418 | cpkt.event = event; | 396 | cpkt.event = event; |
419 | cpkt.value = value; | 397 | cpkt.value = value; |
420 | 398 | ||
421 | vq = port->portdev->c_ovq; | 399 | vq = portdev->c_ovq; |
422 | 400 | ||
423 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | 401 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
424 | if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { | 402 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { |
425 | vq->vq_ops->kick(vq); | 403 | virtqueue_kick(vq); |
426 | while (!vq->vq_ops->get_buf(vq, &len)) | 404 | while (!virtqueue_get_buf(vq, &len)) |
427 | cpu_relax(); | 405 | cpu_relax(); |
428 | } | 406 | } |
429 | return 0; | 407 | return 0; |
430 | } | 408 | } |
431 | 409 | ||
432 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) | 410 | static ssize_t send_control_msg(struct port *port, unsigned int event, |
411 | unsigned int value) | ||
412 | { | ||
413 | return __send_control_msg(port->portdev, port->id, event, value); | ||
414 | } | ||
415 | |||
416 | /* Callers must take the port->outvq_lock */ | ||
417 | static void reclaim_consumed_buffers(struct port *port) | ||
418 | { | ||
419 | void *buf; | ||
420 | unsigned int len; | ||
421 | |||
422 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { | ||
423 | kfree(buf); | ||
424 | port->outvq_full = false; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, | ||
429 | bool nonblock) | ||
433 | { | 430 | { |
434 | struct scatterlist sg[1]; | 431 | struct scatterlist sg[1]; |
435 | struct virtqueue *out_vq; | 432 | struct virtqueue *out_vq; |
436 | ssize_t ret; | 433 | ssize_t ret; |
434 | unsigned long flags; | ||
437 | unsigned int len; | 435 | unsigned int len; |
438 | 436 | ||
439 | out_vq = port->out_vq; | 437 | out_vq = port->out_vq; |
440 | 438 | ||
439 | spin_lock_irqsave(&port->outvq_lock, flags); | ||
440 | |||
441 | reclaim_consumed_buffers(port); | ||
442 | |||
441 | sg_init_one(sg, in_buf, in_count); | 443 | sg_init_one(sg, in_buf, in_count); |
442 | ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); | 444 | ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); |
443 | 445 | ||
444 | /* Tell Host to go! */ | 446 | /* Tell Host to go! */ |
445 | out_vq->vq_ops->kick(out_vq); | 447 | virtqueue_kick(out_vq); |
446 | 448 | ||
447 | if (ret < 0) { | 449 | if (ret < 0) { |
448 | in_count = 0; | 450 | in_count = 0; |
449 | goto fail; | 451 | goto done; |
450 | } | 452 | } |
451 | 453 | ||
452 | /* Wait till the host acknowledges it pushed out the data we sent. */ | 454 | if (ret == 0) |
453 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | 455 | port->outvq_full = true; |
456 | |||
457 | if (nonblock) | ||
458 | goto done; | ||
459 | |||
460 | /* | ||
461 | * Wait till the host acknowledges it pushed out the data we | ||
462 | * sent. This is done for ports in blocking mode or for data | ||
463 | * from the hvc_console; the tty operations are performed with | ||
464 | * spinlocks held so we can't sleep here. | ||
465 | */ | ||
466 | while (!virtqueue_get_buf(out_vq, &len)) | ||
454 | cpu_relax(); | 467 | cpu_relax(); |
455 | fail: | 468 | done: |
456 | /* We're expected to return the amount of data we wrote */ | 469 | spin_unlock_irqrestore(&port->outvq_lock, flags); |
470 | /* | ||
471 | * We're expected to return the amount of data we wrote -- all | ||
472 | * of it | ||
473 | */ | ||
457 | return in_count; | 474 | return in_count; |
458 | } | 475 | } |
459 | 476 | ||
@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, | |||
503 | } | 520 | } |
504 | 521 | ||
505 | /* The condition that must be true for polling to end */ | 522 | /* The condition that must be true for polling to end */ |
506 | static bool wait_is_over(struct port *port) | 523 | static bool will_read_block(struct port *port) |
524 | { | ||
525 | return !port_has_data(port) && port->host_connected; | ||
526 | } | ||
527 | |||
528 | static bool will_write_block(struct port *port) | ||
507 | { | 529 | { |
508 | return port_has_data(port) || !port->host_connected; | 530 | bool ret; |
531 | |||
532 | if (!port->host_connected) | ||
533 | return true; | ||
534 | |||
535 | spin_lock_irq(&port->outvq_lock); | ||
536 | /* | ||
537 | * Check if the Host has consumed any buffers since we last | ||
538 | * sent data (this is only applicable for nonblocking ports). | ||
539 | */ | ||
540 | reclaim_consumed_buffers(port); | ||
541 | ret = port->outvq_full; | ||
542 | spin_unlock_irq(&port->outvq_lock); | ||
543 | |||
544 | return ret; | ||
509 | } | 545 | } |
510 | 546 | ||
511 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | 547 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, |
@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
528 | return -EAGAIN; | 564 | return -EAGAIN; |
529 | 565 | ||
530 | ret = wait_event_interruptible(port->waitqueue, | 566 | ret = wait_event_interruptible(port->waitqueue, |
531 | wait_is_over(port)); | 567 | !will_read_block(port)); |
532 | if (ret < 0) | 568 | if (ret < 0) |
533 | return ret; | 569 | return ret; |
534 | } | 570 | } |
@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
554 | struct port *port; | 590 | struct port *port; |
555 | char *buf; | 591 | char *buf; |
556 | ssize_t ret; | 592 | ssize_t ret; |
593 | bool nonblock; | ||
557 | 594 | ||
558 | port = filp->private_data; | 595 | port = filp->private_data; |
559 | 596 | ||
597 | nonblock = filp->f_flags & O_NONBLOCK; | ||
598 | |||
599 | if (will_write_block(port)) { | ||
600 | if (nonblock) | ||
601 | return -EAGAIN; | ||
602 | |||
603 | ret = wait_event_interruptible(port->waitqueue, | ||
604 | !will_write_block(port)); | ||
605 | if (ret < 0) | ||
606 | return ret; | ||
607 | } | ||
608 | |||
560 | count = min((size_t)(32 * 1024), count); | 609 | count = min((size_t)(32 * 1024), count); |
561 | 610 | ||
562 | buf = kmalloc(count, GFP_KERNEL); | 611 | buf = kmalloc(count, GFP_KERNEL); |
@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
569 | goto free_buf; | 618 | goto free_buf; |
570 | } | 619 | } |
571 | 620 | ||
572 | ret = send_buf(port, buf, count); | 621 | ret = send_buf(port, buf, count, nonblock); |
622 | |||
623 | if (nonblock && ret > 0) | ||
624 | goto out; | ||
625 | |||
573 | free_buf: | 626 | free_buf: |
574 | kfree(buf); | 627 | kfree(buf); |
628 | out: | ||
575 | return ret; | 629 | return ret; |
576 | } | 630 | } |
577 | 631 | ||
@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | |||
586 | ret = 0; | 640 | ret = 0; |
587 | if (port->inbuf) | 641 | if (port->inbuf) |
588 | ret |= POLLIN | POLLRDNORM; | 642 | ret |= POLLIN | POLLRDNORM; |
589 | if (port->host_connected) | 643 | if (!will_write_block(port)) |
590 | ret |= POLLOUT; | 644 | ret |= POLLOUT; |
591 | if (!port->host_connected) | 645 | if (!port->host_connected) |
592 | ret |= POLLHUP; | 646 | ret |= POLLHUP; |
@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp) | |||
610 | 664 | ||
611 | spin_unlock_irq(&port->inbuf_lock); | 665 | spin_unlock_irq(&port->inbuf_lock); |
612 | 666 | ||
667 | spin_lock_irq(&port->outvq_lock); | ||
668 | reclaim_consumed_buffers(port); | ||
669 | spin_unlock_irq(&port->outvq_lock); | ||
670 | |||
613 | return 0; | 671 | return 0; |
614 | } | 672 | } |
615 | 673 | ||
@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp) | |||
638 | port->guest_connected = true; | 696 | port->guest_connected = true; |
639 | spin_unlock_irq(&port->inbuf_lock); | 697 | spin_unlock_irq(&port->inbuf_lock); |
640 | 698 | ||
699 | spin_lock_irq(&port->outvq_lock); | ||
700 | /* | ||
701 | * There might be a chance that we missed reclaiming a few | ||
702 | * buffers in the window of the port getting previously closed | ||
703 | * and opening now. | ||
704 | */ | ||
705 | reclaim_consumed_buffers(port); | ||
706 | spin_unlock_irq(&port->outvq_lock); | ||
707 | |||
641 | /* Notify host of port being opened */ | 708 | /* Notify host of port being opened */ |
642 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); | 709 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); |
643 | 710 | ||
@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count) | |||
676 | 743 | ||
677 | port = find_port_by_vtermno(vtermno); | 744 | port = find_port_by_vtermno(vtermno); |
678 | if (!port) | 745 | if (!port) |
679 | return 0; | 746 | return -EPIPE; |
680 | 747 | ||
681 | return send_buf(port, (void *)buf, count); | 748 | return send_buf(port, (void *)buf, count, false); |
682 | } | 749 | } |
683 | 750 | ||
684 | /* | 751 | /* |
@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count) | |||
692 | { | 759 | { |
693 | struct port *port; | 760 | struct port *port; |
694 | 761 | ||
762 | /* If we've not set up the port yet, we have no input to give. */ | ||
763 | if (unlikely(early_put_chars)) | ||
764 | return 0; | ||
765 | |||
695 | port = find_port_by_vtermno(vtermno); | 766 | port = find_port_by_vtermno(vtermno); |
696 | if (!port) | 767 | if (!port) |
697 | return 0; | 768 | return -EPIPE; |
698 | 769 | ||
699 | /* If we don't have an input queue yet, we can't get input. */ | 770 | /* If we don't have an input queue yet, we can't get input. */ |
700 | BUG_ON(!port->in_vq); | 771 | BUG_ON(!port->in_vq); |
@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count) | |||
705 | static void resize_console(struct port *port) | 776 | static void resize_console(struct port *port) |
706 | { | 777 | { |
707 | struct virtio_device *vdev; | 778 | struct virtio_device *vdev; |
708 | struct winsize ws; | ||
709 | 779 | ||
710 | /* The port could have been hot-unplugged */ | 780 | /* The port could have been hot-unplugged */ |
711 | if (!port) | 781 | if (!port || !is_console_port(port)) |
712 | return; | 782 | return; |
713 | 783 | ||
714 | vdev = port->portdev->vdev; | 784 | vdev = port->portdev->vdev; |
715 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { | 785 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) |
716 | vdev->config->get(vdev, | 786 | hvc_resize(port->cons.hvc, port->cons.ws); |
717 | offsetof(struct virtio_console_config, cols), | ||
718 | &ws.ws_col, sizeof(u16)); | ||
719 | vdev->config->get(vdev, | ||
720 | offsetof(struct virtio_console_config, rows), | ||
721 | &ws.ws_row, sizeof(u16)); | ||
722 | hvc_resize(port->cons.hvc, ws); | ||
723 | } | ||
724 | } | 787 | } |
725 | 788 | ||
726 | /* We set the configuration at this point, since we now have a tty */ | 789 | /* We set the configuration at this point, since we now have a tty */ |
@@ -804,6 +867,13 @@ int init_port_console(struct port *port) | |||
804 | spin_unlock_irq(&pdrvdata_lock); | 867 | spin_unlock_irq(&pdrvdata_lock); |
805 | port->guest_connected = true; | 868 | port->guest_connected = true; |
806 | 869 | ||
870 | /* | ||
871 | * Start using the new console output if this is the first | ||
872 | * console to come up. | ||
873 | */ | ||
874 | if (early_put_chars) | ||
875 | early_put_chars = NULL; | ||
876 | |||
807 | /* Notify host of port being opened */ | 877 | /* Notify host of port being opened */ |
808 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); | 878 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); |
809 | 879 | ||
@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, | |||
859 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 929 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
860 | "host_connected: %d\n", port->host_connected); | 930 | "host_connected: %d\n", port->host_connected); |
861 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 931 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
932 | "outvq_full: %d\n", port->outvq_full); | ||
933 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
862 | "is_console: %s\n", | 934 | "is_console: %s\n", |
863 | is_console_port(port) ? "yes" : "no"); | 935 | is_console_port(port) ? "yes" : "no"); |
864 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 936 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = { | |||
875 | .read = debugfs_read, | 947 | .read = debugfs_read, |
876 | }; | 948 | }; |
877 | 949 | ||
950 | static void set_console_size(struct port *port, u16 rows, u16 cols) | ||
951 | { | ||
952 | if (!port || !is_console_port(port)) | ||
953 | return; | ||
954 | |||
955 | port->cons.ws.ws_row = rows; | ||
956 | port->cons.ws.ws_col = cols; | ||
957 | } | ||
958 | |||
959 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | ||
960 | { | ||
961 | struct port_buffer *buf; | ||
962 | unsigned int nr_added_bufs; | ||
963 | int ret; | ||
964 | |||
965 | nr_added_bufs = 0; | ||
966 | do { | ||
967 | buf = alloc_buf(PAGE_SIZE); | ||
968 | if (!buf) | ||
969 | break; | ||
970 | |||
971 | spin_lock_irq(lock); | ||
972 | ret = add_inbuf(vq, buf); | ||
973 | if (ret < 0) { | ||
974 | spin_unlock_irq(lock); | ||
975 | free_buf(buf); | ||
976 | break; | ||
977 | } | ||
978 | nr_added_bufs++; | ||
979 | spin_unlock_irq(lock); | ||
980 | } while (ret > 0); | ||
981 | |||
982 | return nr_added_bufs; | ||
983 | } | ||
984 | |||
985 | static int add_port(struct ports_device *portdev, u32 id) | ||
986 | { | ||
987 | char debugfs_name[16]; | ||
988 | struct port *port; | ||
989 | struct port_buffer *buf; | ||
990 | dev_t devt; | ||
991 | unsigned int nr_added_bufs; | ||
992 | int err; | ||
993 | |||
994 | port = kmalloc(sizeof(*port), GFP_KERNEL); | ||
995 | if (!port) { | ||
996 | err = -ENOMEM; | ||
997 | goto fail; | ||
998 | } | ||
999 | |||
1000 | port->portdev = portdev; | ||
1001 | port->id = id; | ||
1002 | |||
1003 | port->name = NULL; | ||
1004 | port->inbuf = NULL; | ||
1005 | port->cons.hvc = NULL; | ||
1006 | |||
1007 | port->cons.ws.ws_row = port->cons.ws.ws_col = 0; | ||
1008 | |||
1009 | port->host_connected = port->guest_connected = false; | ||
1010 | |||
1011 | port->outvq_full = false; | ||
1012 | |||
1013 | port->in_vq = portdev->in_vqs[port->id]; | ||
1014 | port->out_vq = portdev->out_vqs[port->id]; | ||
1015 | |||
1016 | cdev_init(&port->cdev, &port_fops); | ||
1017 | |||
1018 | devt = MKDEV(portdev->chr_major, id); | ||
1019 | err = cdev_add(&port->cdev, devt, 1); | ||
1020 | if (err < 0) { | ||
1021 | dev_err(&port->portdev->vdev->dev, | ||
1022 | "Error %d adding cdev for port %u\n", err, id); | ||
1023 | goto free_port; | ||
1024 | } | ||
1025 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | ||
1026 | devt, port, "vport%up%u", | ||
1027 | port->portdev->drv_index, id); | ||
1028 | if (IS_ERR(port->dev)) { | ||
1029 | err = PTR_ERR(port->dev); | ||
1030 | dev_err(&port->portdev->vdev->dev, | ||
1031 | "Error %d creating device for port %u\n", | ||
1032 | err, id); | ||
1033 | goto free_cdev; | ||
1034 | } | ||
1035 | |||
1036 | spin_lock_init(&port->inbuf_lock); | ||
1037 | spin_lock_init(&port->outvq_lock); | ||
1038 | init_waitqueue_head(&port->waitqueue); | ||
1039 | |||
1040 | /* Fill the in_vq with buffers so the host can send us data. */ | ||
1041 | nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); | ||
1042 | if (!nr_added_bufs) { | ||
1043 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1044 | err = -ENOMEM; | ||
1045 | goto free_device; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * If we're not using multiport support, this has to be a console port | ||
1050 | */ | ||
1051 | if (!use_multiport(port->portdev)) { | ||
1052 | err = init_port_console(port); | ||
1053 | if (err) | ||
1054 | goto free_inbufs; | ||
1055 | } | ||
1056 | |||
1057 | spin_lock_irq(&portdev->ports_lock); | ||
1058 | list_add_tail(&port->list, &port->portdev->ports); | ||
1059 | spin_unlock_irq(&portdev->ports_lock); | ||
1060 | |||
1061 | /* | ||
1062 | * Tell the Host we're set so that it can send us various | ||
1063 | * configuration parameters for this port (eg, port name, | ||
1064 | * caching, whether this is a console port, etc.) | ||
1065 | */ | ||
1066 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1067 | |||
1068 | if (pdrvdata.debugfs_dir) { | ||
1069 | /* | ||
1070 | * Finally, create the debugfs file that we can use to | ||
1071 | * inspect a port's state at any time | ||
1072 | */ | ||
1073 | sprintf(debugfs_name, "vport%up%u", | ||
1074 | port->portdev->drv_index, id); | ||
1075 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | ||
1076 | pdrvdata.debugfs_dir, | ||
1077 | port, | ||
1078 | &port_debugfs_ops); | ||
1079 | } | ||
1080 | return 0; | ||
1081 | |||
1082 | free_inbufs: | ||
1083 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) | ||
1084 | free_buf(buf); | ||
1085 | free_device: | ||
1086 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1087 | free_cdev: | ||
1088 | cdev_del(&port->cdev); | ||
1089 | free_port: | ||
1090 | kfree(port); | ||
1091 | fail: | ||
1092 | /* The host might want to notify management sw about port add failure */ | ||
1093 | __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); | ||
1094 | return err; | ||
1095 | } | ||
1096 | |||
878 | /* Remove all port-specific data. */ | 1097 | /* Remove all port-specific data. */ |
879 | static int remove_port(struct port *port) | 1098 | static int remove_port(struct port *port) |
880 | { | 1099 | { |
@@ -888,7 +1107,18 @@ static int remove_port(struct port *port) | |||
888 | spin_lock_irq(&pdrvdata_lock); | 1107 | spin_lock_irq(&pdrvdata_lock); |
889 | list_del(&port->cons.list); | 1108 | list_del(&port->cons.list); |
890 | spin_unlock_irq(&pdrvdata_lock); | 1109 | spin_unlock_irq(&pdrvdata_lock); |
1110 | #if 0 | ||
1111 | /* | ||
1112 | * hvc_remove() not called as removing one hvc port | ||
1113 | * results in other hvc ports getting frozen. | ||
1114 | * | ||
1115 | * Once this is resolved in hvc, this functionality | ||
1116 | * will be enabled. Till that is done, the -EPIPE | ||
1117 | * return from get_chars() above will help | ||
1118 | * hvc_console.c to clean up on ports we remove here. | ||
1119 | */ | ||
891 | hvc_remove(port->cons.hvc); | 1120 | hvc_remove(port->cons.hvc); |
1121 | #endif | ||
892 | } | 1122 | } |
893 | if (port->guest_connected) | 1123 | if (port->guest_connected) |
894 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | 1124 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); |
@@ -900,8 +1130,10 @@ static int remove_port(struct port *port) | |||
900 | /* Remove unused data this port might have received. */ | 1130 | /* Remove unused data this port might have received. */ |
901 | discard_port_data(port); | 1131 | discard_port_data(port); |
902 | 1132 | ||
1133 | reclaim_consumed_buffers(port); | ||
1134 | |||
903 | /* Remove buffers we queued up for the Host to send us data in. */ | 1135 | /* Remove buffers we queued up for the Host to send us data in. */ |
904 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | 1136 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) |
905 | free_buf(buf); | 1137 | free_buf(buf); |
906 | 1138 | ||
907 | kfree(port->name); | 1139 | kfree(port->name); |
@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev, | |||
924 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); | 1156 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); |
925 | 1157 | ||
926 | port = find_port_by_id(portdev, cpkt->id); | 1158 | port = find_port_by_id(portdev, cpkt->id); |
927 | if (!port) { | 1159 | if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { |
928 | /* No valid header at start of buffer. Drop it. */ | 1160 | /* No valid header at start of buffer. Drop it. */ |
929 | dev_dbg(&portdev->vdev->dev, | 1161 | dev_dbg(&portdev->vdev->dev, |
930 | "Invalid index %u in control packet\n", cpkt->id); | 1162 | "Invalid index %u in control packet\n", cpkt->id); |
@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev, | |||
932 | } | 1164 | } |
933 | 1165 | ||
934 | switch (cpkt->event) { | 1166 | switch (cpkt->event) { |
1167 | case VIRTIO_CONSOLE_PORT_ADD: | ||
1168 | if (port) { | ||
1169 | dev_dbg(&portdev->vdev->dev, | ||
1170 | "Port %u already added\n", port->id); | ||
1171 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1172 | break; | ||
1173 | } | ||
1174 | if (cpkt->id >= portdev->config.max_nr_ports) { | ||
1175 | dev_warn(&portdev->vdev->dev, | ||
1176 | "Request for adding port with out-of-bound id %u, max. supported id: %u\n", | ||
1177 | cpkt->id, portdev->config.max_nr_ports - 1); | ||
1178 | break; | ||
1179 | } | ||
1180 | add_port(portdev, cpkt->id); | ||
1181 | break; | ||
1182 | case VIRTIO_CONSOLE_PORT_REMOVE: | ||
1183 | remove_port(port); | ||
1184 | break; | ||
935 | case VIRTIO_CONSOLE_CONSOLE_PORT: | 1185 | case VIRTIO_CONSOLE_CONSOLE_PORT: |
936 | if (!cpkt->value) | 1186 | if (!cpkt->value) |
937 | break; | 1187 | break; |
@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev, | |||
944 | * have to notify the host first. | 1194 | * have to notify the host first. |
945 | */ | 1195 | */ |
946 | break; | 1196 | break; |
947 | case VIRTIO_CONSOLE_RESIZE: | 1197 | case VIRTIO_CONSOLE_RESIZE: { |
1198 | struct { | ||
1199 | __u16 rows; | ||
1200 | __u16 cols; | ||
1201 | } size; | ||
1202 | |||
948 | if (!is_console_port(port)) | 1203 | if (!is_console_port(port)) |
949 | break; | 1204 | break; |
1205 | |||
1206 | memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), | ||
1207 | sizeof(size)); | ||
1208 | set_console_size(port, size.rows, size.cols); | ||
1209 | |||
950 | port->cons.hvc->irq_requested = 1; | 1210 | port->cons.hvc->irq_requested = 1; |
951 | resize_console(port); | 1211 | resize_console(port); |
952 | break; | 1212 | break; |
1213 | } | ||
953 | case VIRTIO_CONSOLE_PORT_OPEN: | 1214 | case VIRTIO_CONSOLE_PORT_OPEN: |
954 | port->host_connected = cpkt->value; | 1215 | port->host_connected = cpkt->value; |
955 | wake_up_interruptible(&port->waitqueue); | 1216 | wake_up_interruptible(&port->waitqueue); |
1217 | /* | ||
1218 | * If the host port got closed and the host had any | ||
1219 | * unconsumed buffers, we'll be able to reclaim them | ||
1220 | * now. | ||
1221 | */ | ||
1222 | spin_lock_irq(&port->outvq_lock); | ||
1223 | reclaim_consumed_buffers(port); | ||
1224 | spin_unlock_irq(&port->outvq_lock); | ||
956 | break; | 1225 | break; |
957 | case VIRTIO_CONSOLE_PORT_NAME: | 1226 | case VIRTIO_CONSOLE_PORT_NAME: |
958 | /* | 1227 | /* |
@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev, | |||
990 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); | 1259 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); |
991 | } | 1260 | } |
992 | break; | 1261 | break; |
993 | case VIRTIO_CONSOLE_PORT_REMOVE: | ||
994 | /* | ||
995 | * Hot unplug the port. We don't decrement nr_ports | ||
996 | * since we don't want to deal with extra complexities | ||
997 | * of using the lowest-available port id: We can just | ||
998 | * pick up the nr_ports number as the id and not have | ||
999 | * userspace send it to us. This helps us in two | ||
1000 | * ways: | ||
1001 | * | ||
1002 | * - We don't need to have a 'port_id' field in the | ||
1003 | * config space when a port is hot-added. This is a | ||
1004 | * good thing as we might queue up multiple hotplug | ||
1005 | * requests issued in our workqueue. | ||
1006 | * | ||
1007 | * - Another way to deal with this would have been to | ||
1008 | * use a bitmap of the active ports and select the | ||
1009 | * lowest non-active port from that map. That | ||
1010 | * bloats the already tight config space and we | ||
1011 | * would end up artificially limiting the | ||
1012 | * max. number of ports to sizeof(bitmap). Right | ||
1013 | * now we can support 2^32 ports (as the port id is | ||
1014 | * stored in a u32 type). | ||
1015 | * | ||
1016 | */ | ||
1017 | remove_port(port); | ||
1018 | break; | ||
1019 | } | 1262 | } |
1020 | } | 1263 | } |
1021 | 1264 | ||
@@ -1030,7 +1273,7 @@ static void control_work_handler(struct work_struct *work) | |||
1030 | vq = portdev->c_ivq; | 1273 | vq = portdev->c_ivq; |
1031 | 1274 | ||
1032 | spin_lock(&portdev->cvq_lock); | 1275 | spin_lock(&portdev->cvq_lock); |
1033 | while ((buf = vq->vq_ops->get_buf(vq, &len))) { | 1276 | while ((buf = virtqueue_get_buf(vq, &len))) { |
1034 | spin_unlock(&portdev->cvq_lock); | 1277 | spin_unlock(&portdev->cvq_lock); |
1035 | 1278 | ||
1036 | buf->len = len; | 1279 | buf->len = len; |
@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev) | |||
1092 | struct ports_device *portdev; | 1335 | struct ports_device *portdev; |
1093 | 1336 | ||
1094 | portdev = vdev->priv; | 1337 | portdev = vdev->priv; |
1095 | if (use_multiport(portdev)) { | ||
1096 | /* Handle port hot-add */ | ||
1097 | schedule_work(&portdev->config_work); | ||
1098 | } | ||
1099 | /* | ||
1100 | * We'll use this way of resizing only for legacy support. | ||
1101 | * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use | ||
1102 | * control messages to indicate console size changes so that | ||
1103 | * it can be done per-port | ||
1104 | */ | ||
1105 | resize_console(find_port_by_id(portdev, 0)); | ||
1106 | } | ||
1107 | |||
1108 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | ||
1109 | { | ||
1110 | struct port_buffer *buf; | ||
1111 | unsigned int nr_added_bufs; | ||
1112 | int ret; | ||
1113 | |||
1114 | nr_added_bufs = 0; | ||
1115 | do { | ||
1116 | buf = alloc_buf(PAGE_SIZE); | ||
1117 | if (!buf) | ||
1118 | break; | ||
1119 | |||
1120 | spin_lock_irq(lock); | ||
1121 | ret = add_inbuf(vq, buf); | ||
1122 | if (ret < 0) { | ||
1123 | spin_unlock_irq(lock); | ||
1124 | free_buf(buf); | ||
1125 | break; | ||
1126 | } | ||
1127 | nr_added_bufs++; | ||
1128 | spin_unlock_irq(lock); | ||
1129 | } while (ret > 0); | ||
1130 | |||
1131 | return nr_added_bufs; | ||
1132 | } | ||
1133 | |||
1134 | static int add_port(struct ports_device *portdev, u32 id) | ||
1135 | { | ||
1136 | char debugfs_name[16]; | ||
1137 | struct port *port; | ||
1138 | struct port_buffer *buf; | ||
1139 | dev_t devt; | ||
1140 | unsigned int nr_added_bufs; | ||
1141 | int err; | ||
1142 | |||
1143 | port = kmalloc(sizeof(*port), GFP_KERNEL); | ||
1144 | if (!port) { | ||
1145 | err = -ENOMEM; | ||
1146 | goto fail; | ||
1147 | } | ||
1148 | |||
1149 | port->portdev = portdev; | ||
1150 | port->id = id; | ||
1151 | |||
1152 | port->name = NULL; | ||
1153 | port->inbuf = NULL; | ||
1154 | port->cons.hvc = NULL; | ||
1155 | |||
1156 | port->host_connected = port->guest_connected = false; | ||
1157 | |||
1158 | port->in_vq = portdev->in_vqs[port->id]; | ||
1159 | port->out_vq = portdev->out_vqs[port->id]; | ||
1160 | |||
1161 | cdev_init(&port->cdev, &port_fops); | ||
1162 | |||
1163 | devt = MKDEV(portdev->chr_major, id); | ||
1164 | err = cdev_add(&port->cdev, devt, 1); | ||
1165 | if (err < 0) { | ||
1166 | dev_err(&port->portdev->vdev->dev, | ||
1167 | "Error %d adding cdev for port %u\n", err, id); | ||
1168 | goto free_port; | ||
1169 | } | ||
1170 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | ||
1171 | devt, port, "vport%up%u", | ||
1172 | port->portdev->drv_index, id); | ||
1173 | if (IS_ERR(port->dev)) { | ||
1174 | err = PTR_ERR(port->dev); | ||
1175 | dev_err(&port->portdev->vdev->dev, | ||
1176 | "Error %d creating device for port %u\n", | ||
1177 | err, id); | ||
1178 | goto free_cdev; | ||
1179 | } | ||
1180 | |||
1181 | spin_lock_init(&port->inbuf_lock); | ||
1182 | init_waitqueue_head(&port->waitqueue); | ||
1183 | |||
1184 | /* Fill the in_vq with buffers so the host can send us data. */ | ||
1185 | nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); | ||
1186 | if (!nr_added_bufs) { | ||
1187 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1188 | err = -ENOMEM; | ||
1189 | goto free_device; | ||
1190 | } | ||
1191 | |||
1192 | /* | ||
1193 | * If we're not using multiport support, this has to be a console port | ||
1194 | */ | ||
1195 | if (!use_multiport(port->portdev)) { | ||
1196 | err = init_port_console(port); | ||
1197 | if (err) | ||
1198 | goto free_inbufs; | ||
1199 | } | ||
1200 | |||
1201 | spin_lock_irq(&portdev->ports_lock); | ||
1202 | list_add_tail(&port->list, &port->portdev->ports); | ||
1203 | spin_unlock_irq(&portdev->ports_lock); | ||
1204 | |||
1205 | /* | ||
1206 | * Tell the Host we're set so that it can send us various | ||
1207 | * configuration parameters for this port (eg, port name, | ||
1208 | * caching, whether this is a console port, etc.) | ||
1209 | */ | ||
1210 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1211 | |||
1212 | if (pdrvdata.debugfs_dir) { | ||
1213 | /* | ||
1214 | * Finally, create the debugfs file that we can use to | ||
1215 | * inspect a port's state at any time | ||
1216 | */ | ||
1217 | sprintf(debugfs_name, "vport%up%u", | ||
1218 | port->portdev->drv_index, id); | ||
1219 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | ||
1220 | pdrvdata.debugfs_dir, | ||
1221 | port, | ||
1222 | &port_debugfs_ops); | ||
1223 | } | ||
1224 | return 0; | ||
1225 | |||
1226 | free_inbufs: | ||
1227 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | ||
1228 | free_buf(buf); | ||
1229 | free_device: | ||
1230 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1231 | free_cdev: | ||
1232 | cdev_del(&port->cdev); | ||
1233 | free_port: | ||
1234 | kfree(port); | ||
1235 | fail: | ||
1236 | return err; | ||
1237 | } | ||
1238 | 1338 | ||
1239 | /* | 1339 | if (!use_multiport(portdev)) { |
1240 | * The workhandler for config-space updates. | 1340 | struct port *port; |
1241 | * | 1341 | u16 rows, cols; |
1242 | * This is called when ports are hot-added. | ||
1243 | */ | ||
1244 | static void config_work_handler(struct work_struct *work) | ||
1245 | { | ||
1246 | struct virtio_console_multiport_conf virtconconf; | ||
1247 | struct ports_device *portdev; | ||
1248 | struct virtio_device *vdev; | ||
1249 | int err; | ||
1250 | 1342 | ||
1251 | portdev = container_of(work, struct ports_device, config_work); | 1343 | vdev->config->get(vdev, |
1344 | offsetof(struct virtio_console_config, cols), | ||
1345 | &cols, sizeof(u16)); | ||
1346 | vdev->config->get(vdev, | ||
1347 | offsetof(struct virtio_console_config, rows), | ||
1348 | &rows, sizeof(u16)); | ||
1252 | 1349 | ||
1253 | vdev = portdev->vdev; | 1350 | port = find_port_by_id(portdev, 0); |
1254 | vdev->config->get(vdev, | 1351 | set_console_size(port, rows, cols); |
1255 | offsetof(struct virtio_console_multiport_conf, | ||
1256 | nr_ports), | ||
1257 | &virtconconf.nr_ports, | ||
1258 | sizeof(virtconconf.nr_ports)); | ||
1259 | 1352 | ||
1260 | if (portdev->config.nr_ports == virtconconf.nr_ports) { | ||
1261 | /* | 1353 | /* |
1262 | * Port 0 got hot-added. Since we already did all the | 1354 | * We'll use this way of resizing only for legacy |
1263 | * other initialisation for it, just tell the Host | 1355 | * support. For newer userspace |
1264 | * that the port is ready if we find the port. In | 1356 | * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages |
1265 | * case the port was hot-removed earlier, we call | 1357 | * to indicate console size changes so that it can be |
1266 | * add_port to add the port. | 1358 | * done per-port. |
1267 | */ | 1359 | */ |
1268 | struct port *port; | 1360 | resize_console(port); |
1269 | |||
1270 | port = find_port_by_id(portdev, 0); | ||
1271 | if (!port) | ||
1272 | add_port(portdev, 0); | ||
1273 | else | ||
1274 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1275 | return; | ||
1276 | } | ||
1277 | if (virtconconf.nr_ports > portdev->config.max_nr_ports) { | ||
1278 | dev_warn(&vdev->dev, | ||
1279 | "More ports specified (%u) than allowed (%u)", | ||
1280 | portdev->config.nr_ports + 1, | ||
1281 | portdev->config.max_nr_ports); | ||
1282 | return; | ||
1283 | } | ||
1284 | if (virtconconf.nr_ports < portdev->config.nr_ports) | ||
1285 | return; | ||
1286 | |||
1287 | /* Hot-add ports */ | ||
1288 | while (virtconconf.nr_ports - portdev->config.nr_ports) { | ||
1289 | err = add_port(portdev, portdev->config.nr_ports); | ||
1290 | if (err) | ||
1291 | break; | ||
1292 | portdev->config.nr_ports++; | ||
1293 | } | 1361 | } |
1294 | } | 1362 | } |
1295 | 1363 | ||
@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = { | |||
1414 | static int __devinit virtcons_probe(struct virtio_device *vdev) | 1482 | static int __devinit virtcons_probe(struct virtio_device *vdev) |
1415 | { | 1483 | { |
1416 | struct ports_device *portdev; | 1484 | struct ports_device *portdev; |
1417 | u32 i; | ||
1418 | int err; | 1485 | int err; |
1419 | bool multiport; | 1486 | bool multiport; |
1420 | 1487 | ||
@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
1443 | } | 1510 | } |
1444 | 1511 | ||
1445 | multiport = false; | 1512 | multiport = false; |
1446 | portdev->config.nr_ports = 1; | ||
1447 | portdev->config.max_nr_ports = 1; | 1513 | portdev->config.max_nr_ports = 1; |
1448 | #if 0 /* Multiport is not quite ready yet --RR */ | ||
1449 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { | 1514 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { |
1450 | multiport = true; | 1515 | multiport = true; |
1451 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; | 1516 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; |
1452 | 1517 | ||
1453 | vdev->config->get(vdev, | 1518 | vdev->config->get(vdev, offsetof(struct virtio_console_config, |
1454 | offsetof(struct virtio_console_multiport_conf, | 1519 | max_nr_ports), |
1455 | nr_ports), | ||
1456 | &portdev->config.nr_ports, | ||
1457 | sizeof(portdev->config.nr_ports)); | ||
1458 | vdev->config->get(vdev, | ||
1459 | offsetof(struct virtio_console_multiport_conf, | ||
1460 | max_nr_ports), | ||
1461 | &portdev->config.max_nr_ports, | 1520 | &portdev->config.max_nr_ports, |
1462 | sizeof(portdev->config.max_nr_ports)); | 1521 | sizeof(portdev->config.max_nr_ports)); |
1463 | if (portdev->config.nr_ports > portdev->config.max_nr_ports) { | ||
1464 | dev_warn(&vdev->dev, | ||
1465 | "More ports (%u) specified than allowed (%u). Will init %u ports.", | ||
1466 | portdev->config.nr_ports, | ||
1467 | portdev->config.max_nr_ports, | ||
1468 | portdev->config.max_nr_ports); | ||
1469 | |||
1470 | portdev->config.nr_ports = portdev->config.max_nr_ports; | ||
1471 | } | ||
1472 | } | 1522 | } |
1473 | 1523 | ||
1474 | /* Let the Host know we support multiple ports.*/ | 1524 | /* Let the Host know we support multiple ports.*/ |
1475 | vdev->config->finalize_features(vdev); | 1525 | vdev->config->finalize_features(vdev); |
1476 | #endif | ||
1477 | 1526 | ||
1478 | err = init_vqs(portdev); | 1527 | err = init_vqs(portdev); |
1479 | if (err < 0) { | 1528 | if (err < 0) { |
@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
1489 | 1538 | ||
1490 | spin_lock_init(&portdev->cvq_lock); | 1539 | spin_lock_init(&portdev->cvq_lock); |
1491 | INIT_WORK(&portdev->control_work, &control_work_handler); | 1540 | INIT_WORK(&portdev->control_work, &control_work_handler); |
1492 | INIT_WORK(&portdev->config_work, &config_work_handler); | ||
1493 | 1541 | ||
1494 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 1542 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); |
1495 | if (!nr_added_bufs) { | 1543 | if (!nr_added_bufs) { |
@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
1498 | err = -ENOMEM; | 1546 | err = -ENOMEM; |
1499 | goto free_vqs; | 1547 | goto free_vqs; |
1500 | } | 1548 | } |
1549 | } else { | ||
1550 | /* | ||
1551 | * For backward compatibility: Create a console port | ||
1552 | * if we're running on older host. | ||
1553 | */ | ||
1554 | add_port(portdev, 0); | ||
1501 | } | 1555 | } |
1502 | 1556 | ||
1503 | for (i = 0; i < portdev->config.nr_ports; i++) | 1557 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, |
1504 | add_port(portdev, i); | 1558 | VIRTIO_CONSOLE_DEVICE_READY, 1); |
1505 | |||
1506 | /* Start using the new console output. */ | ||
1507 | early_put_chars = NULL; | ||
1508 | return 0; | 1559 | return 0; |
1509 | 1560 | ||
1510 | free_vqs: | 1561 | free_vqs: |
1562 | /* The host might want to notify mgmt sw about device add failure */ | ||
1563 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, | ||
1564 | VIRTIO_CONSOLE_DEVICE_READY, 0); | ||
1511 | vdev->config->del_vqs(vdev); | 1565 | vdev->config->del_vqs(vdev); |
1512 | kfree(portdev->in_vqs); | 1566 | kfree(portdev->in_vqs); |
1513 | kfree(portdev->out_vqs); | 1567 | kfree(portdev->out_vqs); |
@@ -1529,17 +1583,16 @@ static void virtcons_remove(struct virtio_device *vdev) | |||
1529 | portdev = vdev->priv; | 1583 | portdev = vdev->priv; |
1530 | 1584 | ||
1531 | cancel_work_sync(&portdev->control_work); | 1585 | cancel_work_sync(&portdev->control_work); |
1532 | cancel_work_sync(&portdev->config_work); | ||
1533 | 1586 | ||
1534 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 1587 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
1535 | remove_port(port); | 1588 | remove_port(port); |
1536 | 1589 | ||
1537 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | 1590 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); |
1538 | 1591 | ||
1539 | while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) | 1592 | while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) |
1540 | free_buf(buf); | 1593 | free_buf(buf); |
1541 | 1594 | ||
1542 | while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) | 1595 | while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) |
1543 | free_buf(buf); | 1596 | free_buf(buf); |
1544 | 1597 | ||
1545 | vdev->config->del_vqs(vdev); | 1598 | vdev->config->del_vqs(vdev); |
@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = { | |||
1556 | 1609 | ||
1557 | static unsigned int features[] = { | 1610 | static unsigned int features[] = { |
1558 | VIRTIO_CONSOLE_F_SIZE, | 1611 | VIRTIO_CONSOLE_F_SIZE, |
1612 | VIRTIO_CONSOLE_F_MULTIPORT, | ||
1559 | }; | 1613 | }; |
1560 | 1614 | ||
1561 | static struct virtio_driver virtio_console = { | 1615 | static struct virtio_driver virtio_console = { |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index bd1d1164fec5..7cdb6ee569cd 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) | |||
3967 | font.charcount = op->charcount; | 3967 | font.charcount = op->charcount; |
3968 | font.height = op->height; | 3968 | font.height = op->height; |
3969 | font.width = op->width; | 3969 | font.width = op->width; |
3970 | font.data = kmalloc(size, GFP_KERNEL); | 3970 | font.data = memdup_user(op->data, size); |
3971 | if (!font.data) | 3971 | if (IS_ERR(font.data)) |
3972 | return -ENOMEM; | 3972 | return PTR_ERR(font.data); |
3973 | if (copy_from_user(font.data, op->data, size)) { | ||
3974 | kfree(font.data); | ||
3975 | return -EFAULT; | ||
3976 | } | ||
3977 | acquire_console_sem(); | 3973 | acquire_console_sem(); |
3978 | if (vc->vc_sw->con_font_set) | 3974 | if (vc->vc_sw->con_font_set) |
3979 | rc = vc->vc_sw->con_font_set(vc, &font, op->flags); | 3975 | rc = vc->vc_sw->con_font_set(vc, &font, op->flags); |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 7261b8d9087c..ed8a9cec2a05 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -772,18 +772,18 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match) | |||
772 | 772 | ||
773 | dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match); | 773 | dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match); |
774 | 774 | ||
775 | rc = of_address_to_resource(op->node, 0, &res); | 775 | rc = of_address_to_resource(op->dev.of_node, 0, &res); |
776 | if (rc) { | 776 | if (rc) { |
777 | dev_err(&op->dev, "invalid address\n"); | 777 | dev_err(&op->dev, "invalid address\n"); |
778 | return rc; | 778 | return rc; |
779 | } | 779 | } |
780 | 780 | ||
781 | id = of_get_property(op->node, "port-number", NULL); | 781 | id = of_get_property(op->dev.of_node, "port-number", NULL); |
782 | 782 | ||
783 | /* It's most likely that we're using V4, if the family is not | 783 | /* It's most likely that we're using V4, if the family is not |
784 | specified */ | 784 | specified */ |
785 | regs = &v4_config_registers; | 785 | regs = &v4_config_registers; |
786 | family = of_get_property(op->node, "xlnx,family", NULL); | 786 | family = of_get_property(op->dev.of_node, "xlnx,family", NULL); |
787 | 787 | ||
788 | if (family) { | 788 | if (family) { |
789 | if (!strcmp(family, "virtex2p")) { | 789 | if (!strcmp(family, "virtex2p")) { |
@@ -812,13 +812,12 @@ static const struct of_device_id __devinitconst hwicap_of_match[] = { | |||
812 | MODULE_DEVICE_TABLE(of, hwicap_of_match); | 812 | MODULE_DEVICE_TABLE(of, hwicap_of_match); |
813 | 813 | ||
814 | static struct of_platform_driver hwicap_of_driver = { | 814 | static struct of_platform_driver hwicap_of_driver = { |
815 | .owner = THIS_MODULE, | ||
816 | .name = DRIVER_NAME, | ||
817 | .match_table = hwicap_of_match, | ||
818 | .probe = hwicap_of_probe, | 815 | .probe = hwicap_of_probe, |
819 | .remove = __devexit_p(hwicap_of_remove), | 816 | .remove = __devexit_p(hwicap_of_remove), |
820 | .driver = { | 817 | .driver = { |
821 | .name = DRIVER_NAME, | 818 | .name = DRIVER_NAME, |
819 | .owner = THIS_MODULE, | ||
820 | .of_match_table = hwicap_of_match, | ||
822 | }, | 821 | }, |
823 | }; | 822 | }; |
824 | 823 | ||